yama committed
Commit df3d853 · Parent: 05087f6

Update app.py

Files changed (1): app.py (+4 −2)
app.py CHANGED
@@ -27,6 +27,7 @@ import contextlib
 from transformers import pipeline
 import psutil
 import openai
+import tempfile

 whisper_models = ["tiny", "base", "small", "medium", "large-v1", "large-v2"]
 source_languages = {
@@ -287,7 +288,7 @@ video_in = gr.Video(label="Video file", mirror_webcam=False)
 youtube_url_in = gr.Textbox(label="Youtube url", lines=1, interactive=True)
 df_init = pd.DataFrame(columns=['Start', 'End', 'Speaker', 'Text'])
 memory = psutil.virtual_memory()
-selected_source_lang = gr.Dropdown(choices=source_language_list, type="value", value="ja",
+selected_source_lang = gr.Dropdown(choices=source_language_list, type="value", value="en",
                                    label="Spoken language in video", interactive=True)
 selected_whisper_model = gr.Dropdown(choices=whisper_models, type="value", value="base", label="Selected Whisper model",
                                      interactive=True)
@@ -307,6 +308,7 @@ openai_prompt_in = gr.TextArea(label="openai_prompt", value="""会議の文字
 - 会議の内容
 - 会議の結果""")
 openai_summary_out = gr.Textbox(label="openai_summary")
+save_path = "output/transcript_result.csv"

 title = "Whisper speaker diarization"
 demo = gr.Blocks(title=title)
@@ -385,7 +387,7 @@ with demo:
     openai_prompt_in.render()
     openai_summary_btn = gr.Button("Evaluate and analyze transcription content")
     openai_summary_btn.click(create_transcription_summary,
-                             [openai_key_in, openai_prompt_in, download_transcript],
+                             [openai_key_in, openai_prompt_in, save_path],
                              [openai_summary_out]
                              )
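
For orientation only: a minimal sketch of a create_transcription_summary compatible with the rewired click handler, assuming it reads the transcript CSV saved at save_path (columns Start, End, Speaker, Text, per df_init) and sends it to the OpenAI chat API together with the user's prompt (the default prompt is a Japanese template asking for the meeting's content and results). The body, model name, and column handling below are illustrative assumptions, not the repository's actual implementation.

import openai
import pandas as pd

def create_transcription_summary(openai_key, prompt, transcript_path):
    # Hypothetical sketch: the real implementation lives elsewhere in app.py.
    openai.api_key = openai_key
    # Columns match df_init above: Start, End, Speaker, Text.
    df = pd.read_csv(transcript_path)
    transcript = "\n".join(
        f"{row.Speaker}: {row.Text}" for row in df.itertuples(index=False)
    )
    # Pre-1.0 openai client, matching the bare `import openai` above;
    # the model choice is an assumption.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": prompt},
            {"role": "user", "content": transcript},
        ],
    )
    return response["choices"][0]["message"]["content"]

One design note: Gradio's click() normally expects components in its inputs list, so a constant such as save_path is usually passed by wrapping it in gr.State(save_path) rather than as a bare string.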