yama committed
Commit 427351c · Parent: 7d0d1e9

Update app.py

Files changed (1): app.py +3 -11
app.py CHANGED
@@ -256,11 +256,11 @@ def speech_to_text(video_file_path, selected_source_lang, whisper_model, num_spe
         raise RuntimeError("Error Running inference with local model", e)
 
 
-def create_transcription_summary(openai_key, prompt, transcript_result_path):
+def create_transcription_summary(openai_key, prompt):
     openai.api_key = openai_key
     system_template = prompt
 
-    with open(transcript_result_path, "r") as file:
+    with open("output/transcript_result.csv", "r") as file:
         transcript_text = file.read()
 
     completion = openai.ChatCompletion.create(
@@ -274,14 +274,6 @@ def create_transcription_summary(openai_key, prompt, transcript_result_path):
     return transcript_summary
 
 
-# def transcription_summary_btn_click_callback():
-#     openai_key = openai_key_in.value
-#     prompt = openai_prompt_in.value
-#     transcript_result_path = "output/transcript_result.csv"
-#     transcript_summary = create_transcription_summary(openai_key, prompt, transcript_result_path)
-#     transcription_summary_out.value = transcript_summary
-
-
 # ---- Gradio Layout -----
 # Inspiration from https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles
 video_in = gr.Video(label="Video file", mirror_webcam=False)
@@ -387,7 +379,7 @@ with demo:
     openai_prompt_in.render()
     openai_summary_btn = gr.Button("Evaluate and analyze transcription content")
     openai_summary_btn.click(create_transcription_summary,
-                             [openai_key_in, openai_prompt_in, "output/transcript_result.csv"],
+                             [openai_key_in, openai_prompt_in],
                              [openai_summary_out]
                              )
 
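
After this change, the summary button's click handler passes only the two Gradio components, matching the function's new two-argument signature ([openai_key_in, openai_prompt_in] → [openai_summary_out]); the transcript path is hardcoded inside the function, presumably because a bare string is not a valid input component for a Blocks click event. For reference, below is a minimal sketch of how create_transcription_summary reads after this commit. The portion between the two hunks is not shown in the diff, so the ChatCompletion.create arguments (model, messages) and the extraction of transcript_summary are assumptions following the pre-1.0 openai chat-completion pattern, not the author's exact code.

import openai

def create_transcription_summary(openai_key, prompt):
    openai.api_key = openai_key
    system_template = prompt

    # The transcript path is now fixed instead of being passed in as an argument.
    with open("output/transcript_result.csv", "r") as file:
        transcript_text = file.read()

    # Assumed body: the exact arguments are elided between the diff hunks.
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",  # assumed model name
        messages=[
            {"role": "system", "content": system_template},
            {"role": "user", "content": transcript_text},
        ],
    )
    transcript_summary = completion.choices[0].message.content  # assumed extraction
    return transcript_summary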