yama committed
Commit · f22e732
1 Parent(s): 7943b58
Update app.py
app.py CHANGED
@@ -352,11 +352,13 @@ def speech_to_text(video_file_path, selected_source_lang, whisper_model, num_spe
         raise RuntimeError("Error Running inference with local model", e)
 
 
-def create_transcription_summary(openai_key, prompt):
+def create_transcription_summary(openai_key, prompt, transcript_result_path):
     openai.api_key = openai_key
     system_template = prompt
 
-
+    with open(transcript_result_path, "r") as file:
+        transcript_text = file.read()
+
     completion = openai.ChatCompletion.create(
         model="gpt-3.5-turbo",
         messages=[
@@ -368,6 +370,14 @@ def create_transcription_summary(openai_key, prompt):
     return transcript_summary
 
 
+def transcription_summary_btn_click_callback():
+    openai_key = openai_key_in.value
+    prompt = openai_prompt_in.value
+    transcript_result_path = "output/transcript_result.csv"
+    transcript_summary = create_transcription_summary(openai_key, prompt, transcript_result_path)
+    transcription_summary_out.value = transcript_summary
+
+
 # ---- Gradio Layout -----
 # Inspiration from https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles
 video_in = gr.Video(label="Video file", mirror_webcam=False)
@@ -463,6 +473,7 @@ with demo:
         openai_key_in.render()
         openai_prompt_in.render()
         transcription_summary_btn = gr.Button("Evaluate and analyze transcription content")
+        transcription_summary_btn.click(transcription_summary_btn_click_callback)
         transcription_summary_out.render()
         system_info.render()
         gr.Markdown(
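For context on how the new pieces fit together: the added callback reads openai_key_in.value and openai_prompt_in.value directly and assigns the result to transcription_summary_out.value. The more common Gradio pattern is to pass the components as inputs and outputs to Button.click() and return the new value from the callback. Below is a minimal, self-contained sketch of that pattern, not the app's actual code: the gr.Blocks() setup, the component labels, the message structure, and the use of completion.choices[0].message.content for the summary text are assumptions, while the function names, the gpt-3.5-turbo model, and the output/transcript_result.csv path come from the diff.

import gradio as gr
import openai


def create_transcription_summary(openai_key, prompt, transcript_result_path):
    # Same signature as in the diff: read the saved transcript and ask the
    # chat model to summarize it using the user-supplied prompt.
    openai.api_key = openai_key
    with open(transcript_result_path, "r") as file:
        transcript_text = file.read()
    completion = openai.ChatCompletion.create(  # openai<1.0 API, as used in the diff
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": prompt},       # assumed message structure
            {"role": "user", "content": transcript_text},
        ],
    )
    # Assumption: the summary is the first choice's message content.
    return completion.choices[0].message.content


def transcription_summary_btn_click_callback(openai_key, prompt):
    # Receives the textbox values as arguments instead of reading .value,
    # and returns the summary instead of assigning to .value.
    return create_transcription_summary(openai_key, prompt, "output/transcript_result.csv")


with gr.Blocks() as demo:
    # Hypothetical component definitions; labels are illustrative only.
    openai_key_in = gr.Textbox(label="OpenAI API key", type="password")
    openai_prompt_in = gr.Textbox(label="Summarization prompt")
    transcription_summary_btn = gr.Button("Evaluate and analyze transcription content")
    transcription_summary_out = gr.Textbox(label="Transcription summary")

    transcription_summary_btn.click(
        transcription_summary_btn_click_callback,
        inputs=[openai_key_in, openai_prompt_in],
        outputs=transcription_summary_out,
    )

demo.launch()

With this wiring, Gradio passes the current textbox values into the callback and writes the returned string into the output box, so the callback does not need to touch any component's .value attribute.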