Update app.py
app.py CHANGED
@@ -1479,24 +1479,46 @@ if 'query' in st.query_params:
     # Display content or image based on the query
     display_content_or_image(query)
 
-
+def transcribe_canary(filename):
+    from gradio_client import Client
+
+    client = Client("https://awacke1-speech-recognition-canary-nvidiat4.hf.space/")
+    result = client.predict(
+        filename,   # filepath in 'parameter_5' Audio component
+        "English",  # Literal['English', 'Spanish', 'French', 'German'] in 'Input audio is spoken in:' Dropdown component
+        "English",  # Literal['English', 'Spanish', 'French', 'German'] in 'Transcribe in language:' Dropdown component
+        True,       # bool in 'Punctuation & Capitalization in transcript?' Checkbox component
+        api_name="/transcribe"
+    )
+    st.write(result)
+    return result
+
 filename = save_and_play_audio(audio_recorder)
 if filename is not None:
-    transcription = transcribe_audio(filename)
-    try:
-        transcript = transcription['text']
-        st.write(transcript)
+    transcript=''
 
-    except:
-        transcript=''
-        st.write(transcript)
-
-    st.write('Reasoning with your inputs..')
-    response = chat_with_model(transcript)
-    st.write('Response:')
-    st.write(response)
-    filename = generate_filename(response, "txt")
-    create_file(filename, transcript, response, should_save)
+    transcript=transcribe_canary(filename)
+    result = search_arxiv(transcript)
+    result2 = search_glossary(result)
+    #st.markdown(result)
+    #st.markdown
+
+
+    #transcription = transcribe_audio(filename)
+    #try:
+    #    transcript = transcription['text']
+    #    st.write(transcript)
+
+    #except:
+    #    transcript=''
+    #    st.write(transcript)
+
+    #st.write('Reasoning with your inputs..')
+    #response = chat_with_model(transcript)
+    #st.write('Response:')
+    #st.write(response)
+    #filename = generate_filename(response, "txt")
+    #create_file(filename, transcript, response, should_save)
 
     # Whisper to Llama:
     response = StreamLLMChatResponse(transcript)
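
For reference, the new transcribe_canary helper in this commit boils down to a single gradio_client call against the Canary Space. Below is a minimal standalone sketch of that call, not part of the commit itself: it assumes the Space at https://awacke1-speech-recognition-canary-nvidiat4.hf.space/ is running and still exposes the /transcribe endpoint with the same four inputs, and the audio path sample.wav is only a placeholder.

# Standalone sketch of the gradio_client call wrapped by transcribe_canary above.
# Assumption: the Space is awake and its "/transcribe" endpoint accepts these four
# inputs; "sample.wav" is a placeholder path to a local audio file.
from gradio_client import Client

client = Client("https://awacke1-speech-recognition-canary-nvidiat4.hf.space/")
result = client.predict(
    "sample.wav",   # local audio file to transcribe (placeholder)
    "English",      # language the input audio is spoken in
    "English",      # language to transcribe into
    True,           # keep punctuation & capitalization in the transcript
    api_name="/transcribe",
)
print(result)       # transcript text returned by the Space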