ja-818 committed on
Commit 3953375 · 1 Parent(s): 5305296

Added tabs and refactored code
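
The refactor moves the single input/output layout into two `gr.Tab` containers, one for uploaded files and one for microphone recordings. A minimal sketch of the tab pattern, assuming Gradio 3.x (where `gr.Audio` selects its input mode with a `source` argument): components created inside a `with gr.Tab(...)` block render on that tab only.

```python
import gradio as gr

# Minimal sketch of the gr.Tab layout this commit introduces (Gradio 3.x assumed).
with gr.Blocks() as demo:
    with gr.Tab("Upload audio"):
        gr.Audio(label="Audio File", source="upload", type="filepath")
    with gr.Tab("Record audio"):
        gr.Audio(label="Audio recording", source="microphone", type="filepath")

demo.launch()
```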

Files changed (1)
  1. app.py +37 -18
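
For context, app.py imports `infere_voice2text`, `infere_speech_emotion`, and `infere_text_emotion` from `models`; their implementations are not part of this diff. A hypothetical stub of the interface the wiring below assumes (the audio components use `type="filepath"`, so the two speech functions receive a file path, while the text function receives a textbox value):

```python
# Hypothetical stubs: the real models.py is not shown in this diff. app.py only
# assumes each function maps its input component's value to a string that can
# be written into a gr.Textbox.
def infere_voice2text(audio_path: str) -> str:
    """Transcribe the audio file at `audio_path` to text."""
    ...

def infere_speech_emotion(audio_path: str) -> str:
    """Classify the emotion carried by the speech signal itself."""
    ...

def infere_text_emotion(text: str) -> str:
    """Classify the emotion expressed by the transcribed words."""
    ...
```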
app.py CHANGED
@@ -4,19 +4,41 @@ from models import infere_speech_emotion, infere_text_emotion, infere_voice2text
 
 # Create a Gradio app object
 with gr.Blocks() as demo:
-    # HTML titles
-    gr.HTML('''
-        <h1 style="text-align:center;">Speech and Text Emotion Recognition</h1>
-        <h2 style="text-align:center;">Determining someone's emotions can be challenging based solely on their tone or words <br> This app uses both to provide a more accurate analysis of emotional expression in a single audio recording</h2>
-    ''')
-    # Input and output fields
-    with gr.Row():
-        input = gr.Audio(label="Audio File", type="filepath")
-        with gr.Column():
-            output0 = gr.Textbox(label="Text from the audio")
-            output1 = gr.Textbox(label="Speech emotion")
-            output2 = gr.Textbox(label="Text emotion")
-    btn = gr.Button("Analyze audio")
+    gr.Markdown(
+        '''
+        # Speech and Text Emotion Recognition
+        ## Determining someone's emotions can be challenging based solely on their tone or words
+        ### This app uses both to provide a more accurate analysis of emotional expression in a single audio recording
+        '''
+    )
+
+    # Upload audio input and output fields
+    with gr.Tab("Upload audio"):
+        with gr.Row():
+            upload_input = gr.Audio(label="Audio File", source="upload", type="filepath")
+            with gr.Column():
+                upload_output_1 = gr.Textbox(label="Text from the audio")
+                upload_output_2 = gr.Textbox(label="Speech emotion")
+                upload_output_3 = gr.Textbox(label="Text emotion")
+        btn0 = gr.Button("Analyze audio")
+        # Input-output logic based on button click
+        btn0.click(fn=infere_voice2text, inputs=upload_input, outputs=upload_output_1)
+        btn0.click(fn=infere_speech_emotion, inputs=upload_input, outputs=upload_output_2)
+        upload_output_1.change(fn=infere_text_emotion, inputs=upload_output_1, outputs=upload_output_3)
+
+    # Record audio input and output fields
+    with gr.Tab("Record audio"):
+        with gr.Row():
+            record_input = gr.Audio(label="Audio recording", source="microphone", type="filepath")
+            with gr.Column():
+                record_output_1 = gr.Textbox(label="Text from the audio")
+                record_output_2 = gr.Textbox(label="Speech emotion")
+                record_output_3 = gr.Textbox(label="Text emotion")
+        btn1 = gr.Button("Analyze audio")
+        # Input-output logic based on button click
+        btn1.click(fn=infere_voice2text, inputs=record_input, outputs=record_output_1)
+        btn1.click(fn=infere_speech_emotion, inputs=record_input, outputs=record_output_2)
+        record_output_1.change(fn=infere_text_emotion, inputs=record_output_1, outputs=record_output_3)
 
     # Examples to be used as input
     gr.Examples(
@@ -24,13 +46,10 @@ with gr.Blocks() as demo:
             os.path.join(os.path.dirname(__file__), "audio/a_good_dream.wav"),
             os.path.join(os.path.dirname(__file__), "audio/hype_in_ai.wav"),
         ],
-        input,
+        upload_input,
         label="Examples in which speech and words express different emotions:"
    )
 
-    # Input-output logic based on button click
-    btn.click(fn=infere_voice2text, inputs=input, outputs=output0)
-    btn.click(fn=infere_speech_emotion, inputs=input, outputs=output1)
-    output0.change(fn=infere_text_emotion, inputs=output0, outputs=output2)
+
 
 demo.launch()
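
The two tabs in the new layout duplicate the same row/column structure and event wiring, differing only in the audio `source` and the component names. A possible follow-up refactor (a sketch under the same Gradio 3.x assumption, not part of this commit; `build_analysis_tab` is a hypothetical helper) factors the shared layout into one function:

```python
import gradio as gr
from models import infere_speech_emotion, infere_text_emotion, infere_voice2text

def build_analysis_tab(tab_name: str, source: str) -> gr.Audio:
    # Build one tab with the shared layout; return the audio input so the
    # caller can point gr.Examples at it.
    with gr.Tab(tab_name):
        with gr.Row():
            audio_input = gr.Audio(label="Audio File", source=source, type="filepath")
            with gr.Column():
                text_output = gr.Textbox(label="Text from the audio")
                speech_emotion_output = gr.Textbox(label="Speech emotion")
                text_emotion_output = gr.Textbox(label="Text emotion")
        analyze_btn = gr.Button("Analyze audio")
        # Same wiring as the commit: transcription and speech emotion run on
        # click; text emotion runs whenever the transcription textbox changes.
        analyze_btn.click(fn=infere_voice2text, inputs=audio_input, outputs=text_output)
        analyze_btn.click(fn=infere_speech_emotion, inputs=audio_input, outputs=speech_emotion_output)
        text_output.change(fn=infere_text_emotion, inputs=text_output, outputs=text_emotion_output)
    return audio_input

with gr.Blocks() as demo:
    upload_input = build_analysis_tab("Upload audio", source="upload")
    build_analysis_tab("Record audio", source="microphone")

demo.launch()
```

Returning the audio input keeps the upload tab's component available as the target for `gr.Examples`, as in the commit.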