Create app.py
app.py
ADDED
@@ -0,0 +1,85 @@
import gradio as gr  # type: ignore
from utils import generate_audio_response, generate_text_response, set_user_response, transcribe_audio, personality_app, create_line_plot, predict_personality
from huggingface_hub import login  # type: ignore
import os


# Handle audio input: transcribe it, score it, and update the chatbot
def handle_audio_input(audio_file_path, chat_history):
    if audio_file_path is not None:
        # Transcribe the audio
        output = transcribe_audio(audio_file_path)
        personality_scores = personality_app(output)
        # Update the chat history with the transcription
        _, chat_history = set_user_response(output, chat_history)
        return output, chat_history, personality_scores
    return None, chat_history, None


def clear_audio():
    return None


def hide_textbox():
    return gr.Textbox(visible=False)


def open_textbox():
    return gr.Textbox(visible=True)


# Handle the model selection
def update_selected_model(selected_model):
    print(f"Selected model: {selected_model}")
    return selected_model


with gr.Blocks() as demo:
    gr.Markdown("<center><h1>Multimodal Personality Adaptive Conversational AI</h1></center>")
    gr.Markdown("<center><h5>This application uses LLMs to create a personality-adaptive conversational AI that interacts with users and displays personality scores. (Description with links goes here)</h5></center>")

    with gr.Row():
        # Left side - audio input, transcription, and chat
        with gr.Column(scale=6):
            # Audio recording component
            audio_input = gr.Microphone(sources=["microphone"], type="filepath", label="Tell Me How You're Feeling", container=True, interactive=True)
            output_text = gr.Textbox(label="Transcription", placeholder="What you said appears here...")
            chatbot = gr.Chatbot(label="Carebot", height=450)  # Chatbot interface
            msg = gr.Textbox(label="Type your message here:")  # Textbox for user input

            # with gr.Group():
            with gr.Row():
                Run = gr.Button("Run", variant="primary", size="sm")
                clear = gr.ClearButton(size="sm")  # To clear the chat
                # generate = gr.Button("Generate", size="sm")
                # save_chat = gr.Button("Save", size="sm")

            # Display some example queries
            examples = gr.Examples(examples=["I'm feeling Sad all the time", "Tell me a joke.", "Cheer Me Up!", "Tell me about Seattle"], inputs=msg)
            # Clear the chat history
            clear.click(lambda: None, None, chatbot, queue=False)

        # Right side - information, visualization, and model dropdown
        with gr.Column(scale=4):
            # 1st component - dropdown to choose the LLM
            model_selection = gr.Dropdown(
                ["Llama-2-7b-chat-Counsel-finetuned", "Llama-3-8B", "gpt-4", "gpt-3.5-turbo"],
                label="Models", info="Choose your LLM model", value="Llama-2-7b-chat-Counsel-finetuned")

            # Textbox holding the selected model (not displayed in the app)
            selected_model = gr.Textbox(label="Selected Model", interactive=False, visible=False)

            model_selection.change(fn=update_selected_model, inputs=model_selection, outputs=selected_model)

            # 2nd component - live personality score visualization
            personality_score = gr.LinePlot(x="Personality", y="Score", label="Personality Scores", height=300)

    # Generate responses to the user's audio query
    # (audio_input and output_text are Gradio components here, so these checks are always true at build time)
    if audio_input is not None and output_text is not None:
        gr.on(audio_input.change, fn=handle_audio_input, inputs=[audio_input, chatbot], outputs=[output_text, chatbot, personality_score], queue=False).then(fn=generate_audio_response, inputs=[chatbot, selected_model], outputs=chatbot)
        audio_input.change(clear_audio, inputs=None, outputs=audio_input)

    if msg is not None:
        # Submit the typed message to the LLM
        gr.on(triggers=[msg.submit, Run.click], fn=personality_app, inputs=msg, outputs=personality_score).then(fn=set_user_response, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False).then(fn=generate_text_response, inputs=[chatbot, selected_model], outputs=chatbot)

# Enable request queuing for the Gradio app
demo.queue()

if __name__ == '__main__':
    login(token=os.getenv("HF_TOKEN"))  # HF login
    demo.launch()
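For context, app.py imports its helpers from a local utils module that is not part of this commit. The stubs below are only a sketch of how that interface might look, with signatures inferred from the calls in app.py; the bodies, type hints, and the pandas assumption for the personality scores are placeholders, not the Space's actual implementation.

# utils.py - hypothetical stubs matching how app.py calls these helpers.
# Signatures are inferred from app.py; bodies are placeholders only.
from typing import List, Optional, Tuple

import pandas as pd


def transcribe_audio(audio_file_path: str) -> str:
    """Return the transcription of the recorded audio file."""
    raise NotImplementedError  # e.g. an ASR model


def personality_app(text: str) -> pd.DataFrame:
    """Return per-trait scores in a table with 'Personality' and 'Score'
    columns, matching gr.LinePlot(x="Personality", y="Score") in app.py."""
    raise NotImplementedError


def set_user_response(message: str, chat_history: List[Tuple[str, Optional[str]]]):
    """Append the user's message to the chat history; app.py expects a
    (cleared_textbox_value, updated_history) pair back."""
    return "", chat_history + [(message, None)]


def generate_text_response(chat_history, selected_model: str):
    """Fill in the assistant reply for the last user turn using the chosen model."""
    raise NotImplementedError


def generate_audio_response(chat_history, selected_model: str):
    """Like generate_text_response, but for transcribed audio queries."""
    raise NotImplementedError


def create_line_plot(scores):
    """Imported by app.py but unused there; presumably builds the score plot."""
    raise NotImplementedError


def predict_personality(text: str):
    """Imported by app.py but unused there; presumably the raw trait classifier."""
    raise NotImplementedError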