TVRRaviteja committed on
Commit 388e865 · verified · 1 Parent(s): 8b1a242

Create utils.py

Files changed (1)
  1. utils.py +165 -0
utils.py ADDED
@@ -0,0 +1,165 @@
+ import os
+ import time
+
+ import pandas as pd  # type: ignore
+ from dotenv import load_dotenv, find_dotenv  # type: ignore
+ from huggingface_hub import InferenceClient  # type: ignore
+ from openai import OpenAI
+
+ # Load OPENAI_API_KEY and HF_TOKEN from a local .env file, if present
+ load_dotenv(find_dotenv())
+
+ # The OpenAI client reads OPENAI_API_KEY from the environment; HF_TOKEN is
+ # read later by llama2_chat_completion via os.getenv
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+ # Define a few-shot prompt for personality prediction
+ few_shot_prompt = """
+ You are an expert in personality psychology. Based on the text provided, predict the personality scores for the Big Five personality traits: Openness, Conscientiousness, Extraversion, Agreeableness, and Neuroticism. Each score should be a floating-point number between 0 and 1.
+
+ Example 1:
+ Text: "I love exploring new ideas and trying new things."
+ Scores: Openness: 0.9, Conscientiousness: 0.4, Extraversion: 0.7, Agreeableness: 0.5, Neuroticism: 0.3
+
+ Example 2:
+ Text: "I prefer to plan everything in advance and stick to the plan."
+ Scores: Openness: 0.3, Conscientiousness: 0.8, Extraversion: 0.4, Agreeableness: 0.6, Neuroticism: 0.4
+
+ Now, predict the scores for the following text, responding with only the five scores in the same format as the examples:
+ """
+
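+ # Note: the parser in predict_personality expects the completion to follow the
+ # example format exactly, i.e. comma-separated "Trait: score" pairs on one line.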
+ def predict_personality(text):
+     # Append the user's text to the few-shot prompt
+     prompt = few_shot_prompt + f"Text: \"{text}\"\nScores:"
+
+     messages = [
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": prompt}
+     ]
+
+     # Call the OpenAI API to get the prediction
+     response = client.chat.completions.create(
+         model="gpt-4",
+         messages=messages,
+         max_tokens=50,
+         temperature=0.5
+     )
+
+     # Parse "Openness: 0.9, Conscientiousness: 0.4, ..." into a list of floats;
+     # split(":")[-1] also tolerates a leading "Scores:" prefix
+     scores_text = response.choices[0].message.content.strip()
+     scores = [float(part.split(":")[-1].strip()) for part in scores_text.split(",")]
+     return scores
+
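+ # Illustrative usage (requires OPENAI_API_KEY; actual scores will vary):
+ #   scores = predict_personality("I love meeting new people at parties.")
+ #   print(scores)  # e.g. [0.7, 0.5, 0.9, 0.6, 0.3]
+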
+ def create_line_plot(scores):
+     # Build a DataFrame of trait scores for the Gradio plot component
+     labels = ['Openness', 'Conscientiousness', 'Extraversion', 'Agreeableness', 'Neuroticism']
+     data = {'Personality': labels, 'Score': scores}
+     return pd.DataFrame(data)
+
+ # Gradio interface
+ def personality_app(text):
+     scores = predict_personality(text)
+     df = create_line_plot(scores)
+     return df
+
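+ # A minimal wiring sketch for this function (hypothetical; the real app may differ):
+ #   import gradio as gr
+ #   demo = gr.Interface(fn=personality_app, inputs="text",
+ #                       outputs=gr.BarPlot(x="Personality", y="Score"))
+ #   demo.launch()
+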
+ def transcribe_audio(audio_path):
+     # Transcribe a recorded audio file with OpenAI's Whisper API
+     with open(audio_path, "rb") as audio_file:
+         transcript = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
+     return transcript.text
+
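+ # Illustrative usage (hypothetical file path):
+ #   text = transcribe_audio("recording.wav")
+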
+ def openai_chat_completion(messages: list, selected_model: str) -> list[str]:
+     try:
+         response = client.chat.completions.create(
+             model=selected_model,
+             messages=messages,
+         )
+         collected_messages = response.choices[0].message.content.strip().split('\n')
+         return collected_messages  # Return the reply split into line chunks for streaming
+
+     except Exception as e:
+         return [str(e)]
+
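+ # Illustrative usage:
+ #   lines = openai_chat_completion(
+ #       [{"role": "user", "content": "Hello!"}], "gpt-3.5-turbo")
+ #   print("\n".join(lines))
+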
+ def llama2_chat_completion(messages: list, hf_model_id: str, selected_model: str) -> list[str]:
+     try:
+         hf_token = os.getenv("HF_TOKEN")
+         hf_client = InferenceClient(model=hf_model_id, token=hf_token)  # Avoid shadowing the global OpenAI client
+         # Start the chat completion process with streaming enabled
+         response_stream = hf_client.chat_completion(messages, max_tokens=400, stream=True)
+
+         # Collect the generated message chunks (the stream mirrors OpenAI's delta structure)
+         collected_messages = []
+         for completion in response_stream:
+             delta = completion.choices[0].delta
+             if delta.content:
+                 collected_messages.append(delta.content)
+         # Return the collected messages
+         return collected_messages
+
+     except Exception as e:
+         return [str(e)]
+
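+ # Illustrative usage (requires an HF_TOKEN with access to the model):
+ #   chunks = llama2_chat_completion(
+ #       [{"role": "user", "content": "Hi"}],
+ #       "meta-llama/Meta-Llama-3-8B", "Llama-3-8B")
+ #   print("".join(chunks))
+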
+ def generate_messages(messages: list) -> list:
+     # Convert Gradio-style chat history ([[user, bot], ...]) into API chat messages
+     formatted_messages = [
+         {
+             'role': 'system',
+             'content': 'You are a helpful assistant.'
+         }
+     ]
+     for m in messages:  # Loop over the chat history and emit user/assistant turns
+         formatted_messages.append({
+             'role': 'user',
+             'content': m[0]
+         })
+         if m[1] is not None:
+             formatted_messages.append({
+                 'role': 'assistant',
+                 'content': m[1]
+             })
+     return formatted_messages
+
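+ # Example transformation:
+ #   generate_messages([["Hi", "Hello!"], ["How are you?", None]])
+ #   # -> [{'role': 'system', 'content': 'You are a helpful assistant.'},
+ #   #     {'role': 'user', 'content': 'Hi'},
+ #   #     {'role': 'assistant', 'content': 'Hello!'},
+ #   #     {'role': 'user', 'content': 'How are you?'}]
+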
+ def generate_audio_response(chat_history: list, selected_model: str):
+     messages = generate_messages(chat_history)  # Build API messages from the chat history
+     if selected_model in ("gpt-4", "gpt-3.5-turbo"):
+         bot_message = openai_chat_completion(messages, selected_model)
+     elif selected_model == "Llama-3-8B":
+         hf_model_id = "meta-llama/Meta-Llama-3-8B"
+         bot_message = llama2_chat_completion(messages, hf_model_id, selected_model)
+     elif selected_model == "Llama-2-7b-chat-Counsel-finetuned":
+         hf_model_id = "TVRRaviteja/Llama-2-7b-chat-Counsel-finetuned"
+         bot_message = llama2_chat_completion(messages, hf_model_id, selected_model)
+     else:
+         bot_message = openai_chat_completion(messages, 'gpt-3.5-turbo')  # Fall back to a default model
+
+     chat_history[-1][1] = ''  # [-1] -> last conversation, [1] -> current carebot message
+     for bm in bot_message:  # Loop over the collected message chunks
+         chat_history[-1][1] += bm
+         time.sleep(0.05)
+         yield chat_history  # Stream partial carebot responses to the UI
+
+ def generate_text_response(chat_history: list, selected_model: str):
+     messages = generate_messages(chat_history)  # Build API messages from the chat history
+     if selected_model in ("gpt-4", "gpt-3.5-turbo"):
+         bot_message = openai_chat_completion(messages, selected_model)
+     elif selected_model == "Llama-3-8B":
+         hf_model_id = "meta-llama/Meta-Llama-3-8B"
+         bot_message = llama2_chat_completion(messages, hf_model_id, selected_model)
+     elif selected_model == "Llama-2-7b-chat-Counsel-finetuned":
+         hf_model_id = "TVRRaviteja/Llama-2-7b-chat-Counsel-finetuned"
+         bot_message = llama2_chat_completion(messages, hf_model_id, selected_model)
+     else:
+         bot_message = openai_chat_completion(messages, 'gpt-3.5-turbo')  # Fall back to a default model
+
+     chat_history[-1][1] = ''  # [-1] -> last conversation, [1] -> current carebot message
+     for bm in bot_message:  # Loop over the collected message chunks
+         chat_history[-1][1] += bm
+         time.sleep(0.05)
+         yield chat_history  # Stream partial carebot responses to the UI
+
+ def set_user_response(user_message: str, chat_history: list) -> tuple:
+     chat_history += [[user_message, None]]  # Append the latest user message to the chat history
+     return '', chat_history
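+
+ # A sketch of how these pieces might be chained in a Gradio Blocks app
+ # (hypothetical component names; the actual app wiring may differ):
+ #   msg.submit(set_user_response, [msg, chatbot], [msg, chatbot]).then(
+ #       generate_text_response, [chatbot, model_dropdown], chatbot)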