#https://discuss.huggingface.co/t/dynamical-flexible-output/18146/6
#https://github.com/gradio-app/gradio/issues/2066
import gradio as gr
from openai import OpenAI
import pandas as pd
from datetime import datetime, timedelta, timezone
from config import groq_token, groq_model, QUESTION_PROMPT, init_google_sheets_client, default_model_name, user_names, google_sheets_name, AtlasClient, custom_model, open_ruter_token, openai_model
import gspread
from groq import Client
import random, string, json, io
#from trash_folder.alter_app import Local_llm
from import_model import ModelLoader
import groq
import torch
print(groq.__version__)
# Initialize Google Sheets client
client = init_google_sheets_client()
sheet = client.open(google_sheets_name)
#sheet = client.open_by_key('1kA37sJps3nhki-s9S7J_mQtNoqoWOLvezV0HobHzQ4s')  # spreadsheet ID for the new chatbot test sheet
stories_sheet = sheet.worksheet("Stories")
system_prompts_sheet = sheet.worksheet("System Prompts")
# Combine both model dictionaries
all_models = {}
all_models.update(openai_model)
if torch.cuda.is_available():
all_models.update(custom_model)
# Initialize the local model as None; it is loaded lazily when a custom model is selected
local_model = None
def randomize_key_order(aux):
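    """Return a new dict with the keys of aux in shuffled order."""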
keys = list(aux.keys())
#Shuffle the list of keys
random.shuffle(keys)
#Create a new dictionary with shuffled keys
return {key: aux[key] for key in keys}
alphabet = list(string.ascii_uppercase)
# Initialize GROQ client
#groq_client = Client(api_key=groq_token)
# Load stories from Google Sheets
def load_stories():
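    """Load stories from the "Stories" worksheet; each row is expected to be (title, story text)."""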
stories_data = stories_sheet.get_all_values()
stories = [{"title": story[0], "story": story[1]} for story in stories_data if story[0] != "Title"] # Skip header row
return stories
# Load system prompts from Google Sheets
def load_system_prompts():
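    """Load system prompts from the first column of the "System Prompts" worksheet."""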
system_prompts_data = system_prompts_sheet.get_all_values()
system_prompts = [prompt[0] for prompt in system_prompts_data[1:]] # Skip header row
return system_prompts
# Load available stories and system prompts
stories = load_stories()
system_prompts = load_system_prompts()
# Initialize the selected model
selected_model = default_model_name
tokenizer, model = None, None
# Initialize the data list
data = []
# Chat history
chat_history = []
model_history = []
# Initialize the Atlas client
atlas_client = AtlasClient("chatbot_test")
def save_answers(all_answers, user_name):
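    """Store the collected answers in the Atlas "dialogs" collection, timestamped in GMT-3."""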
timestamp = datetime.now(timezone.utc) - timedelta(hours=3) # Adjust to GMT-3
timestamp_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
data = {}
data["timestamp"] = timestamp_str
data["answers"] = all_answers
data['author'] = user_name
atlas_client.add(data, "dialogs")
#Function to save comment and score
def save_comment_score(score, comment, story_name, user_name, system_prompt, models):
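    """Flatten the chat history, log it to the user's worksheet and Atlas, and return the updated table."""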
print("Saving comment and score...")
print(chat_history)
print(model_history)
full_chat_history = ""
    # Build a formatted chat history, tagging each assistant turn
    # with the model name recorded in model_history
for message in chat_history:
print(message['role'])
        if message['role'] == 'user':  # User message
            full_chat_history += f"User: {message['content']}\n"
        elif message['role'] == 'assistant':  # Assistant message
            full_chat_history += f"Model: {model_history.pop(0)} Assistant: {message['content']}\n"
timestamp = datetime.now(timezone.utc) - timedelta(hours=3) # Adjust to GMT-3
timestamp_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
    model_name = ' '.join(models)
# Append data to local data storage
print(full_chat_history)
data.append([
timestamp_str,
user_name,
model_name,
system_prompt,
story_name,
full_chat_history,
score,
comment
])
# Append data to Google Sheets
try:
user_sheet = client.open(google_sheets_name).worksheet(user_name)
except gspread.exceptions.WorksheetNotFound:
user_sheet = client.open(google_sheets_name).add_worksheet(title=user_name, rows="100", cols="20")
user_sheet.append_row([timestamp_str, user_name, model_name, system_prompt, story_name, full_chat_history, score, comment])
    # Save all answers to the Atlas collection
print(f"all answers...\n{all_answers}")
save_answers(all_answers, user_name)
#Append data and render the data table
df = pd.DataFrame(data, columns=["Timestamp", "User Name", "Model Name", "System Prompt", "Story Name", "Chat History", "Score", "Comment"])
return df[["Chat History", "Score", "Comment"]], gr.update(value="") # Show only the required columns and clear the comment input box
# OpenRouter exposes an OpenAI-compatible API, so the OpenAI client is pointed at its base URL;
# the key comes from config instead of being hardcoded
openrouter_client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=open_ruter_token,
)
# Function to query a remote model; despite the name, requests are routed through the OpenRouter client above
def interact_groq(context, model_name):
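    """Send the chat context to the given model via the OpenRouter endpoint and return its reply."""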
    chat_completion = openrouter_client.chat.completions.create(
messages=context,
model=model_name,
temperature=0.1,
max_tokens=100,
)
#print(chat_completion)
return chat_completion.choices[0].message.content
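# A minimal usage sketch, assuming any model name valid on the OpenRouter endpoint:
#   interact_groq([{"role": "system", "content": "..."},
#                  {"role": "user", "content": "..."}], model_list[0])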
# Function to send selected story and initial message
def send_selected_story(title, model_name, system_prompt):
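    """Reset the chat with the chosen story and system prompt; return the first model reply,
    the new chat history, and the story text."""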
global local_model
global chat_history
global selected_story
global data # Ensure data is reset
data = [] # Reset data for new story
selected_story = title
for story in stories:
if story["title"] == title:
system_prompt = f"""
{system_prompt}
Here is the story:
---
{story['story']}
---
"""
combined_message = system_prompt.strip()
if combined_message:
chat_history = [] # Reset chat history
chat_history.append({"role": "system", "content": combined_message})
chat_history.append({"role": "user", "content": QUESTION_PROMPT})
                if model_name in custom_model:
                    if local_model is None or local_model.model_name != custom_model[model_name]:
                        # Swap in the requested local model (or load it for the first time)
                        del local_model
                        torch.cuda.empty_cache()
                        torch.cuda.synchronize()  # check whether this reliably frees the old weights
                        local_model = ModelLoader(custom_model[model_name])
                    response = local_model.interact(chat_history)
else:
response = interact_groq(chat_history, model_name)
resp = {"role": "assistant", "content": response.strip()}
return resp, chat_history, story["story"]
else:
print("Combined message is empty.")
else:
print("Story title does not match.")
# Receive several responses and just display them; only the selected one is added to the context
def send_multiple_selected_story(title, models, system_prompt):
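    """Send the story to every selected model, labeling the shuffled answers A, B, C, ...

    Returns the Chatbot components: the shared context first, then one window per model.
    """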
    global model_history
    global chatbot_answer_list
    global all_answers
    resp_list = []
    print(models)
    # Shuffle the models so the A/B/C labels don't reveal which model is which
    random.shuffle(models)
    print(f"models shuffled: {models}")
    for index, model in enumerate(models):
        resp, context, _ = send_selected_story(title, model, system_prompt)
        chatbot_answer_list[alphabet[index]] = {'response': resp, 'model': model}
        try:
            print(resp)
            resp_list.append(gr.Chatbot(value=[resp], visible=True, type='messages'))
        except gr.exceptions.Error:
            print(f"error in loop for model {model}")
    # Hide the chat windows of models that were not selected
    rest = [model for model in model_list if model not in models]
    for model in rest:
        try:
            resp_list.append(gr.Chatbot(type='messages', visible=False))
        except gr.exceptions.Error:
            print(f"error in hidden-window loop for model {model}")
    try:
        resp_list.insert(0, gr.Chatbot(value=context, type='messages'))
        # chat_history is already updated inside send_selected_story
    except gr.exceptions.Error:
        print(f"error in main output\n {context}")
    return resp_list
def remove_metadata(json_array):
    print(json_array)
    print(type(json_array))
    json_aux = []
    for json_obj in json_array:
        print(f'object: {json_obj}')
        json_aux.append({'role': json_obj["role"], 'content': json_obj["content"]})
    return json_aux
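# A minimal sketch of what remove_metadata strips, assuming the extra keys Gradio adds:
#   remove_metadata([{'role': 'user', 'content': 'hi', 'metadata': None}])
#   -> [{'role': 'user', 'content': 'hi'}]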
# We don't know which model produced which answer because the order is shuffled each turn;
# selected_model is just the letter label chosen in the radio input
def multiple_interact(query, models, selected_model, assistant_user_input): #, interaction_count)
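    """Append the selected answer (or the user's own) to the context, then query every active model again.

    Each turn is recorded in all_answers as
    {"context": ..., "assistant": [candidate answers], "selected": model name or "user_input"}.
    """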
    resp_list = []
    if selected_model == "user_input":
        # The human-written answer is stored alongside the model candidates
        user_dialog = [{'response': {'role': 'assistant', 'content': assistant_user_input}, 'model': 'user_input'}]
        dialog = {
            "context": remove_metadata(chat_history),
            "assistant": user_dialog + list(chatbot_answer_list.values()),
            "selected": "user_input",
        }
        chat_history.append({"role": "assistant", "content": assistant_user_input})
        chat_history.append({"role": "user", "content": query})
    else:
        dialog = {
            "context": remove_metadata(chat_history),
            "assistant": list(chatbot_answer_list.values()),
            "selected": None,
        }
    # Get the previous answer of the selected model
    for index, model in enumerate(models):
        if alphabet[index] == selected_model:
            selected_model_history = chatbot_answer_list[selected_model]['response']
            print(f"selected_model_history: {selected_model_history}")
            chat_history.append(selected_model_history)
            chat_history.append({"role": "user", "content": query.strip()})
            # Record which model the selected answer came from
            dialog["selected"] = chatbot_answer_list[selected_model]['model']
            break
    all_answers.append(dialog)
    selected_model_history = {}  # reset history (probably not needed)
    aux_history = remove_metadata(chat_history)
    # Iterate over active_models (the checked models), shuffled so the labels stay anonymous
    random.shuffle(active_models)
    for index, model in enumerate(active_models):
        if model in custom_model:
            resp = local_model.interact(aux_history)
        else:
            resp = interact_groq(aux_history, model)
        resp = {"role": "assistant", "content": resp.strip()}
        chatbot_answer_list[alphabet[index]] = {'response': resp, 'model': model}
        try:
            print(resp)
            resp_list.append(gr.Chatbot(value=[resp], visible=True, type='messages'))
        except gr.exceptions.Error:
            print(f"error in loop for model {model}")
    # Hide the chat windows of models that are not active
    rest = [model for model in model_list if model not in active_models]
    for model in rest:
        try:
            resp_list.append(gr.Chatbot(type='messages', visible=False))
        except gr.exceptions.Error:
            print(f"error in hidden-window loop for model {model}")
    resp_list.insert(0, gr.Chatbot(value=aux_history, type='messages'))
    model_history.append(selected_model)
    print(model_history)
    return resp_list
# Function to load user guide from a file
def load_user_guide():
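    """Read the plain-text user guide shown in the "User Guide" tab."""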
with open('user_guide.txt', 'r') as file:
return file.read()
def change_textbox(checkbox):
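    """Show the free-text assistant box only when "user_input" is chosen in the radio."""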
if checkbox == "user_input":
return gr.Textbox(placeholder="Type your message here...", label="Assistant input", visible=True)
else:
return gr.Textbox(value="", visible=False)
def change_checkbox(checkbox):
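    """Rebuild the radio with one letter label per checked model, plus a "user_input" option."""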
    print(f'checkbox: {checkbox}')
    # TODO: turn these globals into proper per-session state later
    global active_models
    active_models = checkbox
    quant_models = len(checkbox)
    words = [alphabet[i] for i in range(quant_models)]
    checkbox = gr.Radio(label="Select Model to respond...", choices=words + ["user_input"])
return checkbox
def change_story(story_title, ret="gradio"):
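    """Return the selected story, either as a Gradio Textbox update or as a raw string."""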
    for story in stories:
        if story["title"] == story_title:
            if ret == "gradio":
                return gr.Textbox(label="Selected Story", lines=10, interactive=False, value=story["story"])
            else:  # return the raw string
                return story["story"]
    return gr.Textbox(label="Error", lines=10, interactive=False, value="Story title does not match.")
chatbot_list = []
model_list = list(all_models.keys())
active_models = []
# chatbot_answer_list maps a letter label to {'response': ..., 'model': ...}
chatbot_answer_list = {}
all_answers = []  # save all answers from all chatbots
# Create the chat interface using Gradio Blocks
with gr.Blocks() as demo:
with gr.Tabs():
with gr.TabItem("Chat"):
gr.Markdown("# Demo Chatbot V3")
gr.Markdown("## Context")
with gr.Group():
model_dropdown = gr.Dropdown(choices=list(all_models.keys()), label="Select Models", value=model_list[0])
user_dropdown = gr.Dropdown(choices=user_names, label="Select User Name")
initial_story = stories[0]["title"] if stories else None
story_dropdown = gr.Dropdown(choices=[story["title"] for story in stories], label="Select Story", value=initial_story)
system_prompt_dropdown = gr.Dropdown(choices=system_prompts, label="Select System Prompt", value=system_prompts[0])
send_story_button = gr.Button("Send Story")
gr.Markdown("## Chat")
with gr.Group():
selected_story_textbox = gr.Textbox(label="Selected Story", lines=10, interactive=False)
chatbot_output = gr.Chatbot(label="Chat History", type='messages')
chatbot_input = gr.Textbox(placeholder="Type your message here...", label="User Input")
send_message_button = gr.Button("Send")
gr.Markdown("## Evaluation")
with gr.Group():
score_input = gr.Slider(minimum=0, maximum=5, step=1, label="Score")
comment_input = gr.Textbox(placeholder="Add a comment...", label="Comment")
save_button = gr.Button("Save Score and Comment")
data_table = gr.DataFrame(headers=["Chat History", "Score", "Comment"])
with gr.TabItem("User Guide"):
gr.Textbox(label="User Guide", value=load_user_guide(), lines=20)
with gr.TabItem("Multiple Evaluation"):
with gr.Group():
#model_dropdown = gr.Dropdown(choices=list(all_models.keys()), label="Select Model", value=default_model_name)
model_checkbox = gr.CheckboxGroup(choices=list(all_models.keys()), label="Select Model", value=None) #value=[default_model_name])
user_dropdown = gr.Dropdown(choices=user_names, label="Select User Name")
story_dropdown = gr.Dropdown(choices=[story["title"] for story in stories], label="Select Story", value=initial_story)
system_prompt_dropdown = gr.Dropdown(choices=system_prompts, label="Select System Prompt", value=system_prompts[0])
send_multiple_story_button = gr.Button("Send Story")
gr.Markdown("## Chat")
with gr.Group():
selected_story_textbox = gr.Textbox(label="Selected Story", lines=10, interactive=False, value=change_story(initial_story, "string"))
                # Build one chat window per selected model
chatbot_list.append(gr.Chatbot(label="Chat History", type='messages'))
with gr.Row():
for i, model in enumerate(model_list):
label = f"Model {alphabet[i % len(alphabet)]}"
aux = gr.Chatbot(label=label, visible=False, type='messages')
chatbot_list.append(aux)
user_input = gr.Textbox(placeholder="Type your message here...", label="User Input")
#chat_radio = gr.Radio(choices=list(model_list)+["user_input"], label="Sent something to continue...", value=[model_list[0]])
chat_radio = gr.Radio(label="Select Model to respond...")
                # Choose a response first, then send the message
assistant_user_input = gr.Textbox(interactive=True, show_copy_button=True, visible=False)
send_multiple_message_button = gr.Button("Send")
gr.Markdown("## Evaluation")
with gr.Group():
score_input = gr.Slider(minimum=0, maximum=5, step=1, label="Score")
comment_input = gr.Textbox(placeholder="Add a comment...", label="Comment")
save_button_multievaluation = gr.Button("Save Score and Comment")
data_table = gr.DataFrame(headers=["Chat History", "Score", "Comment"])
interaction_count = gr.Number(value=0, visible=False)
selected_model_array = gr.List(value=None, visible=False)
    # inputs: the components whose values are passed to the function
    # outputs: the components updated by the function's return values
    # NOTE: the single-chat tab's buttons are currently unwired; the old handlers below
    # reference components (chat_history_json) and functions (interact) that no longer exist.
    #send_story_button.click(fn=send_selected_story, inputs=[story_dropdown, model_dropdown, system_prompt_dropdown], outputs=[chatbot_output, chat_history_json, data_table, selected_story_textbox])
    #send_message_button.click(fn=interact, inputs=[chatbot_input, chat_history_json, interaction_count, model_dropdown], outputs=[chatbot_input, chatbot_output, chat_history_json, interaction_count])
    #save_button.click(fn=save_comment_score, inputs=[chatbot_output, score_input, comment_input, story_dropdown, user_dropdown, system_prompt_dropdown], outputs=[data_table, comment_input])
chat_radio.change(fn=change_textbox, inputs=chat_radio, outputs=assistant_user_input)
    # Selecting models updates the chat radio and stores the chosen models
model_checkbox.input(fn=change_checkbox, inputs=model_checkbox, outputs=chat_radio)
story_dropdown.input(fn=change_story, inputs=[story_dropdown], outputs=selected_story_textbox)
send_multiple_story_button.click(
fn=send_multiple_selected_story,
inputs=[story_dropdown, model_checkbox, system_prompt_dropdown],
outputs=chatbot_list,
)
    # TODO: change so the models respond with just their answers instead of the full history;
    # that requires each model's previous turns and the model that was chosen.
send_multiple_message_button.click(
fn=multiple_interact,
inputs=[user_input, model_checkbox, chat_radio, assistant_user_input],# interaction_count],
outputs=chatbot_list,
)
    # May need to store a variable with the checkbox values
save_button_multievaluation.click(
fn=save_comment_score,
inputs=[score_input, comment_input, story_dropdown, user_dropdown, system_prompt_dropdown, model_checkbox],
outputs=[data_table, comment_input])
demo.launch(share=False)
#demo.launch(share=True)