Commit 94c2cbf · Parent: a436de7 · alternativa ("alternative")
app.py CHANGED
@@ -1,14 +1,15 @@
 #https://discuss.huggingface.co/t/dynamical-flexible-output/18146/6
 #https://github.com/gradio-app/gradio/issues/2066
 import gradio as gr
+from openai import OpenAI
 import pandas as pd
 from datetime import datetime, timedelta, timezone
-from config import groq_token, groq_model, QUESTION_PROMPT, init_google_sheets_client, groq_model, default_model_name, user_names, google_sheets_name, AtlasClient, custom_model
+from config import groq_token, groq_model, QUESTION_PROMPT, init_google_sheets_client, groq_model, default_model_name, user_names, google_sheets_name, AtlasClient, custom_model, open_ruter_token, openai_model
 import gspread
 from groq import Client
 import random, string, json, io
 #from trash_folder.alter_app import Local_llm
-from import_model import
+from import_model import ModelLoader
 import groq
 import torch
 print(groq.__version__)
@@ -21,7 +22,7 @@ system_prompts_sheet = sheet.worksheet("System Prompts")
 
 # Combine both model dictionaries
 all_models = {}
-all_models.update(
+all_models.update(openai_model)
 if torch.cuda.is_available():
     all_models.update(custom_model)
 
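A note on the merge in this hunk: all_models maps display names to model identifiers; openai_model (remote models served through OpenRouter) is always registered, while custom_model (locally loaded weights) is only added when a GPU is available. A minimal sketch of the pattern, with hypothetical placeholder entries standing in for the real dictionaries defined in config.py:

import torch

# Hypothetical registry entries; the real openai_model and custom_model
# dictionaries live in config.py.
openai_model = {"GPT-4o (OpenRouter)": "openai/gpt-4o"}
custom_model = {"Local Llama": "meta-llama/Llama-3.1-8B-Instruct"}

all_models = {}
all_models.update(openai_model)      # remote models are always offered
if torch.cuda.is_available():
    all_models.update(custom_model)  # local models need a GPU to run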
@@ -39,7 +40,7 @@ def randomize_key_order(aux):
 alphabet = list(string.ascii_uppercase)
 
 # Initialize GROQ client
-groq_clinet = Client(api_key=groq_token)
+#groq_clinet = Client(api_key=groq_token)
 
 # Load stories from Google Sheets
 def load_stories():
@@ -141,25 +142,21 @@ def save_comment_score(score, comment, story_name, user_name, system_prompt, mod
 
 
 
-
-client = OpenAI(
+clinet = OpenAI(
     base_url="https://openrouter.ai/api/v1",
-    api_key="
+    api_key="sk-or-v1-[REDACTED]",
 )
-def interact_openrouter(context, model_name):
-    completion = client.chat.completions.create(
-        model=model_name,
-        messages=context,
-    )
-    return completion.choices[0].message.content
-
-
-
+#def interact_openrouter(context, model_name):
+#    completion = client.chat.completions.create(
+#        model=model_name,
+#        messages=context,
+#    )
+#    return completion.choices[0].message.content
 
 
 # Function to handle interaction with model
 def interact_groq(context, model_name):
-    chat_completion =
+    chat_completion = clinet.chat.completions.create(
         messages=context,
         model=model_name,
         temperature=0.1,
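This hunk retargets the app from the Groq SDK to OpenRouter's OpenAI-compatible endpoint, but it hardcodes the API key in source (redacted above) even though config.py now exposes open_ruter_token for exactly this purpose. A minimal sketch of the same client reading the key from the environment instead; the interact_openrouter function mirrors the one the commit comments out:

import os
from openai import OpenAI

# Same OpenRouter endpoint as the diff, with the key taken from the
# environment (config.py reads it via os.getenv("OPEN_RUTER_TOKEN")).
client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=os.getenv("OPEN_RUTER_TOKEN"),
)

def interact_openrouter(context, model_name):
    # context is the chat history: a list of {"role": ..., "content": ...} dicts
    completion = client.chat.completions.create(
        model=model_name,
        messages=context,
    )
    return completion.choices[0].message.content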
@@ -199,7 +196,7 @@ Here is the story:
         del local_model
         torch.cuda.empty_cache()
         torch.cuda.synchronize() #ver si funciona este ("see if this works")
-        local_model =
+        local_model = ModelLoader(custom_model[model_name])
         response = local_model.interact(chat_history)
     else:
         response = interact_groq(chat_history, model_name)
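In the local branch above, the previous model is deleted and the CUDA cache emptied before a new ModelLoader is constructed, so two sets of weights never sit on the GPU at once. A hedged sketch of that swap as a standalone helper, assuming ModelLoader and its interact method behave as used in this diff:

import torch
from import_model import ModelLoader
from config import custom_model

local_model = None  # module-level handle, as in app.py

def swap_local_model(model_name):
    global local_model
    if local_model is not None:
        local_model = None        # drop the only reference to the old weights
        torch.cuda.empty_cache()  # hand cached blocks back to the CUDA driver
        torch.cuda.synchronize()  # wait for in-flight kernels to finish
    local_model = ModelLoader(custom_model[model_name])
    return local_model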
@@ -489,5 +486,5 @@ with gr.Blocks() as demo:
         inputs=[score_input, comment_input, story_dropdown, user_dropdown, system_prompt_dropdown, model_checkbox],
         outputs=[data_table, comment_input])
 
-demo.launch(share=
+demo.launch(share=False)
 #demo.launch(share=True)
config.py CHANGED
@@ -8,6 +8,7 @@ hugging_face_token = os.getenv("HUGGING_FACE_TOKEN")
 replicate_token = os.getenv("REPLICATE_TOKEN")
 groq_token = os.getenv("GROQ_TOKEN")
 atlas_token = os.getenv("ATLAS_TOKEN")
+open_ruter_token = os.getenv("OPEN_RUTER_TOKEN")
 
 #atlas configuration
 class AtlasClient:
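Like the tokens above it, the new OpenRouter credential is resolved from an environment variable, so it belongs in the Space's secrets rather than in the repository. A small hedged sketch of consuming it with an explicit failure when the secret is missing (the variable name comes straight from this diff):

import os

open_ruter_token = os.getenv("OPEN_RUTER_TOKEN")  # None when the secret is unset
if not open_ruter_token:
    raise RuntimeError("OPEN_RUTER_TOKEN is not set; add it to the Space secrets")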