Cartinoe5930 committed on
Commit
68ebf3b
1 Parent(s): fb8863f

Update app.py

Files changed (1)
  1. app.py +20 -5
app.py CHANGED
@@ -5,6 +5,8 @@ import os
 from model_inference import Inference
 HF_TOKEN = os.environ.get("HF_TOKEN")
 
+question_selector_map = {}
+
 model_list = ["llama", "llama-chat", "vicuna", "falcon", "falcon-instruct", "orca", "wizardlm"]
 
 with open("src/inference_endpoint.json", "r") as f:
@@ -13,6 +15,16 @@ with open("src/inference_endpoint.json", "r") as f:
 for i in range(len(model_list)):
     inference_endpoint[model_list[i]]["headers"]["Authorization"] += HF_TOKEN
 
+def build_question_selector_map(questions):
+    question_selector_map = {}
+
+    # Build question selector map
+    for q in questions:
+        preview = f"{q['question_id']+1}: " + q["question"][:128] + "..."
+        question_selector_map[preview] = q
+
+    return question_selector_map
+
 def warmup(model_list=model_list, model_inference_endpoints=inference_endpoint):
     for i in range(len(model_list)):
         API_URL = model_inference_endpoints[model_list[i]["API_URL"]]
@@ -81,9 +93,9 @@ def load_questions(math, gsm):
     gsm_questions = []
     # mmlu_questions = []
     for i in range(100):
-        math_questions.append(f"{i+1}. " + math[i]["question"])
-        gsm_questions.append(f"{i+1}. " + gsm[i]["question"])
-        # mmlu_questions.append(f"{i+1}. " + mmlu[i]["question"])
+        math_questions.append(f"{i+1}: " + math[i]["question"][:128] + "...")
+        gsm_questions.append(f"{i+1}: " + gsm[i]["question"][:128] + "...")
+        # mmlu_questions.append(f"{i+1}: " + mmlu[i]["question"][:128] + "...")
 
     return math_questions, gsm_questions#, mmlu_questions
@@ -91,6 +103,9 @@ math_result, gsm_result = load_responses()
 
 math_questions, gsm_questions = load_questions(math_result, gsm_result)
 
+math_question_selector_map = build_question_selector_map(math_result)
+gsm_question_selector_map = build_question_selector_map(gsm_result)
+
 TITLE = """<h1 align="center">LLM Agora 🗣️🏦</h1>"""
 
 INTRODUCTION_TEXT = """
@@ -194,11 +209,11 @@ with gr.Blocks() as demo:
     with gr.Tab("Math"):
         math_cot = gr.Checkbox(label="CoT", info="If you want to see CoT result, please check the box.")
-        math_question_list = gr.Dropdown(math_questions, label="Math Question", every=0.1, type="index")
+        math_question_list = gr.Dropdown(math_questions, value=math_questions[0], label="Math Question", every=0.1)
 
         with gr.Column():
             with gr.Row(elem_id="model1_response"):
-                math_model1_output1 = gr.Textbox(label="Llama2🦙's 1️⃣st response", value=math_result[int(math_question_list)]["agent_response"]["llama"][0]) # value=math_result[math_question_list]["agent_response"]["llama"][0]
+                math_model1_output1 = gr.Textbox(label="Llama2🦙's 1️⃣st response") # value=math_result[math_question_list]["agent_response"]["llama"][0]
                 math_model2_output1 = gr.Textbox(label="WizardLM🧙‍♂️'s 1️⃣st response")
                 math_model3_output1 = gr.Textbox(label="Orca🐬's 1️⃣st response")
                 math_summarization_text1 = gr.Textbox(lebel="Summarization 1️⃣")
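
For context (not part of this commit): a minimal sketch of how the new build_question_selector_map output might be wired to the Math dropdown inside the same gr.Blocks() context, so that picking a question fills the response textboxes, which is the role the removed value= lookup used to play. It assumes Gradio's .change() event listener, that the dropdown choice strings from load_questions match the selector-map keys (both use the same preview format, provided question_id equals the list index), and that agent_response also has "wizardlm" and "orca" entries; only "llama" appears in the diff, and show_math_responses is a hypothetical helper name.

def show_math_responses(preview):
    # Hypothetical callback: map the selected preview string back to its full record.
    q = math_question_selector_map[preview]
    return (
        q["agent_response"]["llama"][0],
        q["agent_response"]["wizardlm"][0],  # key assumed, not shown in the diff
        q["agent_response"]["orca"][0],      # key assumed, not shown in the diff
    )

math_question_list.change(
    fn=show_math_responses,
    inputs=math_question_list,
    outputs=[math_model1_output1, math_model2_output1, math_model3_output1],
)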