Spaces: Runtime error
Cartinoe5930 committed · Commit 50f0def · Parent(s): 897a9d9
Update app.py

app.py CHANGED
@@ -50,9 +50,8 @@ def mmlu_display_question_answer(question, cot, request: gr.Request):
 
     return q["agent_response"]["llama"][0], q["agent_response"]["wizardlm"][0], q["agent_response"]["orca"][0], q["summarization"][0], q["agent_response"]["llama"][1], q["agent_response"]["wizardlm"][1], q["agent_response"]["orca"][1], q["summarization"][1], q["agent_response"]["llama"][2], q["agent_response"]["wizardlm"][2], q["agent_response"]["orca"][2]
 
-
-def warmup(model_list=warmup_test, model_inference_endpoints=inference_endpoint):
-    for model in model_list:
+def warmup(list_model, model_inference_endpoints=inference_endpoint):
+    for model in list_model:
         API_URL = model_inference_endpoints[model]["API_URL"]
         headers = model_inference_endpoints[model]["headers"]
         headers["Authorization"] += HF_TOKEN
@@ -66,6 +65,7 @@ def warmup(model_list=warmup_test, model_inference_endpoints=inference_endpoint)
 
         time.sleep(1)
     return {
+        model_list: gr.update(visible=False),
         options: gr.update(visible=True),
         inputbox: gr.update(visible=True),
         submit: gr.update(visible=True),
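The two hunks above rename warmup's parameter from model_list (which now names the CheckboxGroup component itself) to list_model, drop the warmup_test default so the user's actual selection is passed in, and extend the returned update dict to hide the model picker once warm-up completes. Below is a minimal, self-contained sketch of this Gradio pattern; the endpoint URL, ping payload, and token handling are illustrative assumptions, not code from the Space.

```python
import time

import gradio as gr
import requests

HF_TOKEN = "hf_..."  # assumption: the real Space reads its token from secrets

# Illustrative endpoint table; app.py defines the real inference_endpoint dict.
inference_endpoint = {
    "Llama2": {
        "API_URL": "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-hf",
        "headers": {"Authorization": "Bearer "},
    },
}

with gr.Blocks() as demo:
    model_list = gr.CheckboxGroup(["Llama2"], label="Model Selection")
    warmup_button = gr.Button("Warm-up LLM Agora 🔥")
    welcome_message = gr.HTML("🤗🔥 Welcome to LLM Agora 🔥🤗", visible=False)

    def warmup(list_model, model_inference_endpoints=inference_endpoint):
        for model in list_model:
            api_url = model_inference_endpoints[model]["API_URL"]
            headers = dict(model_inference_endpoints[model]["headers"])
            headers["Authorization"] += HF_TOKEN
            # One throwaway request asks the Inference API to load the model.
            requests.post(api_url, headers=headers, json={"inputs": "ping"})
            time.sleep(1)
        # Dict-style return: each key is a component, each value a gr.update.
        # Every entry needs its trailing comma; omitting one is a SyntaxError.
        return {
            model_list: gr.update(visible=False),
            warmup_button: gr.update(visible=False),
            welcome_message: gr.update(visible=True),
        }

    warmup_button.click(warmup, [model_list], [model_list, warmup_button, welcome_message])

demo.launch()
```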
@@ -157,8 +157,8 @@ The Math, GSM8K, and MMLU Tabs show the results of the experiment(Llama2, Wizard
 
 Here's how to use LLM Agora!
 
-1. Before starting, click the 'Warm-up LLM Agora 🔥' button and wait until 'LLM Agora
-2.
+1. Before starting, choose just 3 models, click the 'Warm-up LLM Agora 🔥' button, and wait until '🤗🔥 Welcome to LLM Agora 🔥🤗' appears. (We suggest grabbing a coffee ☕ since it takes about 5 minutes!)
+2. Once the interaction space is available, proceed with the following steps.
 3. Check the CoT box if you want to use Chain-of-Thought during inference.
 4. Please fill in your OpenAI API KEY; it will be used to call ChatGPT, which summarizes the responses.
 5. Type your question in the Question box and click the 'Submit' button! If you do so, LLM Agora will show you improved answers! 🤗 (It will take roughly a minute! Please wait for an answer!)
@@ -218,11 +218,11 @@ with gr.Blocks() as demo:
         gr.Markdown(INTRODUCTION_TEXT)
     with gr.Column():
         with gr.Tab("Inference"):
+            model_list = gr.CheckboxGroup(["Llama2", "Llama2-Chat", "Vicuna", "Falcon", "Falcon-Instruct", "WizardLM", "Orca"], label="Model Selection", info="Choose 3 LMs to participate in LLM Agora.", type="value", visible=True)
             warmup_button = gr.Button("Warm-up LLM Agora 🔥", visible=True)
             welcome_message = gr.HTML(WELCOME_TEXT, visible=False)
             with gr.Row(visible=False) as options:
                 with gr.Column():
-                    model_list = gr.CheckboxGroup(["Llama2", "Llama2-Chat", "Vicuna", "Falcon", "Falcon-Instruct", "WizardLM", "Orca"], label="Model Selection", info="Choose 3 LMs to participate in LLM Agora.", type="value")
                     cot = gr.Checkbox(label="CoT", info="Do you want to use CoT for inference?")
             with gr.Column() as inputbox:
                 API_KEY = gr.Textbox(label="OpenAI API Key", value="", info="Please fill in your OpenAI API token.", placeholder="sk..", type="password")
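Hoisting model_list out of the hidden options row means the picker is visible from the start and its value can feed the warm-up click. One thing gr.CheckboxGroup does not do by itself is enforce the "Choose 3 LMs" rule stated in its info text; a possible guard is sketched below using gradio's gr.Error (the function name check_selection is hypothetical, not from this commit).

```python
import gradio as gr

def check_selection(list_model):
    # A CheckboxGroup with type="value" delivers the checked labels as a list.
    if len(list_model) != 3:
        # gr.Error surfaces as a toast in the UI and aborts the event.
        raise gr.Error("Please choose exactly 3 models before warming up.")
    return list_model
```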
@@ -357,7 +357,5 @@ with gr.Blocks() as demo:
     with gr.Accordion("※ Specific information about LLM Agora", open=False):
         gr.Markdown(SPECIFIC_INFORMATION)
 
-    warmup_button.click(warmup, [], [options, inputbox, submit, warmup_button, welcome_message])
+    warmup_button.click(warmup, [model_list], [model_list, options, inputbox, submit, warmup_button, welcome_message])
     submit.click(inference, [model_list, question, API_KEY, cot], [output_msg, output_col, model1_output1, model2_output1, model3_output1, summarization_text1, model1_output2, model2_output2, model3_output2, summarization_text2, model1_output3, model2_output3, model3_output3])
-
-demo.launch()
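The rewired click now passes the CheckboxGroup value into warmup and lists model_list among the outputs so it can be hidden. With dict-style returns, the outputs list must name every component the callback might update, while the returned dict may cover any subset of them. Note also that this hunk deletes demo.launch(); unless launch is called elsewhere, the Blocks app never starts serving, which would be consistent with the Space's "Runtime error" status. A tiny runnable illustration of both points (component names here are generic, not from app.py):

```python
import gradio as gr

with gr.Blocks() as demo:
    box = gr.Textbox(label="question")
    btn = gr.Button("submit")

    def hide_box(text):
        # The dict may update a subset of the declared outputs; `btn` is
        # listed below but left unchanged here, which is perfectly legal.
        return {box: gr.update(visible=False)}

    # `outputs` must list every component the returned dict is allowed to touch.
    btn.click(hide_box, inputs=[box], outputs=[box, btn])

# Without this line a Blocks app never starts serving; the hunk above
# removes it from app.py.
demo.launch()
```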