Cartinoe5930 committed on
Commit 4cb1f6b · 1 Parent(s): a2e4026

Update app.py

Files changed (1)
  1. app.py +53 -22
app.py CHANGED
@@ -1,46 +1,77 @@
  import gradio as gr
- import json
- import requests
- import openai
 
 
- def response_print(model_list, response_list):
-     answer = ""
-     cot = "CoT"
-     none = "None"
-     for idx in range(len(model_list)):
-         answer = answer + f"# {model_list[idx]}'s response: {cot if response_list else none}\n"
-     return answer
 
  TITLE = """<h1 align="center">LLM Agora 🗣️🏦</h1>"""
 
  INTRODUCTION_TEXT = """
- The **LLM Agora 🗣️🏦** aims to improve the quality of open-source LMs' responses through debate & revision introduced in [Improving Factuality and Reasoning in Language Models through Multiagent Debate](https://arxiv.org/abs/2305.14325).
 
  Do you know that? 🤔 **LLMs can also improve their responses by debating with other LLMs**! 😮 We applied this concept to several open-source LMs to verify that the open-source model, not the proprietary one, can sufficiently improve the response through discussion. 🤗
  For more details, please refer to the GitHub Repository below.
 
  You can use LLM Agora with your own questions if the response of open-source LM is not satisfactory and you want to improve the quality!
  The Math, GSM8K, and MMLU Tabs show the results of the experiment, and for inference, please use the 'Inference' tab.
- 
  Please check the more specific information in [GitHub Repository](https://github.com/gauss5930/LLM-Agora)!
  """
 
  with gr.Blocks() as demo:
      gr.HTML(TITLE)
      gr.Markdown(INTRODUCTION_TEXT)
 
-     with gr.Tab("Inference"):
-         gr.CheckboxGroup(["Llama2", "Alpaca", "Vicuna", "Koala", "Falcon", "Baize", "WizardLM", "Orca", "phi-1.5"], label="Model Selection", info="Choose 3 LMs to participate in LLM Agora."),
-         gr.Checkbox(label="CoT", info="Do you want to use CoT for inference?")
- 
-     with gr.Tab("Math"):
-         text
- 
-     with gr.Tab("GSM8K"):
- 
-     with gr.Tab("MMLU"):
- 
- demo.launch()
 
  import gradio as gr
 
+ def inference(model_list, cot):
+     if len(model_list) != 3:
+         raise gr.Error("Please choose just '3' models! Neither more nor less!")
+ 
+     if cot:
+         b = "CoT was also used."
+     else:
+         b = ""
+ 
+     a = f"Hello, {model_list[0]}, {model_list[1]}, and {model_list[2]}!! {b}"
+     return {
+         output_msg: gr.update(visible=True),
+         output_col: gr.update(visible=True),
+         model1_output1: a
+     }
 
  TITLE = """<h1 align="center">LLM Agora 🗣️🏦</h1>"""
 
  INTRODUCTION_TEXT = """
+ The **LLM Agora** 🗣️🏦 aims to improve the quality of open-source LMs' responses through debate & revision introduced in [Improving Factuality and Reasoning in Language Models through Multiagent Debate](https://arxiv.org/abs/2305.14325).
 
  Do you know that? 🤔 **LLMs can also improve their responses by debating with other LLMs**! 😮 We applied this concept to several open-source LMs to verify that the open-source model, not the proprietary one, can sufficiently improve the response through discussion. 🤗
  For more details, please refer to the GitHub Repository below.
 
  You can use LLM Agora with your own questions if the response of open-source LM is not satisfactory and you want to improve the quality!
  The Math, GSM8K, and MMLU Tabs show the results of the experiment, and for inference, please use the 'Inference' tab.
  Please check the more specific information in [GitHub Repository](https://github.com/gauss5930/LLM-Agora)!
  """
 
+ RESPONSE_TEXT = """<h1 align="center">🤗 Here are the responses to each model!! 🤗</h1>"""
+ 
  with gr.Blocks() as demo:
      gr.HTML(TITLE)
      gr.Markdown(INTRODUCTION_TEXT)
+     with gr.Column():
+         with gr.Tab("Inference"):
+             with gr.Row():
+                 with gr.Column():
+                     model_list = gr.CheckboxGroup(["Llama2", "Alpaca", "Vicuna", "Koala", "Falcon", "Baize", "WizardLM", "Orca", "phi-1.5"], label="Model Selection", info="Choose 3 LMs to participate in LLM Agora.", type="value")
+                     cot = gr.Checkbox(label="CoT", info="Do you want to use CoT for inference?")
+                 with gr.Column():
+                     API_KEY = gr.Textbox(label="OpenAI API Key", value="", info="Please fill in your OpenAI API token.", placeholder="sk..", type="password")
+                     auth_token = gr.Textbox(label="Huggingface Authentication Token", value="", info="Please fill in your HuggingFace Authentication token.", placeholder="hf..", type="password")
+             with gr.Column():
+                 question = gr.Textbox(value="", info="Please type your question!", placeholder="")
+                 output = gr.Textbox()
+                 submit = gr.Button("Submit")
+             with gr.Row(visible=False) as output_msg:
+                 gr.HTML(RESPONSE_TEXT)
+ 
+             with gr.Row(visible=False) as output_col:
+                 with gr.Column():
+                     model1_output1 = gr.Textbox(label="1️⃣ model's initial response")
+                     model1_output2 = gr.Textbox(label="1️⃣ model's revised response")
+                     model1_output3 = gr.Textbox(label="1️⃣ model's final response")
+                 with gr.Column():
+                     model2_output1 = gr.Textbox(label="2️⃣ model's initial response")
+                     model2_output2 = gr.Textbox(label="2️⃣ model's revised response")
+                     model2_output3 = gr.Textbox(label="2️⃣ model's final response")
+                 with gr.Column():
+                     model3_output1 = gr.Textbox(label="3️⃣ model's initial response")
+                     model3_output2 = gr.Textbox(label="3️⃣ model's revised response")
+                     model3_output3 = gr.Textbox(label="3️⃣ model's final response")
+ 
+         with gr.Tab("Math"):
+             output_math = gr.Textbox()
+         with gr.Tab("GSM8K"):
+             output_gsm = gr.Textbox()
+         with gr.Tab("MMLU"):
+             output_mmlu = gr.Textbox()
+ 
+     submit.click(inference, [model_list, cot], [output_msg, output_col, model1_output1])
+ 
+ demo.launch(debug=True)
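
The new `inference` callback works by returning a dictionary of `component: value_or_gr.update()` pairs, which lets a single click handler both fill a textbox and reveal rows that were created with `visible=False`. Below is a minimal, self-contained sketch of that Gradio 3.x pattern; it is not part of the commit, and names such as `greet`, `result_row`, and `result_box` are placeholders chosen for illustration.

```python
import gradio as gr

def greet(models, use_cot):
    # Hypothetical stand-in for the Space's `inference` callback: it returns a
    # dict mapping output components to values or gr.update() objects, so one
    # callback can fill a textbox and un-hide a row at the same time.
    note = " CoT was also used." if use_cot else ""
    return {
        result_row: gr.update(visible=True),
        result_box: f"Hello, {', '.join(models)}!{note}",
    }

with gr.Blocks() as demo:
    models = gr.CheckboxGroup(["Llama2", "Alpaca", "Vicuna"], label="Models")
    use_cot = gr.Checkbox(label="CoT")
    submit = gr.Button("Submit")
    with gr.Row(visible=False) as result_row:  # hidden until the callback reveals it
        result_box = gr.Textbox(label="Response")
    # Every component touched by the returned dict must be listed as an output.
    submit.click(greet, inputs=[models, use_cot], outputs=[result_row, result_box])

demo.launch()
```

This is why the commit lists `output_msg`, `output_col`, and `model1_output1` as outputs of `submit.click`: they are exactly the components keyed in the dictionary that `inference` returns.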
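
The introduction credits the debate-and-revision scheme from the cited multiagent-debate paper. As a rough, hypothetical sketch only (nothing below is taken from the LLM-Agora repository, and `query_model` stands in for however each open-source model is actually served), one debate round could look like:

```python
from typing import Callable, List

def debate_round(question: str,
                 models: List[str],
                 query_model: Callable[[str, str], str],
                 rounds: int = 2) -> List[str]:
    # Round 0: every model answers the question independently.
    answers = [query_model(m, question) for m in models]
    for _ in range(rounds):
        revised = []
        for i, m in enumerate(models):
            # Show each model the other agents' answers and ask it to revise.
            others = "\n\n".join(
                f"Agent {j + 1} answered:\n{a}"
                for j, a in enumerate(answers) if j != i
            )
            prompt = (
                f"Question: {question}\n\n{others}\n\n"
                "Considering the other agents' answers, give an updated answer."
            )
            revised.append(query_model(m, prompt))
        answers = revised
    return answers
```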