Spaces:
Paused
Paused
add retry
Browse files
app.py
CHANGED
@@ -8,6 +8,8 @@ import os
|
|
8 |
openai.api_key = os.getenv('API_KEY')
|
9 |
openai.request_times = 0
|
10 |
|
|
|
|
|
11 |
def ask(question, history, behavior):
|
12 |
openai.request_times += 1
|
13 |
print(f"request times {openai.request_times}: {datetime.datetime.now()}: {question}")
|
@@ -30,6 +32,7 @@ def ask(question, history, behavior):
|
|
30 |
print(e)
|
31 |
response = 'Timeout! Please wait a few minutes and retry'
|
32 |
history = history + [question, response]
|
|
|
33 |
return history
|
34 |
|
35 |
def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
|
@@ -60,6 +63,15 @@ def forget_long_term(messages, max_num_tokens=4000):
|
|
60 |
messages = messages[1:]
|
61 |
return messages
|
62 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
63 |
|
64 |
import gradio as gr
|
65 |
|
@@ -83,9 +95,29 @@ def to_md(content):
|
|
83 |
|
84 |
|
85 |
def predict(question, history=[], behavior=[]):
|
|
|
|
|
86 |
history = ask(question, history, behavior)
|
87 |
response = [(to_md(history[i]),to_md(history[i+1])) for i in range(0,len(history)-1,2)]
|
88 |
-
return "", history, response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
89 |
|
90 |
|
91 |
with gr.Blocks() as demo:
|
@@ -135,12 +167,15 @@ with gr.Blocks() as demo:
|
|
135 |
txt = gr.Textbox(show_label=False, placeholder="输入你想让ChatGPT回答的问题").style(container=False)
|
136 |
with gr.Row():
|
137 |
button_gen = gr.Button("Submit")
|
|
|
138 |
button_clr = gr.Button("Clear")
|
139 |
-
|
|
|
140 |
gr.Examples(examples=examples_bhv, inputs=bhv, label="Examples for setting behavior")
|
141 |
gr.Examples(examples=examples_txt, inputs=txt, label="Examples for asking question")
|
142 |
txt.submit(predict, [txt, state, behavior], [txt, state, chatbot])
|
143 |
-
button_gen.click(fn=predict, inputs=[txt, state, behavior], outputs=[txt, state, chatbot])
|
|
|
144 |
button_clr.click(fn=lambda :([],[]), inputs=None, outputs=[chatbot, state])
|
145 |
|
146 |
demo.launch()
|
|
|
8 |
openai.api_key = os.getenv('API_KEY')
|
9 |
openai.request_times = 0
|
10 |
|
11 |
+
all_dialogue = []
|
12 |
+
|
13 |
def ask(question, history, behavior):
|
14 |
openai.request_times += 1
|
15 |
print(f"request times {openai.request_times}: {datetime.datetime.now()}: {question}")
|
|
|
32 |
print(e)
|
33 |
response = 'Timeout! Please wait a few minutes and retry'
|
34 |
history = history + [question, response]
|
35 |
+
record_dialogue(history)
|
36 |
return history
|
37 |
|
38 |
def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
|
|
|
63 |
messages = messages[1:]
|
64 |
return messages
|
65 |
|
66 |
+
def record_dialogue(history):
|
67 |
+
dialogue = json.dumps(history, ensure_ascii=False)
|
68 |
+
for i in range(len(all_dialogue)):
|
69 |
+
if dialogue.startswith(all_dialogue[i]):
|
70 |
+
all_dialogue[i] = dialogue
|
71 |
+
return
|
72 |
+
all_dialogue.append(dialogue)
|
73 |
+
return
|
74 |
+
|
75 |
|
76 |
import gradio as gr
|
77 |
|
|
|
95 |
|
96 |
|
97 |
def predict(question, history=[], behavior=[]):
|
98 |
+
if question.startswith(f"{openai.api_key}:"):
|
99 |
+
return adminInstruct(question)
|
100 |
history = ask(question, history, behavior)
|
101 |
response = [(to_md(history[i]),to_md(history[i+1])) for i in range(0,len(history)-1,2)]
|
102 |
+
return "", history, response, None
|
103 |
+
|
104 |
+
|
105 |
+
def retry(question, history=[], behavior=[]):
|
106 |
+
if len(history)<2:
|
107 |
+
return "", history, [], None
|
108 |
+
question = history[-1]
|
109 |
+
history = history[:-2]
|
110 |
+
return predict(question, history, behavior)
|
111 |
+
|
112 |
+
|
113 |
+
def adminInstruct(question):
|
114 |
+
if "download-log" in question:
|
115 |
+
with open("./all_dialogue.jsonl", "w", encoding="utf-8") as f:
|
116 |
+
for dialogue in all_dialogue:
|
117 |
+
f.write(dialogue + "\n")
|
118 |
+
response = [(to_md(history[i]),to_md(history[i+1])) for i in range(0,len(history)-1,2)]
|
119 |
+
return "", history, response, gr.File.update(value="./all_dialogue.jsonl", visible=True)
|
120 |
+
return "", history, response, None
|
121 |
|
122 |
|
123 |
with gr.Blocks() as demo:
|
|
|
167 |
txt = gr.Textbox(show_label=False, placeholder="输入你想让ChatGPT回答的问题").style(container=False)
|
168 |
with gr.Row():
|
169 |
button_gen = gr.Button("Submit")
|
170 |
+
button_rtr = gr.Button("Retry")
|
171 |
button_clr = gr.Button("Clear")
|
172 |
+
|
173 |
+
downloadfile = gr.File(None, interactive=False, show_label=False, visible=False)
|
174 |
gr.Examples(examples=examples_bhv, inputs=bhv, label="Examples for setting behavior")
|
175 |
gr.Examples(examples=examples_txt, inputs=txt, label="Examples for asking question")
|
176 |
txt.submit(predict, [txt, state, behavior], [txt, state, chatbot])
|
177 |
+
button_gen.click(fn=predict, inputs=[txt, state, behavior], outputs=[txt, state, chatbot, downloadfile])
|
178 |
+
button_rtr.click(fn=retry, inputs=[txt, state, behavior], outputs=[txt, state, chatbot, downloadfile])
|
179 |
button_clr.click(fn=lambda :([],[]), inputs=None, outputs=[chatbot, state])
|
180 |
|
181 |
demo.launch()
|