limit context length to 1500
app.py CHANGED
@@ -25,10 +25,12 @@ def ask(question, history, behavior):
         for i,content in enumerate(history + [question])
     ]
     length_messages = num_tokens_from_messages(messages)
+    """
     time_penalty = (length_messages-1000)//10
     if time_penalty>0:
         print(f"sleep for {time_penalty:.2f}s for too long a quest: {length_messages}")
         time.sleep(time_penalty)
+    """
     response = openai.ChatCompletion.create(
         model="gpt-3.5-turbo",
         messages=forget_long_term(messages)
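The change above disables the sleep-based throttling: the time_penalty block is wrapped in a bare triple-quoted string, which turns it into a no-op expression, so only the token budget enforced by forget_long_term now limits a request. For reference, a minimal sketch of the kind of token counter that length_messages relies on, assuming the tiktoken tokenizer and the per-message overhead constants from the OpenAI cookbook (count_tokens is a hypothetical name, not the function defined in app.py):

# Sketch (assumption): a token counter in the spirit of num_tokens_from_messages,
# using tiktoken; the +4 per message and +2 reply-priming overheads follow the
# OpenAI cookbook convention for gpt-3.5-turbo and may differ from app.py.
import tiktoken

def count_tokens(messages, model="gpt-3.5-turbo"):
    encoding = tiktoken.encoding_for_model(model)
    num_tokens = 0
    for message in messages:
        num_tokens += 4  # fixed framing overhead per message (role, separators)
        for value in message.values():
            num_tokens += len(encoding.encode(value))
    return num_tokens + 2  # priming tokens for the assistant's reply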
@@ -62,7 +64,7 @@ def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
         raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
 See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
 
-def forget_long_term(messages, max_num_tokens=
+def forget_long_term(messages, max_num_tokens=1500):
     while num_tokens_from_messages(messages)>max_num_tokens:
         if messages[0]["role"]=="system" and not len(messages[0]["content"]>=max_num_tokens):
             messages = messages[:1] + messages[2:]
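The diff shows only the head of forget_long_term, and the guard len(messages[0]["content"]>=max_num_tokens) in the context line compares a string against an int before calling len(), which would raise a TypeError at runtime. A self-contained sketch of the apparent intent (keep the system prompt, drop the oldest non-system turn until the budget fits) could look like the following; it reuses the count_tokens sketch above and is an assumption, not the repo's actual implementation:

# Sketch (assumption): trim history to a token budget while keeping the system prompt.
def forget_long_term(messages, max_num_tokens=1500):
    messages = list(messages)  # work on a copy
    # Stop once we fit the budget, or when only one message is left
    # (avoids looping forever on a single oversized message).
    while count_tokens(messages) > max_num_tokens and len(messages) > 1:
        if messages[0]["role"] == "system":
            messages = messages[:1] + messages[2:]  # keep system prompt, drop next oldest
        else:
            messages = messages[1:]                 # no system prompt: drop the oldest turn
    return messages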
@@ -194,5 +196,5 @@ with gr.Blocks() as demo:
     button_rtr.click(fn=retry, inputs=[txt, state, behavior], outputs=[txt, state, chatbot, downloadfile])
     button_clr.click(fn=lambda :([],[]), inputs=None, outputs=[chatbot, state])
 
-demo.queue(concurrency_count=3, max_size=10)
+#demo.queue(concurrency_count=3, max_size=10)
 demo.launch()
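Commenting out demo.queue() means requests are no longer routed through Gradio's request queue, so the concurrency_count=3 / max_size=10 limits no longer apply. A minimal sketch of the two launch styles, assuming the Gradio 3.x API in which queue() still accepts these parameters:

# Sketch (assumption): Gradio 3.x launch with and without the request queue.
import gradio as gr

with gr.Blocks() as demo:
    ...  # UI components and callbacks, as defined in app.py

# Queued: at most 3 requests are processed concurrently, up to 10 more wait in line.
# demo.queue(concurrency_count=3, max_size=10)

# Unqueued (the state after this commit): requests go straight to the handlers.
demo.launch()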