Update app.py
--- a/app.py
+++ b/app.py
@@ -25,7 +25,7 @@ def predict(message, history, temperature, max_tokens, top_p, top_k):
     stop = StopOnTokens()
     messages = "".join(["".join(["\n<|end|>\n<|user|>\n"+item[0], "\n<|end|>\n<|assistant|>\n"+item[1]]) for item in history_transformer_format])
     model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
-    streamer = TextIteratorStreamer(tokenizer, timeout=
+    streamer = TextIteratorStreamer(tokenizer, timeout=300., skip_prompt=True, skip_special_tokens=True)
     generate_kwargs = dict(
         model_inputs,
         streamer=streamer,
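This hunk completes the previously unterminated TextIteratorStreamer(...) call: the streamer now gets a 300-second timeout and drops the echoed prompt and special tokens (such as <|end|>) from the streamed text. Below is a minimal sketch of the streaming pattern this line belongs to, assuming a loaded transformers model and tokenizer; stream_reply and max_new_tokens are illustrative names, not taken from the commit.

    # Sketch only: assumes `model` and `tokenizer` are already loaded, e.g. via
    # AutoModelForCausalLM / AutoTokenizer. Not the Space's full code.
    from threading import Thread

    from transformers import TextIteratorStreamer

    def stream_reply(prompt):
        model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
        # skip_prompt=True and skip_special_tokens=True keep the echoed prompt
        # and special markers out of the stream; timeout=300. bounds how long
        # the consuming loop blocks waiting for the next decoded chunk.
        streamer = TextIteratorStreamer(tokenizer, timeout=300., skip_prompt=True, skip_special_tokens=True)
        generate_kwargs = dict(model_inputs, streamer=streamer, max_new_tokens=4096)
        # generate() runs on a worker thread; the streamer yields decoded text
        # as tokens are produced, so the UI can update incrementally.
        Thread(target=model.generate, kwargs=generate_kwargs).start()
        partial = ""
        for new_text in streamer:
            partial += new_text
            yield partial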
@@ -50,7 +50,8 @@ demo = gr.ChatInterface(
     additional_inputs=[
         gr.Slider(0.1, 0.9, value=0.7, label="Temperature"),
         gr.Slider(512, 8192, value=4096, label="Max Tokens"),
-        gr.Slider(0.1, 0.9, value=0.7, label="top_p"
+        gr.Slider(0.1, 0.9, value=0.7, label="top_p"
+        ),
         gr.Slider(10, 90, value=40, label="top_k"),
     ]
 )
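The second hunk terminates the top_p slider's gr.Slider(...) call, which was missing its closing parenthesis. Below is a self-contained sketch of the resulting wiring, with a placeholder handler standing in for the Space's real predict: Gradio passes each additional_inputs value to the handler positionally, after (message, history), in list order, so the sliders must line up with the function signature.

    import gradio as gr

    # Placeholder handler: the real predict() streams model output; this one
    # just echoes the slider values to show how they arrive, in list order,
    # after (message, history).
    def predict(message, history, temperature, max_tokens, top_p, top_k):
        yield f"temperature={temperature}, max_tokens={max_tokens}, top_p={top_p}, top_k={top_k}"

    demo = gr.ChatInterface(
        predict,
        additional_inputs=[
            gr.Slider(0.1, 0.9, value=0.7, label="Temperature"),
            gr.Slider(512, 8192, value=4096, label="Max Tokens"),
            gr.Slider(0.1, 0.9, value=0.7, label="top_p"),
            gr.Slider(10, 90, value=40, label="top_k"),
        ],
    )

    if __name__ == "__main__":
        demo.launch()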