Update app.py
app.py CHANGED
@@ -2,20 +2,32 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import gradio as gr
 import torch
 
-model_id = "tiiuae/falcon-rw-1b"
+model_id = "tiiuae/falcon-rw-1b"
+
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id)
-pipe = pipeline(
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    device=0 if torch.cuda.is_available() else -1,
+)
 
 def chat(user_input, history):
     prompt = ""
     for user, bot in history:
         prompt += f"User: {user}\nBot: {bot}\n"
     prompt += f"User: {user_input}\nBot:"
-
-
-
+
+    result = pipe(prompt, max_new_tokens=64, do_sample=False)
+    output = result[0]["generated_text"]
+    reply = output.split("Bot:")[-1].strip()
     history.append((user_input, reply))
     return history, history
 
-gr.ChatInterface(
+gr.ChatInterface(
+    fn=chat,
+    chatbot=gr.Chatbot(),
+    title="Tiny Falcon Chatbot",
+    theme="default",
+).launch()
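Worth noting: gr.ChatInterface calls fn(message, history) and expects just the reply string back, managing the chat history itself, so the (history, history) return above matches the older Chatbot-callback pattern rather than ChatInterface's contract. Below is a minimal sketch of a ChatInterface-compatible variant, not part of the commit, assuming the pipe defined above and a Gradio version that passes history as (user, bot) pairs:

def chat(user_input, history):
    # Rebuild the running prompt from the (user, bot) pairs that
    # gr.ChatInterface passes in as history.
    prompt = ""
    for user, bot in history:
        prompt += f"User: {user}\nBot: {bot}\n"
    prompt += f"User: {user_input}\nBot:"

    # Greedy decoding with the same settings as the commit above.
    result = pipe(prompt, max_new_tokens=64, do_sample=False)
    output = result[0]["generated_text"]

    # Return only the new reply; ChatInterface appends it to the
    # history itself.
    return output.split("Bot:")[-1].strip()

If the two-output version is kept instead, wiring chat to a gr.Chatbot and a gr.State inside gr.Blocks would match that (history, history) return signature.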