kevalfst commited on
Commit
796991d
·
verified ·
1 Parent(s): 58d5252

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -6
app.py CHANGED
@@ -2,20 +2,32 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
2
  import gradio as gr
3
  import torch
4
 
5
# Small model — fits the free Hugging Face Space hardware.
model_id = "tiiuae/falcon-rw-1b"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# device=0 selects the first CUDA GPU when present; -1 falls back to CPU.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,
)
 
10
def chat(user_input, history):
    """Append one generated turn to the conversation.

    Builds a "User:/Bot:" transcript from *history* plus the new message,
    samples a continuation from the pipeline, and returns the updated
    history twice (chatbot display value and state value).
    """
    # Assemble the transcript with join instead of repeated concatenation.
    turns = [f"User: {u}\nBot: {b}\n" for u, b in history]
    prompt = "".join(turns) + f"User: {user_input}\nBot:"

    response = pipe(prompt, max_new_tokens=128, do_sample=True, temperature=0.7)[0]["generated_text"]
    reply = response.split("Bot:")[-1].strip()

    history.append((user_input, reply))
    return history, history
20
 
21
# Wire the chat function into a Gradio chat UI and start the server.
demo = gr.ChatInterface(
    fn=chat,
    chatbot=gr.Chatbot(),
    title="Lightweight Chatbot",
)
demo.launch()
 
 
 
 
 
 
2
  import gradio as gr
3
  import torch
4
 
5
# Small causal LM that runs on modest Space hardware.
model_id = "tiiuae/falcon-rw-1b"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# device=0 is the first CUDA GPU when available; -1 means CPU.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,
)
15
 
16
def chat(user_input, history):
    """Generate one bot reply and append the turn to the conversation.

    Args:
        user_input: The newest message from the user.
        history: List of (user, bot) message pairs from earlier turns.

    Returns:
        The updated history twice (chatbot display value, state value).
    """
    # Rebuild the whole conversation as a plain-text prompt.
    prompt = ""
    for user, bot in history:
        prompt += f"User: {user}\nBot: {bot}\n"
    prompt += f"User: {user_input}\nBot:"

    result = pipe(prompt, max_new_tokens=64, do_sample=False)
    output = result[0]["generated_text"]

    # BUGFIX: generated_text echoes the prompt, so slice it off by length
    # instead of split("Bot:")[-1], which mis-parses whenever any user or
    # bot message itself contains the substring "Bot:".
    continuation = output[len(prompt):]
    # The model tends to keep writing further turns; stop at the next one.
    reply = continuation.split("User:")[0].strip()

    # NOTE(review): gr.ChatInterface expects fn(message, history) -> reply
    # string; this (history, history) return matches the Blocks/State
    # pattern instead — confirm against the installed Gradio version.
    history.append((user_input, reply))
    return history, history
27
 
28
# Build the chat UI and start the Gradio server.
# NOTE(review): gr.ChatInterface expects fn(message, history) -> reply
# string, while chat returns (history, history) — verify against the
# installed Gradio version.
demo = gr.ChatInterface(
    fn=chat,
    chatbot=gr.Chatbot(),
    title="Tiny Falcon Chatbot",
    theme="default",
)
demo.launch()