gaurav2003 committed on
Commit
7f8e9ac
·
verified ·
1 Parent(s): d37f7e0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -5
app.py CHANGED
@@ -10,25 +10,27 @@ base_model = AutoModelForCausalLM.from_pretrained(
10
  device_map="auto"
11
  )
12
 
13
- # Load LoRA Adapter
14
  model = PeftModel.from_pretrained(base_model, "gaurav2003/room-service-chatbot")
15
 
16
- # Load tokenizer (from base model)
17
  tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
18
  tokenizer.pad_token = tokenizer.eos_token
19
 
20
def chat(user_input, history=None):
    """Generate a reply to *user_input* with the LoRA-adapted model.

    Parameters:
        user_input (str): The user's message.
        history (list | None): Conversation history passed in by Gradio;
            currently unused. Defaults to None rather than a mutable []
            so the default list is not shared across calls.

    Returns:
        str: The decoded model completion, without the echoed prompt.
    """
    input_ids = tokenizer(user_input, return_tensors="pt", padding=True).input_ids.to(model.device)
    output = model.generate(input_ids, max_new_tokens=150)
    # generate() returns prompt + completion; decoding output[0] in full
    # would echo the user's prompt back. Decode only the new tokens.
    new_tokens = output[0][input_ids.shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
25
 
26
# Wire the chat() handler into a minimal Gradio text interface.
iface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(placeholder="Ask something..."),
    outputs="text",
    title="Room Service Chatbot",
)

if __name__ == "__main__":
    # Start the web app only when executed as a script.
    iface.launch()
 
 
10
  device_map="auto"
11
  )
12
 
13
+ # Load LoRA adapter
14
  model = PeftModel.from_pretrained(base_model, "gaurav2003/room-service-chatbot")
15
 
16
+ # Tokenizer
17
  tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
18
  tokenizer.pad_token = tokenizer.eos_token
19
 
20
def chat(user_input, history=None):
    """Generate a sampled reply to *user_input* with the LoRA-adapted model.

    Parameters:
        user_input (str): The user's message.
        history (list | None): Conversation history passed in by Gradio;
            currently unused. Defaults to None rather than a mutable []
            so the default list is not shared across calls.

    Returns:
        str: The decoded model completion, without the echoed prompt.
    """
    input_ids = tokenizer(user_input, return_tensors="pt", padding=True).input_ids.to(model.device)
    # do_sample + temperature gives varied (non-greedy) responses.
    output = model.generate(input_ids, max_new_tokens=150, do_sample=True, temperature=0.7)
    # generate() returns prompt + completion; decoding output[0] in full
    # would echo the user's prompt back. Decode only the new tokens.
    new_tokens = output[0][input_ids.shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
25
 
26
# Build the Gradio UI: a two-line textbox feeding the chat() handler,
# with the reply rendered as plain text.
question_box = gr.Textbox(placeholder="Ask something...", lines=2)

iface = gr.Interface(
    fn=chat,
    inputs=question_box,
    outputs="text",
    title="Room Service Chatbot",
    description="Ask anything related to your stay or room service.",
)

if __name__ == "__main__":
    # Start the web app only when executed as a script.
    iface.launch()
36
+