hannantoprak naelghouti committed on
Commit
4ba9389
·
verified ·
1 Parent(s): de7d3aa

Update app.py (#2)

Browse files

- Update app.py (97048a68d6668d7e8d1b3e8e76a7642ce905a50b)


Co-authored-by: nadia elghouti <[email protected]>

Files changed (1) hide show
  1. app.py +32 -3
app.py CHANGED
@@ -1,9 +1,14 @@
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- from fastapi.middleware.cors import CORSMiddleware
4
 
 
 
 
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
 
 
7
  def respond(
8
  message,
9
  history: list[tuple[str, str]],
@@ -13,9 +18,33 @@ def respond(
13
  top_p,
14
  ):
15
  messages = [{"role": "system", "content": system_message}]
16
-
17
- # ... (rest of your respond function remains the same)
18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  # Create Gradio app with queue enabled
20
  app = gr.ChatInterface(
21
  respond,
 
1
+ this is what i have in the app
2
+
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
 
5
 
6
+ """
7
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
8
+ """
9
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
10
 
11
+
12
  def respond(
13
  message,
14
  history: list[tuple[str, str]],
 
18
  top_p,
19
  ):
20
  messages = [{"role": "system", "content": system_message}]
 
 
21
 
22
+ for val in history:
23
+ if val[0]:
24
+ messages.append({"role": "user", "content": val[0]})
25
+ if val[1]:
26
+ messages.append({"role": "assistant", "content": val[1]})
27
+
28
+ messages.append({"role": "user", "content": message})
29
+
30
+ response = ""
31
+
32
+ for message in client.chat_completion(
33
+ messages,
34
+ max_tokens=max_tokens,
35
+ stream=True,
36
+ temperature=temperature,
37
+ top_p=top_p,
38
+ ):
39
+ token = message.choices[0].delta.content
40
+
41
+ response += token
42
+ yield response
43
+
44
+
45
+ """
46
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
47
+ """
48
  # Create Gradio app with queue enabled
49
  app = gr.ChatInterface(
50
  respond,