mudabbirbhat committed
Commit 1d4e276 · verified · 1 Parent(s): 7343e6b

Update app.py

Files changed (1)
  1. app.py +26 -41
app.py CHANGED
@@ -1,12 +1,15 @@
+import os
 from threading import Thread
 from typing import Iterator
 import gradio as gr
+import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
+from modelGuards.suicideModel import predictSuicide
 from openai import OpenAI
-import requests, json
 
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
+MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 DEFAULT_SYSTEM_PROMPT = """\
 You are a helpful and joyous mental therapy assistant. Always answer as helpfully and cheerfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
 """
@@ -14,56 +17,32 @@ You are a helpful and joyous mental therapy assistant. Always answer as helpfull
 DESCRIPTION = """
 # LLama-2-Mental-Therapy-Chatbot
 """
+LICENSE = "open-source"
 
-client = OpenAI(
-    base_url="http://192.168.3.74:8080/v1",
-    api_key="-"
-)
-
-def response_guard(text):
-    url = 'http://192.168.3.74:6006/safety'
-    data = {'message': text}
-    response = requests.post(url, data=json.dumps(data), headers={'Content-Type': 'application/json'})
-    if response.status_code == 200:
-        result = response.json()
-        return result
-
-
+from llamaModel.model import get_input_token_length, get_LLAMA_response_stream
 def generate(
     message: str,
     chat_history: list[tuple[str, str]],
     system_prompt: str,
     max_new_tokens: int = 1024,
-    temperature: float = 1,
+    temperature: float = 0.6,
     top_p: float = 0.9,
+    top_k: int = 50,
 ) -> Iterator[str]:
-    llmGuardCheck = response_guard(message)
-    if llmGuardCheck != "safe":
-        raise gr.Error(llmGuardCheck)
-        yield llmGuardCheck
+    if os.getenv("PREDICT_SUICIDE") == "True" and predictSuicide(message) == "suicide":
+        yield "I am sorry that you are feeling this way. You need specialist help. Please consult a nearby doctor."
     else:
-        messages = []
+        conversation = []
         if system_prompt:
-            messages.append({"role": "system", "content": system_prompt})
+            conversation.append({"role": "system", "content": system_prompt})
         for user, assistant in chat_history:
-            messages.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
-        messages.append({"role": "user", "content": message})
-        chat_completion = client.chat.completions.create(
-            model="tgi", messages=messages, stream=True, max_tokens=max_new_tokens, temperature=temperature, top_p=top_p
-        )
-        response = ""
-        first_chunk = True
-        for chunk in chat_completion:
-            token = chunk.choices[0].delta.content
-            if first_chunk:
-                token = token.strip()  # the first token has a leading space, due to a bug in TGI
-                response += token
-                yield response
-                first_chunk = False
-            else:
-                if token != "</s>":
-                    response += token
-                    yield response
+            conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+        conversation.append({"role": "user", "content": message})
+        if get_input_token_length(conversation) > MAX_INPUT_TOKEN_LENGTH:
+            raise gr.Error(f"The accumulated input is too long ({get_input_token_length(conversation)} > {MAX_INPUT_TOKEN_LENGTH}). Clear your chat history and try again.")
+        generator = get_LLAMA_response_stream(conversation, max_new_tokens, temperature, top_p, top_k)
+        for response in generator:
+            yield response
 
 
 chat_interface = gr.ChatInterface(
@@ -93,6 +72,13 @@ chat_interface = gr.ChatInterface(
             step=0.05,
             value=0.95,
         ),
+        gr.Slider(
+            label="Top-k",
+            minimum=1,
+            maximum=1000,
+            step=1,
+            value=50,
+        ),
     ],
     stop_btn="Stop",
 )
@@ -100,6 +86,5 @@ chat_interface = gr.ChatInterface(
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     chat_interface.render()
-
 if __name__ == "__main__":
     demo.queue(max_size=20).launch()
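
The new guard path imports predictSuicide from modelGuards/suicideModel.py, a module that is not part of this diff. A minimal sketch of what it could look like, assuming a Hugging Face text-classification checkpoint whose labels include "suicide" (the checkpoint id below is a placeholder, not the repo's actual choice):

# modelGuards/suicideModel.py (hypothetical sketch; the real module is not shown in this commit)
from transformers import pipeline

# Placeholder checkpoint id: any binary suicide/non-suicide text classifier would slot in here.
_classifier = pipeline("text-classification", model="some-org/suicide-detection")

def predictSuicide(message: str) -> str:
    # Return the top predicted label for the message, e.g. "suicide" or "non-suicide".
    return _classifier(message, truncation=True)[0]["label"]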
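
Likewise, llamaModel/model.py is imported but not shown. Since app.py keeps its Thread, torch, AutoModelForCausalLM, and AutoTokenizer imports, the helper presumably loads the model locally and streams tokens; here is a sketch under that assumption, using transformers' TextIteratorStreamer and a placeholder checkpoint id:

# llamaModel/model.py (hypothetical sketch; the real module is not shown in this commit)
from threading import Thread
from typing import Iterator

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

MODEL_ID = "meta-llama/Llama-2-7b-chat-hf"  # placeholder checkpoint id

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.float16, device_map="auto")

def get_input_token_length(conversation: list[dict]) -> int:
    # Length in tokens of the chat-templated prompt.
    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
    return input_ids.shape[-1]

def get_LLAMA_response_stream(conversation, max_new_tokens, temperature, top_p, top_k) -> Iterator[str]:
    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
    )
    # Run generation on a background thread so tokens can be yielded as they arrive.
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    response = ""
    for text in streamer:
        response += text
        yield response  # cumulative text, the shape gr.ChatInterface expects from a streaming fn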
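
Both new runtime checks are configured through environment variables: the suicide guard only runs when PREDICT_SUICIDE is set to the string "True", and the input cap falls back to 4096 tokens when MAX_INPUT_TOKEN_LENGTH is unset, e.g. launching with PREDICT_SUICIDE=True MAX_INPUT_TOKEN_LENGTH=4096 python app.py.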