YashDave committed on
Commit 3abd1c3 · verified · 1 Parent(s): 5772735

Update app.py

Files changed (1): app.py +15 -37
app.py CHANGED
@@ -1,6 +1,6 @@
 import streamlit as st
 import random
-from app_config import SYSTEM_PROMPT, NLP_MODEL_NAME, NUMBER_OF_VECTORS_FOR_RAG, NLP_MODEL_TEMPERATURE, NLP_MODEL_MAX_TOKENS, VECTOR_MAX_TOKENS,my_vector_store,chat,tiktoken_len
+from app_config import SYSTEM_PROMPT, NLP_MODEL_NAME, NUMBER_OF_VECTORS_FOR_RAG, NLP_MODEL_TEMPERATURE, NLP_MODEL_MAX_TOKENS, VECTOR_MAX_TOKENS, my_vector_store, chat, tiktoken_len
 from langchain.memory import ConversationSummaryBufferMemory
 from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
 from langchain.chains.summarize import load_summarize_chain
@@ -9,37 +9,29 @@ from langchain_groq import ChatGroq
 from dotenv import load_dotenv
 from pathlib import Path
 import os
+
 env_path = Path('.') / '.env'
 load_dotenv(dotenv_path=env_path)
 
-def response_generator(prompt: str) -> str:
-    """this function can be used for general quetion answers which are related to tyrex and tyre recycling
-
-    Args:
-        prompt (string): user query
-
-    Returns:
-        string: answer of the query
-    """
+# Initialize vector store and LLM outside session state
+retriever = my_vector_store.as_retriever(k=NUMBER_OF_VECTORS_FOR_RAG)
+llm = ChatGroq(temperature=NLP_MODEL_TEMPERATURE, groq_api_key=str(os.getenv('GROQ_API_KEY')), model_name=NLP_MODEL_NAME)
 
+def response_generator(prompt: str) -> str:
     try:
-        retriever = st.session_state.retriever
        docs = retriever.invoke(prompt)
        my_context = [doc.page_content for doc in docs]
        my_context = '\n\n'.join(my_context)
-        system_message = SystemMessage(content = SYSTEM_PROMPT.format(context=my_context, previous_message_summary=st.session_state.rag_memory.moving_summary_buffer))
+        system_message = SystemMessage(content=SYSTEM_PROMPT.format(context=my_context, previous_message_summary=st.session_state.rag_memory.moving_summary_buffer))
        print(system_message)
        chat_messages = (system_message + st.session_state.rag_memory.chat_memory.messages + HumanMessage(content=prompt)).messages
        print("total tokens: ", tiktoken_len(str(chat_messages)))
-        # print("my_context*********",my_context)
-        response = st.session_state.llm.invoke(chat_messages)
+        response = llm.invoke(chat_messages)
        return response.content
-
    except Exception as error:
        print(error, "ERROR")
        return "Oops! something went wrong, please try again."
 
-
 st.markdown(
     """
     <style>
@@ -52,30 +44,16 @@ st.markdown(
     unsafe_allow_html=True,
 )
 
-# When user gives input
-
-print("SYSTEM MESSAGE")
+# Initialize session state
 if "messages" not in st.session_state:
-    st.session_state.messages=[{"role": "system", "content": SYSTEM_PROMPT}]
-
-print("SYSTEM MODEL")
-if "llm" not in st.session_state:
-    st.session_state.llm = ChatGroq(temperature=NLP_MODEL_TEMPERATURE, groq_api_key=str(os.getenv('GROQ_API_KEY')), model_name=NLP_MODEL_NAME)
-
-print("rag")
+    st.session_state.messages = [{"role": "system", "content": SYSTEM_PROMPT}]
 if "rag_memory" not in st.session_state:
-    st.session_state.rag_memory = ConversationSummaryBufferMemory(llm=st.session_state.llm, max_token_limit= 5000)
-
-print("retrival")
+    st.session_state.rag_memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=5000)
 if "retriever" not in st.session_state:
-    # vector_store = get_vectorstore_with_doc_from_pdf('GPT OUTPUT.pdf')
-    st.session_state.retriever = my_vector_store.as_retriever(k=NUMBER_OF_VECTORS_FOR_RAG)
-
+    st.session_state.retriever = retriever
 
 st.title("Insurance Bot")
-print("container")
-# Display chat messages from history
-container = st.container(height=600)
+container = st.container(height=600)
 for message in st.session_state.messages:
     if message["role"] != "system":
         with container.chat_message(message["role"]):
@@ -84,7 +62,7 @@ for message in st.session_state.messages:
 if prompt := st.chat_input("Enter your query here... "):
     with container.chat_message("user"):
         st.write(prompt)
-    st.session_state.messages.append({"role":"user" , "content":prompt})
+    st.session_state.messages.append({"role": "user", "content": prompt})
 
     with container.chat_message("assistant"):
         response = response_generator(prompt=prompt)
@@ -94,4 +72,4 @@ if prompt := st.chat_input("Enter your query here... "):
 
     print("Response is:", response)
    st.session_state.rag_memory.save_context({'input': prompt}, {'output': response})
-    st.session_state.messages.append({"role":"assistant" , "content":response})
+    st.session_state.messages.append({"role": "assistant", "content": response})
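
The refactor above assumes that `app_config` exposes, among other things, a `SYSTEM_PROMPT` template with `{context}` and `{previous_message_summary}` placeholders and a `tiktoken_len` token counter. That module is not part of this commit, so the following is only a minimal sketch of those two pieces under assumed placeholder values (model name, temperature, token limits are hypothetical), with the project-specific `my_vector_store` and `chat` objects omitted:

# app_config.py -- hypothetical sketch, not the module shipped with this repo
import tiktoken

# Assumed placeholder values; the real constants live in the project's app_config.
NLP_MODEL_NAME = "llama3-70b-8192"
NLP_MODEL_TEMPERATURE = 0.1
NLP_MODEL_MAX_TOKENS = 1024
VECTOR_MAX_TOKENS = 512
NUMBER_OF_VECTORS_FOR_RAG = 4

# The template must provide the two placeholders filled in response_generator().
SYSTEM_PROMPT = (
    "You are an assistant answering insurance questions from the given context.\n\n"
    "Context:\n{context}\n\n"
    "Summary of the conversation so far:\n{previous_message_summary}"
)

_encoding = tiktoken.get_encoding("cl100k_base")

def tiktoken_len(text: str) -> int:
    """Count tokens in `text` with the cl100k_base encoding."""
    return len(_encoding.encode(text))

# my_vector_store (a LangChain vector store built from the project's documents)
# and chat are also defined in the real app_config; their construction is omitted here.

One design note: because Streamlit re-executes the whole script on every interaction, the module-level `retriever` and `llm` introduced in this commit are rebuilt on each rerun; if that construction ever becomes expensive, wrapping it in `st.cache_resource` (or keeping the objects in `st.session_state`, as before) avoids repeated initialization.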