Tejasw1 committed on
Commit
7f35abf
Β·
1 Parent(s): a94467d

Upload folder using huggingface_hub

Browse files
__pycache__/gradio_app.cpython-311.pyc CHANGED
Binary files a/__pycache__/gradio_app.cpython-311.pyc and b/__pycache__/gradio_app.cpython-311.pyc differ
 
gradio_app.py CHANGED
@@ -15,6 +15,7 @@ from langchain.chat_models import ChatOpenAI
15
  from langchain.embeddings import HuggingFaceBgeEmbeddings
16
  from langchain.llms import HuggingFaceTextGenInference, OpenAI
17
  from langchain.prompts import PromptTemplate
 
18
  from langchain.tools import tool
19
  from langchain.vectorstores import FAISS
20
  from pydantic import BaseModel, Field
@@ -74,11 +75,13 @@ def load_chain():
74
  # "Searches and returns documents regarding Indian law. Accepts query as a string. For example: 'Section 298 of Indian Penal Code'."
75
  # )
76
  tools = [search]
77
- llm = ChatOpenAI(openai_api_base='http://20.83.177.108:8080/v1',
78
  openai_api_key='none',)
79
 
80
  conv_agent_executor = create_conversational_retrieval_agent(
81
  llm, tools, verbose=False,
 
 
82
  )
83
  return conv_agent_executor
84
 
@@ -102,14 +105,20 @@ with gr.Blocks() as demo:
102
  def respond(history):
103
  print('message is', history[-1])
104
  bot_message = chain({'input': history[-1][0]})['output']
105
- history[-1][1] = ""
106
- for character in bot_message:
107
- history[-1][1] += character
108
- time.sleep(0.0)
109
- yield history
110
 
 
 
 
 
 
 
 
 
 
 
 
111
  msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False
112
  ).then(respond, chatbot, chatbot)
113
 
114
  if __name__ == "__main__":
115
- demo.queue(max_size=32).launch(enable_queue=True)
 
15
  from langchain.embeddings import HuggingFaceBgeEmbeddings
16
  from langchain.llms import HuggingFaceTextGenInference, OpenAI
17
  from langchain.prompts import PromptTemplate
18
+ from langchain.schema.messages import SystemMessage
19
  from langchain.tools import tool
20
  from langchain.vectorstores import FAISS
21
  from pydantic import BaseModel, Field
 
75
  # "Searches and returns documents regarding Indian law. Accepts query as a string. For example: 'Section 298 of Indian Penal Code'."
76
  # )
77
  tools = [search]
78
+ llm = ChatOpenAI(openai_api_base='http://20.124.240.6:8080/v1',
79
  openai_api_key='none',)
80
 
81
  conv_agent_executor = create_conversational_retrieval_agent(
82
  llm, tools, verbose=False,
83
+ system_message=SystemMessage(
84
+ content="Your name is Votum, an expert legal assistant with extensive knowledge about Indian law. Your task is to respond to the given query in a factually correct and concise manner unless asked for a detailed explanation. Feel free to use any tools available to look up relevant information, only if necessary")
85
  )
86
  return conv_agent_executor
87
 
 
105
  def respond(history):
106
  print('message is', history[-1])
107
  bot_message = chain({'input': history[-1][0]})['output']
 
 
 
 
 
108
 
109
+ if 'Final answer:' in bot_message:
110
+ bot_message = bot_message.split('Final answer:')[-1]
111
+
112
+ history[-1][1] = bot_message
113
+ # for character in bot_message:
114
+ # history[-1][1] += character
115
+ # time.sleep(0.0)
116
+ # yield history
117
+ return history
118
+
119
+ clear.click(chain.memory.clear(),)
120
  msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False
121
  ).then(respond, chatbot, chatbot)
122
 
123
  if __name__ == "__main__":
124
+ demo.queue(max_size=32).launch()
langchain_qwen.ipynb CHANGED
@@ -63,9 +63,10 @@
63
  "outputs": [],
64
  "source": [
65
  "from langchain.chat_models import ChatOpenAI\n",
 
66
  "\n",
67
- "llm = ChatOpenAI(openai_api_base='http://20.83.177.108:8080/v1',\n",
68
- " openai_api_key='none',)\n"
69
  ]
70
  },
71
  {
@@ -86,7 +87,6 @@
86
  "from langchain.agents.agent_toolkits import create_conversational_retrieval_agent\n",
87
  "from langchain.agents.openai_functions_agent.agent_token_buffer_memory import AgentTokenBufferMemory\n",
88
  "from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent\n",
89
- "from langchain.schema.messages import SystemMessage\n",
90
  "from langchain.prompts import MessagesPlaceholder\n",
91
  "from langchain.chains import ConversationalRetrievalChain\n",
92
  "from langchain.memory import ConversationBufferMemory\n",
@@ -130,23 +130,18 @@
130
  "\n",
131
  "\n",
132
  "\n",
 
 
 
133
  "tool = create_retriever_tool(\n",
134
  " retriever,\n",
135
  " \"search_legal_sections\",\n",
136
- " \"Searches and returns documents regarding Indian legal acts and sections.\"\n",
 
137
  ")\n",
138
  "tools = [tool]\n"
139
  ]
140
  },
141
- {
142
- "cell_type": "code",
143
- "execution_count": null,
144
- "metadata": {},
145
- "outputs": [],
146
- "source": [
147
- "print(type(tool),type(search))"
148
- ]
149
- },
150
  {
151
  "cell_type": "markdown",
152
  "metadata": {},
@@ -180,25 +175,13 @@
180
  },
181
  {
182
  "cell_type": "code",
183
- "execution_count": 111,
184
  "metadata": {},
185
- "outputs": [
186
- {
187
- "ename": "NameError",
188
- "evalue": "name 'HuggingFaceTextGenInference' is not defined",
189
- "output_type": "error",
190
- "traceback": [
191
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
192
- "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
193
- "\u001b[1;32m/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb Cell 12\u001b[0m line \u001b[0;36m1\n\u001b[0;32m----> <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X41sZmlsZQ%3D%3D?line=0'>1</a>\u001b[0m llm \u001b[39m=\u001b[39m HuggingFaceTextGenInference(\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X41sZmlsZQ%3D%3D?line=1'>2</a>\u001b[0m inference_server_url\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mhttp://20.83.177.108:8080/\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X41sZmlsZQ%3D%3D?line=2'>3</a>\u001b[0m max_new_tokens\u001b[39m=\u001b[39m\u001b[39m512\u001b[39m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X41sZmlsZQ%3D%3D?line=3'>4</a>\u001b[0m top_k\u001b[39m=\u001b[39m\u001b[39m10\u001b[39m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X41sZmlsZQ%3D%3D?line=4'>5</a>\u001b[0m top_p\u001b[39m=\u001b[39m\u001b[39m0.95\u001b[39m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X41sZmlsZQ%3D%3D?line=5'>6</a>\u001b[0m typical_p\u001b[39m=\u001b[39m\u001b[39m0.95\u001b[39m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X41sZmlsZQ%3D%3D?line=6'>7</a>\u001b[0m temperature\u001b[39m=\u001b[39m\u001b[39m0.6\u001b[39m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X41sZmlsZQ%3D%3D?line=7'>8</a>\u001b[0m \u001b[39m# repetition_penalty=1.1,\u001b[39;00m\n\u001b[1;32m <a 
href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X41sZmlsZQ%3D%3D?line=8'>9</a>\u001b[0m )\n",
194
- "\u001b[0;31mNameError\u001b[0m: name 'HuggingFaceTextGenInference' is not defined"
195
- ]
196
- }
197
- ],
198
  "source": [
199
  "from langchain.llms import HuggingFaceTextGenInference\n",
200
  "\n",
201
- "llm = HuggingFaceTextGenInference(\n",
202
  " inference_server_url=\"http://20.83.177.108:8080/\",\n",
203
  " max_new_tokens=2000,\n",
204
  " # top_k=10,\n",
@@ -217,6 +200,10 @@
217
  "source": [
218
  "from langchain.agents import initialize_agent, Tool\n",
219
  "from langchain.agents import AgentType\n",
 
 
 
 
220
  "\n",
221
  "\n",
222
  "\n",
@@ -227,8 +214,13 @@
227
  " verbose=True,\n",
228
  ")\n",
229
  "\n",
 
 
 
 
 
230
  "conv_agent_executor = create_conversational_retrieval_agent(\n",
231
- " llm, tools, verbose=True,\n",
232
  ")\n"
233
  ]
234
  },
@@ -240,8 +232,9 @@
240
  "source": [
241
  "# {'input': 'How is section 308 of Indian Penal Code different from section 299?'}\n",
242
  "conv_agent_executor(\n",
243
- " {'input': 'Sorry i meant 299.'}\n",
244
- " )\n"
 
245
  ]
246
  },
247
  {
@@ -253,21 +246,18 @@
253
  },
254
  {
255
  "cell_type": "code",
256
- "execution_count": 110,
257
  "metadata": {},
258
  "outputs": [
259
  {
260
- "ename": "ValidationError",
261
- "evalue": "1 validation error for OpenAI\n__root__\n Did not find openai_api_key, please add an environment variable `OPENAI_API_KEY` which contains it, or pass `openai_api_key` as a named parameter. (type=value_error)",
262
  "output_type": "error",
263
  "traceback": [
264
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
265
- "\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
266
- "\u001b[1;32m/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb Cell 15\u001b[0m line \u001b[0;36m8\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X40sZmlsZQ%3D%3D?line=2'>3</a>\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mlangchain\u001b[39;00m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X40sZmlsZQ%3D%3D?line=4'>5</a>\u001b[0m langchain\u001b[39m.\u001b[39mverbose \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m\n\u001b[0;32m----> <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X40sZmlsZQ%3D%3D?line=7'>8</a>\u001b[0m flare \u001b[39m=\u001b[39m FlareChain\u001b[39m.\u001b[39;49mfrom_llm(\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X40sZmlsZQ%3D%3D?line=8'>9</a>\u001b[0m ChatOpenAI(openai_api_base\u001b[39m=\u001b[39;49m\u001b[39m'\u001b[39;49m\u001b[39mhttp://20.83.177.108:8080/v1\u001b[39;49m\u001b[39m'\u001b[39;49m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X40sZmlsZQ%3D%3D?line=9'>10</a>\u001b[0m openai_api_key\u001b[39m=\u001b[39;49m\u001b[39m'\u001b[39;49m\u001b[39mnone\u001b[39;49m\u001b[39m'\u001b[39;49m),\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X40sZmlsZQ%3D%3D?line=10'>11</a>\u001b[0m retriever\u001b[39m=\u001b[39;49mretriever,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X40sZmlsZQ%3D%3D?line=11'>12</a>\u001b[0m max_generation_len\u001b[39m=\u001b[39;49m\u001b[39m164\u001b[39;49m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X40sZmlsZQ%3D%3D?line=12'>13</a>\u001b[0m 
min_prob\u001b[39m=\u001b[39;49m\u001b[39m0.3\u001b[39;49m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X40sZmlsZQ%3D%3D?line=13'>14</a>\u001b[0m )\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X40sZmlsZQ%3D%3D?line=15'>16</a>\u001b[0m query \u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mexplain in great detail the difference between the langchain framework and baby agi\u001b[39m\u001b[39m\"\u001b[39m\n",
267
- "File \u001b[0;32m~/.pyenv/versions/3.11.2/lib/python3.11/site-packages/langchain/chains/flare/base.py:249\u001b[0m, in \u001b[0;36mFlareChain.from_llm\u001b[0;34m(cls, llm, max_generation_len, **kwargs)\u001b[0m\n\u001b[1;32m 238\u001b[0m \u001b[39m\u001b[39m\u001b[39m\"\"\"Creates a FlareChain from a language model.\u001b[39;00m\n\u001b[1;32m 239\u001b[0m \n\u001b[1;32m 240\u001b[0m \u001b[39mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 246\u001b[0m \u001b[39m FlareChain class with the given language model.\u001b[39;00m\n\u001b[1;32m 247\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m 248\u001b[0m question_gen_chain \u001b[39m=\u001b[39m QuestionGeneratorChain(llm\u001b[39m=\u001b[39mllm)\n\u001b[0;32m--> 249\u001b[0m response_llm \u001b[39m=\u001b[39m OpenAI(\n\u001b[1;32m 250\u001b[0m max_tokens\u001b[39m=\u001b[39;49mmax_generation_len, model_kwargs\u001b[39m=\u001b[39;49m{\u001b[39m\"\u001b[39;49m\u001b[39mlogprobs\u001b[39;49m\u001b[39m\"\u001b[39;49m: \u001b[39m1\u001b[39;49m}, temperature\u001b[39m=\u001b[39;49m\u001b[39m0\u001b[39;49m\n\u001b[1;32m 251\u001b[0m )\n\u001b[1;32m 252\u001b[0m response_chain \u001b[39m=\u001b[39m _OpenAIResponseChain(llm\u001b[39m=\u001b[39mresponse_llm)\n\u001b[1;32m 253\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mcls\u001b[39m(\n\u001b[1;32m 254\u001b[0m question_generator_chain\u001b[39m=\u001b[39mquestion_gen_chain,\n\u001b[1;32m 255\u001b[0m response_chain\u001b[39m=\u001b[39mresponse_chain,\n\u001b[1;32m 256\u001b[0m \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs,\n\u001b[1;32m 257\u001b[0m )\n",
268
- "File \u001b[0;32m~/.pyenv/versions/3.11.2/lib/python3.11/site-packages/langchain/load/serializable.py:75\u001b[0m, in \u001b[0;36mSerializable.__init__\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m 74\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m__init__\u001b[39m(\u001b[39mself\u001b[39m, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs: Any) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m---> 75\u001b[0m \u001b[39msuper\u001b[39;49m()\u001b[39m.\u001b[39;49m\u001b[39m__init__\u001b[39;49m(\u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 76\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_lc_kwargs \u001b[39m=\u001b[39m kwargs\n",
269
- "File \u001b[0;32m~/.pyenv/versions/3.11.2/lib/python3.11/site-packages/pydantic/main.py:342\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n",
270
- "\u001b[0;31mValidationError\u001b[0m: 1 validation error for OpenAI\n__root__\n Did not find openai_api_key, please add an environment variable `OPENAI_API_KEY` which contains it, or pass `openai_api_key` as a named parameter. (type=value_error)"
271
  ]
272
  }
273
  ],
@@ -279,15 +269,15 @@
279
  "langchain.verbose = True\n",
280
  "\n",
281
  "\n",
282
- "# flare = FlareChain.from_llm(\n",
283
- "# llm,\n",
284
- "# retriever=retriever,\n",
285
- "# max_generation_len=164,\n",
286
- "# min_prob=0.3,\n",
287
- "# )\n",
288
  "\n",
289
  "query = \"explain in great detail the difference between the langchain framework and baby agi\"\n",
290
- "print(llm)\n"
291
  ]
292
  },
293
  {
@@ -335,6 +325,9 @@
335
  "from langchain.agents import initialize_agent, Tool\n",
336
  "from langchain.agents import AgentType\n",
337
  "from langchain.agents.react.base import DocstoreExplorer\n",
 
 
 
338
  "\n",
339
  "\n",
340
  "docstore = DocstoreExplorer(vectordb)\n",
@@ -352,9 +345,67 @@
352
  "]\n",
353
  "\n",
354
  "\n",
355
- "react = initialize_agent(\n",
356
  " tools, llm, agent=AgentType.REACT_DOCSTORE, verbose=True)\n",
357
  "\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
358
  "\n"
359
  ]
360
  },
 
63
  "outputs": [],
64
  "source": [
65
  "from langchain.chat_models import ChatOpenAI\n",
66
+ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
67
  "\n",
68
+ "ChatOpenAI(openai_api_base='http://20.124.240.6:8080/v1',\n",
69
+ " openai_api_key='none', callbacks=[StreamingStdOutCallbackHandler()], streaming=True,)\n"
70
  ]
71
  },
72
  {
 
87
  "from langchain.agents.agent_toolkits import create_conversational_retrieval_agent\n",
88
  "from langchain.agents.openai_functions_agent.agent_token_buffer_memory import AgentTokenBufferMemory\n",
89
  "from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent\n",
 
90
  "from langchain.prompts import MessagesPlaceholder\n",
91
  "from langchain.chains import ConversationalRetrievalChain\n",
92
  "from langchain.memory import ConversationBufferMemory\n",
 
130
  "\n",
131
  "\n",
132
  "\n",
133
+ "\n",
134
+ "\n",
135
+ "\n",
136
  "tool = create_retriever_tool(\n",
137
  " retriever,\n",
138
  " \"search_legal_sections\",\n",
139
+ " \"Searches and returns documents regarding Indian law. Accept query as a string. For example: 'Section 298 of Indian Penal Code'.\",\n",
140
+ " \n",
141
  ")\n",
142
  "tools = [tool]\n"
143
  ]
144
  },
 
 
 
 
 
 
 
 
 
145
  {
146
  "cell_type": "markdown",
147
  "metadata": {},
 
175
  },
176
  {
177
  "cell_type": "code",
178
+ "execution_count": null,
179
  "metadata": {},
180
+ "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
 
181
  "source": [
182
  "from langchain.llms import HuggingFaceTextGenInference\n",
183
  "\n",
184
+ "llm_hg = HuggingFaceTextGenInference(\n",
185
  " inference_server_url=\"http://20.83.177.108:8080/\",\n",
186
  " max_new_tokens=2000,\n",
187
  " # top_k=10,\n",
 
200
  "source": [
201
  "from langchain.agents import initialize_agent, Tool\n",
202
  "from langchain.agents import AgentType\n",
203
+ "from langchain.schema.messages import SystemMessage\n",
204
+ "import langchain\n",
205
+ "\n",
206
+ "langchain.verbose = True\n",
207
  "\n",
208
  "\n",
209
  "\n",
 
214
  " verbose=True,\n",
215
  ")\n",
216
  "\n",
217
+ "agent_kwargs = {\n",
218
+ " \"extra_prompt_messages\": [MessagesPlaceholder(variable_name=\"memory\")],\n",
219
+ " \"system_message\": SystemMessage(content=\"Your name is Votum, an expert legal assistant with extensive knowledge about Indian law. Your task is to respond to the given query in a factually correct and concise manner unless asked for a detailed explanation.\"),\n",
220
+ "}\n",
221
+ "\n",
222
  "conv_agent_executor = create_conversational_retrieval_agent(\n",
223
+ " llm, [search], verbose=False, agent_kwargs=agent_kwargs, \n",
224
  ")\n"
225
  ]
226
  },
 
232
  "source": [
233
  "# {'input': 'How is section 308 of Indian Penal Code different from section 299?'}\n",
234
  "conv_agent_executor(\n",
235
+ " {'input': 'Explain sections related to medical negligence.'}\n",
236
+ " )\n",
237
+ "\n"
238
  ]
239
  },
240
  {
 
246
  },
247
  {
248
  "cell_type": "code",
249
+ "execution_count": 9,
250
  "metadata": {},
251
  "outputs": [
252
  {
253
+ "ename": "NameError",
254
+ "evalue": "name 'retriever' is not defined",
255
  "output_type": "error",
256
  "traceback": [
257
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
258
+ "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
259
+ "\u001b[1;32m/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb Cell 15\u001b[0m line \u001b[0;36m1\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X20sZmlsZQ%3D%3D?line=2'>3</a>\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mlangchain\u001b[39;00m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X20sZmlsZQ%3D%3D?line=4'>5</a>\u001b[0m langchain\u001b[39m.\u001b[39mverbose \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X20sZmlsZQ%3D%3D?line=7'>8</a>\u001b[0m flare \u001b[39m=\u001b[39m FlareChain\u001b[39m.\u001b[39mfrom_llm(\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X20sZmlsZQ%3D%3D?line=8'>9</a>\u001b[0m llm_n,\n\u001b[0;32m---> <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X20sZmlsZQ%3D%3D?line=9'>10</a>\u001b[0m retriever\u001b[39m=\u001b[39mretriever,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X20sZmlsZQ%3D%3D?line=10'>11</a>\u001b[0m max_generation_len\u001b[39m=\u001b[39m\u001b[39m164\u001b[39m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X20sZmlsZQ%3D%3D?line=11'>12</a>\u001b[0m min_prob\u001b[39m=\u001b[39m\u001b[39m0.3\u001b[39m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X20sZmlsZQ%3D%3D?line=12'>13</a>\u001b[0m )\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X20sZmlsZQ%3D%3D?line=14'>15</a>\u001b[0m query \u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mexplain in great detail the 
difference between the langchain framework and baby agi\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_qwen.ipynb#X20sZmlsZQ%3D%3D?line=15'>16</a>\u001b[0m flare\u001b[39m.\u001b[39mrun(query)\n",
260
+ "\u001b[0;31mNameError\u001b[0m: name 'retriever' is not defined"
 
 
 
261
  ]
262
  }
263
  ],
 
269
  "langchain.verbose = True\n",
270
  "\n",
271
  "\n",
272
+ "flare = FlareChain.from_llm(\n",
273
+ " llm_n,\n",
274
+ " retriever=retriever,\n",
275
+ " max_generation_len=164,\n",
276
+ " min_prob=0.3,\n",
277
+ ")\n",
278
  "\n",
279
  "query = \"explain in great detail the difference between the langchain framework and baby agi\"\n",
280
+ "flare.run(query)\n"
281
  ]
282
  },
283
  {
 
325
  "from langchain.agents import initialize_agent, Tool\n",
326
  "from langchain.agents import AgentType\n",
327
  "from langchain.agents.react.base import DocstoreExplorer\n",
328
+ "import langchain\n",
329
+ "\n",
330
+ "langchain.verbose = True\n",
331
  "\n",
332
  "\n",
333
  "docstore = DocstoreExplorer(vectordb)\n",
 
345
  "]\n",
346
  "\n",
347
  "\n",
348
+ "react_docstore = initialize_agent(\n",
349
  " tools, llm, agent=AgentType.REACT_DOCSTORE, verbose=True)\n",
350
  "\n",
351
+ "react_docstore.run('hi')\n",
352
+ "\n"
353
+ ]
354
+ },
355
+ {
356
+ "cell_type": "code",
357
+ "execution_count": 7,
358
+ "metadata": {},
359
+ "outputs": [
360
+ {
361
+ "name": "stdout",
362
+ "output_type": "stream",
363
+ "text": [
364
+ "I am a large language model created by Alibaba Cloud. I am called QianWen."
365
+ ]
366
+ },
367
+ {
368
+ "data": {
369
+ "text/plain": [
370
+ "AIMessageChunk(content='I am a large language model created by Alibaba Cloud. I am called QianWen.')"
371
+ ]
372
+ },
373
+ "execution_count": 7,
374
+ "metadata": {},
375
+ "output_type": "execute_result"
376
+ }
377
+ ],
378
+ "source": [
379
+ "# from langchain.chat_models import ChatOpenAI\n",
380
+ "# from langchain.document_loaders import TextLoader\n",
381
+ "# from langchain.embeddings import OpenAIEmbeddings\n",
382
+ "# from langchain.indexes import VectorstoreIndexCreator\n",
383
+ "# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
384
+ "\n",
385
+ "# from langchain.schema import HumanMessage\n",
386
+ "\n",
387
+ "\n",
388
+ "# llm_n = ChatOpenAI(openai_api_base='http://20.124.240.6:8080/v1',\n",
389
+ "# openai_api_key='none', callbacks=[StreamingStdOutCallbackHandler()],streaming=True,)\n",
390
+ "\n",
391
+ "# questions = [\n",
392
+ "# \"Who is the speaker\",\n",
393
+ "# \"What did the president say about Ketanji Brown Jackson\",\n",
394
+ "# \"What are the threats to America\",\n",
395
+ "# \"Who are mentioned in the speech\",\n",
396
+ "# \"Who is the vice president\",\n",
397
+ "# \"How many projects were announced\",\n",
398
+ "# ]\n",
399
+ "\n",
400
+ "\n",
401
+ "# llm_n(\n",
402
+ "# [\n",
403
+ "# HumanMessage(\n",
404
+ "# content=\"What model are you?\"\n",
405
+ "# )\n",
406
+ "# ]\n",
407
+ "# )\n",
408
+ "\n",
409
  "\n"
410
  ]
411
  },
langchain_retreival.ipynb CHANGED
@@ -247,7 +247,7 @@
247
  },
248
  {
249
  "cell_type": "code",
250
- "execution_count": 2,
251
  "metadata": {
252
  "colab": {
253
  "base_uri": "https://localhost:8080/"
@@ -457,7 +457,7 @@
457
  },
458
  {
459
  "cell_type": "code",
460
- "execution_count": 4,
461
  "metadata": {
462
  "colab": {
463
  "base_uri": "https://localhost:8080/",
@@ -603,16 +603,7 @@
603
  "id": "Emj46ATxtV9C",
604
  "outputId": "169be147-f31e-4153-cf91-9b1cea6667be"
605
  },
606
- "outputs": [
607
- {
608
- "name": "stderr",
609
- "output_type": "stream",
610
- "text": [
611
- "/Users/tejasw/.pyenv/versions/3.11.2/lib/python3.11/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
612
- " from .autonotebook import tqdm as notebook_tqdm\n"
613
- ]
614
- }
615
- ],
616
  "source": [
617
  "\n",
618
  "from langchain.embeddings import HuggingFaceBgeEmbeddings\n",
@@ -640,7 +631,7 @@
640
  },
641
  {
642
  "cell_type": "code",
643
- "execution_count": 5,
644
  "metadata": {
645
  "colab": {
646
  "base_uri": "https://localhost:8080/"
@@ -648,19 +639,7 @@
648
  "id": "Q_eTIZwf4Dk2",
649
  "outputId": "57072a9e-bd8f-4296-b4e1-83c098f868c7"
650
  },
651
- "outputs": [
652
- {
653
- "ename": "NameError",
654
- "evalue": "name 'texts' is not defined",
655
- "output_type": "error",
656
- "traceback": [
657
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
658
- "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
659
- "File \u001b[0;32m<timed exec>:9\u001b[0m\n",
660
- "\u001b[0;31mNameError\u001b[0m: name 'texts' is not defined"
661
- ]
662
- }
663
- ],
664
  "source": [
665
  "%%time\n",
666
  "# Embed and store the texts\n",
@@ -677,21 +656,9 @@
677
  },
678
  {
679
  "cell_type": "code",
680
- "execution_count": 1,
681
  "metadata": {},
682
- "outputs": [
683
- {
684
- "ename": "NameError",
685
- "evalue": "name 'FAISS' is not defined",
686
- "output_type": "error",
687
- "traceback": [
688
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
689
- "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
690
- "\u001b[1;32m/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_retreival.ipynb Cell 24\u001b[0m line \u001b[0;36m3\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_retreival.ipynb#Y105sZmlsZQ%3D%3D?line=0'>1</a>\u001b[0m \u001b[39m# vectordb.save_local(\"faiss_index\")\u001b[39;00m\n\u001b[0;32m----> <a href='vscode-notebook-cell:/Users/tejasw/Downloads/scraper-law/votum-gradio/langchain_retreival.ipynb#Y105sZmlsZQ%3D%3D?line=2'>3</a>\u001b[0m vectordb \u001b[39m=\u001b[39m FAISS\u001b[39m.\u001b[39mload_local(\u001b[39m'\u001b[39m\u001b[39mfaiss_index\u001b[39m\u001b[39m'\u001b[39m,embeddings\u001b[39m=\u001b[39mmodel_norm)\n",
691
- "\u001b[0;31mNameError\u001b[0m: name 'FAISS' is not defined"
692
- ]
693
- }
694
- ],
695
  "source": [
696
  "# vectordb.save_local(\"faiss_index\")\n",
697
  "\n",
@@ -915,6 +882,8 @@
915
  " ),\n",
916
  "]\n",
917
  "\n",
 
 
918
  "llm = OpenAI(temperature=0)\n",
919
  "\n",
920
  "react = initialize_agent(tools, model, agent=AgentType.REACT_DOCSTORE, verbose=True)"
@@ -963,7 +932,10 @@
963
  "import os\n",
964
  "from langchain.chat_models import AzureChatOpenAI\n",
965
  "from langchain.schema import HumanMessage\n",
 
 
966
  "\n",
 
967
  "\n",
968
  "model = AzureChatOpenAI(\n",
969
  " openai_api_base=\"https://votum.openai.azure.com/\",\n",
@@ -980,8 +952,9 @@
980
  "\n",
981
  "tools = load_tools([\"serpapi\", \"llm-math\"], llm=model)\n",
982
  "\n",
 
983
  "agent_executor = initialize_agent(\n",
984
- " [*tools, read_cii], model, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n",
985
  "\n",
986
  "\n",
987
  "agent_executor.invoke(\n",
@@ -994,7 +967,7 @@
994
  "metadata": {},
995
  "outputs": [],
996
  "source": [
997
- "agent_executor = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n",
998
  "agent_executor.invoke({\"input\": \"What is the punishment for drinking and driving?\"})"
999
  ]
1000
  },
@@ -1171,15 +1144,27 @@
1171
  "from langchain.utilities import SerpAPIWrapper\n",
1172
  "from langchain.agents.tools import Tool\n",
1173
  "from langchain.chains import LLMMathChain\n",
 
1174
  "\n",
1175
- "planner = load_chat_planner(llm)\n",
1176
- "executor = load_agent_executor(llm, tools, verbose=True)\n",
 
 
 
 
1177
  "plan_agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)\n",
1178
  "\n",
1179
  "\n",
1180
  "plan_agent.run(\n",
1181
- " 'I bought a house in 2001 for 20 lakh rupees , i sold it in 2022 for 50 lakhs , what will be my profit?')\n"
1182
  ]
 
 
 
 
 
 
 
1183
  }
1184
  ],
1185
  "metadata": {
 
247
  },
248
  {
249
  "cell_type": "code",
250
+ "execution_count": null,
251
  "metadata": {
252
  "colab": {
253
  "base_uri": "https://localhost:8080/"
 
457
  },
458
  {
459
  "cell_type": "code",
460
+ "execution_count": null,
461
  "metadata": {
462
  "colab": {
463
  "base_uri": "https://localhost:8080/",
 
603
  "id": "Emj46ATxtV9C",
604
  "outputId": "169be147-f31e-4153-cf91-9b1cea6667be"
605
  },
606
+ "outputs": [],
 
 
 
 
 
 
 
 
 
607
  "source": [
608
  "\n",
609
  "from langchain.embeddings import HuggingFaceBgeEmbeddings\n",
 
631
  },
632
  {
633
  "cell_type": "code",
634
+ "execution_count": null,
635
  "metadata": {
636
  "colab": {
637
  "base_uri": "https://localhost:8080/"
 
639
  "id": "Q_eTIZwf4Dk2",
640
  "outputId": "57072a9e-bd8f-4296-b4e1-83c098f868c7"
641
  },
642
+ "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
 
643
  "source": [
644
  "%%time\n",
645
  "# Embed and store the texts\n",
 
656
  },
657
  {
658
  "cell_type": "code",
659
+ "execution_count": null,
660
  "metadata": {},
661
+ "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
 
662
  "source": [
663
  "# vectordb.save_local(\"faiss_index\")\n",
664
  "\n",
 
882
  " ),\n",
883
  "]\n",
884
  "\n",
885
+ "import langchain\n",
886
+ "langchain.verbose= True\n",
887
  "llm = OpenAI(temperature=0)\n",
888
  "\n",
889
  "react = initialize_agent(tools, model, agent=AgentType.REACT_DOCSTORE, verbose=True)"
 
932
  "import os\n",
933
  "from langchain.chat_models import AzureChatOpenAI\n",
934
  "from langchain.schema import HumanMessage\n",
935
+ "import langchain\n",
936
+ "\n",
937
  "\n",
938
+ "langchain.verbose = True\n",
939
  "\n",
940
  "model = AzureChatOpenAI(\n",
941
  " openai_api_base=\"https://votum.openai.azure.com/\",\n",
 
952
  "\n",
953
  "tools = load_tools([\"serpapi\", \"llm-math\"], llm=model)\n",
954
  "\n",
955
+ "# ZERO_SHOT_REACT_DESCRIPTION ,CHAT_ZERO_SHOT_REACT_DESCRIPTION\n",
956
  "agent_executor = initialize_agent(\n",
957
+ " [*tools], model, agent=AgentType.REACT_DOCSTORE, verbose=True)\n",
958
  "\n",
959
  "\n",
960
  "agent_executor.invoke(\n",
 
967
  "metadata": {},
968
  "outputs": [],
969
  "source": [
970
+ "agent_executor = initialize_agent(tools, llm, agent=AgentType.REACT_DOCSTORE, verbose=True)\n",
971
  "agent_executor.invoke({\"input\": \"What is the punishment for drinking and driving?\"})"
972
  ]
973
  },
 
1144
  "from langchain.utilities import SerpAPIWrapper\n",
1145
  "from langchain.agents.tools import Tool\n",
1146
  "from langchain.chains import LLMMathChain\n",
1147
+ "from langchain.agents import load_tools\n",
1148
  "\n",
1149
+ "import os\n",
1150
+ "\n",
1151
+ "os.environ[\"SERPAPI_API_KEY\"] = '94de7df75e512ca1fe42b3f51a034a0f0e4683e0f880f9cf7dee1a0eb36a069e'\n",
1152
+ "tools = load_tools([\"serpapi\", \"llm-math\"], llm=model)\n",
1153
+ "planner = load_chat_planner(model)\n",
1154
+ "executor = load_agent_executor(model, tools, verbose=True)\n",
1155
  "plan_agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)\n",
1156
  "\n",
1157
  "\n",
1158
  "plan_agent.run(\n",
1159
+ " 'Plan a 5 day trip for me to Paris, My budget is $2000. I would prefer 4 star hotel and minimal traveling. I live in Delhi, India,')\n"
1160
  ]
1161
+ },
1162
+ {
1163
+ "cell_type": "code",
1164
+ "execution_count": null,
1165
+ "metadata": {},
1166
+ "outputs": [],
1167
+ "source": []
1168
  }
1169
  ],
1170
  "metadata": {