# Rag_chatbot/app.py
import os
from dotenv import load_dotenv
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_cohere import CohereEmbeddings
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_groq import ChatGroq
from langgraph.checkpoint.memory import MemorySaver
from langchain.tools.retriever import create_retriever_tool
from langchain_core.messages import HumanMessage
from langgraph.prebuilt import create_react_agent
import gradio as gr
# Load API keys from a local .env file (if present) into the environment.
load_dotenv()

# MemorySaver is an in-memory LangGraph checkpointer: it stores per-thread
# conversation state so the agent remembers earlier turns within a session.
memory = MemorySaver()
# 1. Load, chunk, and index the contents of the target web page to create a retriever.
loader = WebBaseLoader(
    web_paths=("https://www.xevensolutions.com/",)
)
docs = loader.load()
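# Note: WebBaseLoader fetches only the URLs listed in web_paths (a single page here)
# and parses the HTML with BeautifulSoup under the hood; adding more URLs to the
# tuple would widen the knowledge base.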
# 2. Split the page into overlapping chunks so each piece fits comfortably in the embedding model.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(docs)
# 3. Initialize the Cohere embedding model (COHERE_API_KEY must be set in the environment or .env).
os.environ["COHERE_API_KEY"] = os.getenv("COHERE_API_KEY")
embeddings = CohereEmbeddings(
    model="embed-english-v3.0",
)
vector_store = InMemoryVectorStore.from_documents(documents=splits, embedding=embeddings)
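# Note: InMemoryVectorStore keeps the embeddings in process memory, so the index is
# rebuilt on every startup; a persistent store (e.g. FAISS or Chroma) could be swapped
# in for larger corpora.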
# MMR (maximal marginal relevance) retrieval: fetch_k candidates are retrieved first, then re-ranked for diversity.
retriever = vector_store.as_retriever(search_type="mmr", search_kwargs={"fetch_k": 3})
# 4. Set up the Groq chat model and expose the retriever as a tool for a ReAct agent.
os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY")
llm = ChatGroq(model="llama3-groq-8b-8192-tool-use-preview")  # model must support tool calling
tool = create_retriever_tool(
    retriever,
    "retrieval_tool",
    "Retrieves information from a knowledge base.",
)
tools = [tool]
agent_executor = create_react_agent(llm, tools, checkpointer=memory)
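# A minimal local smoke test (hypothetical thread id and question), kept commented
# out so it does not trigger extra API calls when the Space starts:
#
# if __name__ == "__main__":
#     test_config = {"configurable": {"thread_id": "thread-local-test"}}
#     result = agent_executor.invoke(
#         {"messages": [HumanMessage(content="What does Xeven Solutions do?")]},
#         config=test_config,
#     )
#     print(result["messages"][-1].content)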
# 5. Gradio glue: one LangGraph thread per browser session, keyed by the session hash.
threads = {}

def predict(message, history, request: gr.Request):
    session_id = request.session_hash
    # Create a thread config for this session on first use and reuse it afterwards,
    # so the MemorySaver checkpointer can recall earlier turns.
    if session_id not in threads:
        threads[session_id] = {"configurable": {"thread_id": f"thread-{session_id}"}}
    response_gen = agent_executor.stream(
        {"messages": [HumanMessage(content=message)]},
        config=threads[session_id],
        stream_mode="values",
    )
    # With stream_mode="values", each event carries the full message list;
    # the last message of the last event is the agent's final answer.
    last_msg = None
    for event in response_gen:
        last_msg = event["messages"][-1]
    return last_msg.content
gr.ChatInterface(
    predict,
    textbox=gr.Textbox(placeholder="Enter your message here", container=True, scale=7, submit_btn=True),
    title="RAG Chatbot",
    theme="soft",
    examples=[{"text": "Hello, how are you?"}, {"text": "Am I cool?"}, {"text": "What is AI?"}],
    cache_examples=False,
    type="messages",
).launch()