"""Turkish-language RAG chatbot over a local text corpus (stj.txt).

Pipeline: Cohere chat LLM + Cohere multilingual embeddings -> in-memory
vector store -> history-aware retrieval chain, wrapped in a LangGraph
StateGraph with a MemorySaver checkpointer (one conversation per thread_id),
served through a Gradio Blocks UI with a disclaimer gate.
"""

import os
import uuid
from typing import Sequence

import gradio as gr
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_cohere import ChatCohere, CohereEmbeddings
from langchain_community.document_loaders import TextLoader
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages
from typing_extensions import Annotated, TypedDict

COHERE_API_KEY = os.getenv("COHERE_API_KEY")

# Deterministic generation (temperature=0) for reproducible answers.
llm = ChatCohere(
    model="c4ai-aya-expanse-32b",
    cohere_api_key=COHERE_API_KEY,
    temperature=0,
)

# Load the corpus exactly once (the original called loader.load() twice and
# discarded the first result).
docs = TextLoader("stj.txt").load()

embeddings = CohereEmbeddings(
    model="embed-multilingual-v3.0",
    cohere_api_key=COHERE_API_KEY,
)

# Chunk the documents for retrieval; 200-char overlap keeps context across
# chunk boundaries.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(docs)

vectorstore = InMemoryVectorStore.from_documents(
    documents=splits,
    embedding=embeddings,
)
retriever = vectorstore.as_retriever()

# Prompt that rewrites the latest user turn into a standalone question so
# retrieval works without the chat history.
contextualize_q_system_prompt = (
    "Sohbet geçmişi ve en son kullanıcı sorusu verilirse, sohbet geçmişine atıfta bulunabilecek en son kullanıcı sorusunu, sohbet geçmişi olmadan anlaşılabilecek bağımsız bir soru haline getirin. Soruyu yanıtlamayın, sadece yeniden düzenleyin ve gerekirse geri döndürün."
)
contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
history_aware_retriever = create_history_aware_retriever(
    llm, retriever, contextualize_q_prompt
)

# QA prompt: answer only from the retrieved context, max three sentences.
system_prompt = (
    "Soru-cevap görevleri için bir asistansın. Soruyu yanıtlamak için alınan aşağıdaki bağlam parçalarını kullan. Cevabı bilmiyorsan, bilmiyorum de. Cevabı üç cümleyle sınırla ve kısa tut."
    "\n\n"
    "{context}"
)
qa_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)


class State(TypedDict):
    """LangGraph state for one conversation turn."""

    input: str  # latest user question
    # add_messages appends (rather than overwrites) across turns
    chat_history: Annotated[Sequence[BaseMessage], add_messages]
    context: str  # retrieved documents fed to the QA chain
    answer: str  # model's final answer


def call_model(state: State):
    """Run the RAG chain for one turn and append both messages to history."""
    response = rag_chain.invoke(state)
    return {
        "chat_history": [
            HumanMessage(state["input"]),
            AIMessage(response["answer"]),
        ],
        "context": response["context"],
        "answer": response["answer"],
    }


workflow = StateGraph(state_schema=State)
workflow.add_node("model", call_model)
workflow.add_edge(START, "model")

# MemorySaver persists per-thread state, so chat history survives between
# calls for the same thread_id.
memory = MemorySaver()
app = workflow.compile(checkpointer=memory)


def rag_response(user_input, chat_history, thread_id):
    """Gradio handler: answer one question and update the chat display.

    Only the new input is sent into the graph: MemorySaver already holds the
    accumulated message history for this thread_id. (The original also passed
    `[msg[0] for msg in chat_history]` — bare user strings with no AI turns —
    which duplicated and corrupted the persisted history every turn.)
    """
    config = {"configurable": {"thread_id": thread_id}}
    result = app.invoke({"input": user_input}, config=config)
    chat_history.append((user_input, result["answer"]))
    # Clear the textbox, refresh the chatbot component.
    return "", chat_history


def acknowledge_disclaimer():
    """Hide the disclaimer UI and start a fresh conversation thread."""
    thread_id = str(uuid.uuid4())
    return gr.update(visible=False), gr.update(visible=False), thread_id


with gr.Blocks() as demo:
    disclaimer_message = gr.Markdown(
        "**⚠️ Uyarı:** Büyük dil modelleri [halüsinasyon](https://en.wikipedia.org/wiki/Hallucination_(artificial_intelligence)) sebebi ile yanlış cevaplar verebilir. Lütfen aldığınız cevapları uygulamadan önce doğrulayınız.\nBu demo resmi olarak Işık Üniversitesini temsil **etmemektedir.**",
        visible=True,
    )
    ok_button = gr.Button("OK", visible=True)
    chatbox = gr.Chatbot(label="Sohbet Geçmişi", visible=False)
    user_input = gr.Textbox(
        placeholder="Soru", label="Kullanıcı Soru Alanı", visible=False
    )
    submit_button = gr.Button("Gönder", visible=False)
    thread_id_component = gr.State()

    # First click handler hides the disclaimer and mints a thread id ...
    ok_button.click(
        acknowledge_disclaimer,
        outputs=[disclaimer_message, ok_button, thread_id_component],
    )
    # ... second one reveals the chat widgets.
    ok_button.click(
        lambda: [gr.update(visible=True)] * 3,
        outputs=[chatbox, user_input, submit_button],
    )
    submit_button.click(
        rag_response,
        inputs=[user_input, chatbox, thread_id_component],
        outputs=[user_input, chatbox],
    )

demo.launch()