Commit
·
3b2114f
1
Parent(s):
f613fa0
update ai agent
Browse files- .gitignore +1 -0
- ai_agent.py +137 -0
- app.py +5 -1
.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
.env
|
ai_agent.py
ADDED
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dotenv import load_dotenv, find_dotenv
|
2 |
+
import os
|
3 |
+
import bs4
|
4 |
+
|
5 |
+
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
6 |
+
from langchain_community.document_loaders import WebBaseLoader
|
7 |
+
from langchain_huggingface import HuggingFaceEmbeddings
|
8 |
+
from langchain.prompts import ChatPromptTemplate
|
9 |
+
|
10 |
+
from langdetect import detect
|
11 |
+
from deep_translator import GoogleTranslator
|
12 |
+
|
13 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
14 |
+
from langchain.agents import initialize_agent, AgentType
|
15 |
+
from langchain_openai import ChatOpenAI
|
16 |
+
|
17 |
+
from langchain_community.utilities import WikipediaAPIWrapper, ArxivAPIWrapper, DuckDuckGoSearchAPIWrapper
|
18 |
+
from langchain_community.tools import WikipediaQueryRun, ArxivQueryRun, DuckDuckGoSearchRun
|
19 |
+
|
20 |
+
|
21 |
+
# Load environment variables (API keys) from the nearest .env file.
load_dotenv(find_dotenv())

# Research tools handed to the ReAct agent below; each is capped at one
# result / 200 characters to keep the agent's context window small.
wiki = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=200))

arxiv = ArxivQueryRun(api_wrapper=ArxivAPIWrapper(top_k_results=1, doc_content_chars_max=200))

# region="in-en": India/English results; time="y": restrict to the past year.
duckduckgo_search = DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(region="in-en", time="y", max_results=2))

tools = [wiki, arxiv, duckduckgo_search]
def translate_to_english(text):
    """Detect the language of *text* and translate it to English.

    Returns a ``(english_text, detected_language)`` tuple. English input is
    passed through unchanged; any detection or translation failure falls back
    to the original text with the language code ``"unknown"``.
    """
    try:
        lang = detect(text)
        if lang != "en":
            return GoogleTranslator(source=lang, target="en").translate(text), lang
        return text, "en"
    except Exception:
        # Best-effort: never let translation problems break the caller.
        return text, "unknown"
|
40 |
+
|
41 |
+
def translate_back(text, target_lang):
    """Translate English *text* back into *target_lang*.

    Returns *text* unchanged when the target is English, or when the
    translation service raises any error (best-effort fallback).
    """
    if target_lang == "en":
        return text
    try:
        return GoogleTranslator(source="en", target=target_lang).translate(text)
    except Exception:
        return text
|
48 |
+
|
49 |
+
def load_llm():
    """Build the Gemini chat model used by the agent.

    Re-loads the .env file (harmless if already loaded) and reads
    ``GEMINI_API_KEY`` from the environment.
    """
    load_dotenv()
    api_key = os.environ.get("GEMINI_API_KEY")  # pass the key directly here
    return ChatGoogleGenerativeAI(
        model="gemini-1.5-flash",
        temperature=0.7,
        google_api_key=api_key,
    )
|
56 |
+
|
57 |
+
# def load_llm():
|
58 |
+
# return ChatOpenAI(
|
59 |
+
# model_name="llama3-70b-8192",
|
60 |
+
# temperature=1,
|
61 |
+
# openai_api_key=os.getenv("GROQ_API_KEY"),
|
62 |
+
# openai_api_base="https://api.groq.com/openai/v1"
|
63 |
+
# )
|
64 |
+
|
65 |
+
def format_docs(docs):
    """Concatenate the page contents of *docs*, separated by blank lines."""
    chunks = [doc.page_content for doc in docs]
    return "\n\n".join(chunks)
|
67 |
+
|
68 |
+
|
69 |
+
|
70 |
+
def get_conversational_agent():
    """Create a zero-shot ReAct agent over the module-level research tools.

    The agent is capped at five reasoning iterations, runs quietly
    (no verbose trace, no intermediate steps) and recovers from LLM
    output-parsing errors instead of raising.
    """
    return initialize_agent(
        tools=tools,
        llm=load_llm(),
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=False,
        return_intermediate_steps=False,
        max_iterations=5,
        handle_parsing_errors=True,
    )
|
84 |
+
|
85 |
+
def ask_gemini_to_evaluate(question, answer):
    """Ask the agent whether *answer* adequately answers *question*.

    Returns ``True`` only when the agent replies with exactly "Hợp lý"
    ("reasonable"); any other reply — or any error while building/invoking
    the agent — counts as ``False``.
    """
    try:
        agent = get_conversational_agent()

        # Prompt is in Vietnamese and demands a one-word verdict so the
        # string comparison below stays trivial.
        prompt = f"""
Câu hỏi: {question}
Câu trả lời từ AI: {answer}
Hãy trả lời duy nhất một từ: "Hợp lý" nếu câu trả lời tốt, hoặc "Không hợp lý" nếu câu trả lời sai hoặc thiếu thông tin.
"""

        response = agent.invoke(prompt)

        return response["output"].strip() == "Hợp lý"
    except Exception:
        # Was a bare `except:` which also swallowed KeyboardInterrupt and
        # SystemExit; only genuine errors should degrade to False.
        return False
|
102 |
+
class AIAgent:
    """Fallback answerer: when a model's answer fails an LLM sanity check,
    re-ask the question through a tool-using agent (in English) and translate
    the result back to the user's language."""

    def __init__(self):
        # Blog-post loader restricted to the article's content/title/header
        # nodes. NOTE(review): loader, text_splitter, embedding and prompt are
        # built here but never used by ai_agent() below — presumably leftovers
        # from a RAG pipeline; confirm before removing.
        self.loader = WebBaseLoader(
            web_paths=("https://lilianweng.github.io/posts/2023-06-23-agent/",),
            bs_kwargs=dict(
                parse_only=bs4.SoupStrainer(
                    class_=("post-content", "post-title", "post-header")
                )
            ),
        )

        # Token-based splitter: ~300-token chunks with 50-token overlap.
        self.text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
            chunk_size=300,
            chunk_overlap=50
        )

        self.embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        # The tool-using ReAct agent that actually answers questions.
        self.agent = get_conversational_agent()

        self.prompt = ChatPromptTemplate.from_template(
            "Answer the question based only on the following context:\n{context}\n\nQuestion: {question}"
        )

    def ai_agent(self, question, answer):
        """Return *answer* if the evaluator deems it adequate; otherwise
        re-answer *question* via the agent, translating to English and back.

        On any error a fixed Vietnamese quota message is returned ("You have
        run out of questions for today, please come back later").
        """
        try:
            # Keep the original answer when Gemini judges it acceptable.
            if ask_gemini_to_evaluate(question, answer):
                return answer

            # Agent works in English; remember the user's language.
            translated_question, original_lang = translate_to_english(question)

            answer = self.agent.invoke(translated_question)

            answer = translate_back(answer['output'], original_lang)
        except Exception:
            # NOTE(review): every failure (not just rate limits) surfaces as
            # the "out of daily questions" message — consider narrowing.
            return "Bạn đã hết lượt hỏi trong ngày, vui lòng quay lại sau."
        return answer
|
app.py
CHANGED
@@ -29,7 +29,11 @@ def prediction():
|
|
29 |
model = InferenceModel()
|
30 |
answer = model.predict(question=question, extract=ner_results)
|
31 |
|
32 |
-
|
|
|
|
|
|
|
|
|
33 |
|
34 |
if __name__ == '__main__':
|
35 |
app.run(host="0.0.0.0", port=7860)
|
|
|
29 |
model = InferenceModel()
|
30 |
answer = model.predict(question=question, extract=ner_results)
|
31 |
|
32 |
+
# Gọi AI Agent để đánh giá câu trả lời
|
33 |
+
ai_agent = AIAgent()
|
34 |
+
results = ai_agent.ai_agent(question, answer)
|
35 |
+
|
36 |
+
return jsonify({"answer": results}), 200
|
37 |
|
38 |
if __name__ == '__main__':
|
39 |
app.run(host="0.0.0.0", port=7860)
|