Merge branch 'dev' into feat/talk_to_data_graph
Changed files:
- app.py (+51, −80)
- climateqa/chat.py (+11, −3)
- climateqa/engine/chains/answer_rag.py (+4, −2)
- climateqa/engine/chains/follow_up.py (+32, −0, new file)
- climateqa/engine/chains/retrieve_documents.py (+2, −8)
- climateqa/engine/chains/standalone_question.py (+39, −0, new file)
- climateqa/engine/graph.py (+36, −19)
- front/tabs/__init__.py (+4, −1)
- front/tabs/chat_interface.py (+15, −12)
- front/tabs/main_tab.py (+59, −27)
- front/tabs/tab_config.py (+19, −29)
- style.css (+31, −1)
app.py
CHANGED
@@ -15,7 +15,8 @@ from climateqa.chat import start_chat, chat_stream, finish_chat
 from climateqa.engine.talk_to_data.main import ask_drias, DRIAS_MODELS
 from climateqa.engine.talk_to_data.myVanna import MyVanna

-from front.tabs import (create_config_modal, …
+from front.tabs import (create_config_modal, cqa_tab, create_about_tab)
+from front.tabs import (MainTabPanel, ConfigPanel)
 from front.utils import process_figures
 from gradio_modal import Modal

@@ -239,39 +240,12 @@ def cqa_tab(tab_name):
     )

-        …
-        "sources_raw": sources_raw,
-        "new_figures": new_figures,
-        "current_graphs": current_graphs,
-        "examples_hidden": examples_hidden,
-        "sources_textbox": sources_textbox,
-        "figures_cards": figures_cards,
-        "gallery_component": gallery_component,
-        "config_button": config_button,
-        "papers_direct_search" : papers_direct_search,
-        "papers_html": papers_html,
-        "citations_network": citations_network,
-        "papers_summary": papers_summary,
-        "tab_recommended_content": tab_recommended_content,
-        "tab_sources": tab_sources,
-        "tab_figures": tab_figures,
-        "tab_graphs": tab_graphs,
-        "tab_papers": tab_papers,
-        "graph_container": graphs_container,
-        # "vanna_sql_query": vanna_sql_query,
-        # "vanna_table" : vanna_table,
-        # "vanna_display": vanna_display
-    }
-
-def config_event_handling(main_tabs_components : list[dict], config_componenets : dict):
-    config_open = config_componenets["config_open"]
-    config_modal = config_componenets["config_modal"]
-    close_config_modal = config_componenets["close_config_modal_button"]
+def config_event_handling(main_tabs_components : list[MainTabPanel], config_componenets : ConfigPanel):
+    config_open = config_componenets.config_open
+    config_modal = config_componenets.config_modal
+    close_config_modal = config_componenets.close_config_modal_button

-    for button in [close_config_modal] + [main_tab_component…
+    for button in [close_config_modal] + [main_tab_component.config_button for main_tab_component in main_tabs_components]:
         button.click(
             fn=update_config_modal_visibility,
             inputs=[config_open],

@@ -279,58 +253,45 @@ def config_event_handling(main_tabs_components : list[dict], config_componenets
         )

 def event_handling(
-    main_tab_components,
-    config_components,
+    main_tab_components : MainTabPanel,
+    config_components : ConfigPanel,
     tab_name="ClimateQ&A"
 ):
-    chatbot = main_tab_components["chatbot"]
-    textbox = main_tab_components["textbox"]
-    tabs = main_tab_components["tabs"]
-    sources_raw = main_tab_components["sources_raw"]
-    new_figures = main_tab_components["new_figures"]
-    current_graphs = main_tab_components["current_graphs"]
-    examples_hidden = main_tab_components["examples_hidden"]
-    sources_textbox = main_tab_components["sources_textbox"]
-    figures_cards = main_tab_components["figures_cards"]
-    gallery_component = main_tab_components["gallery_component"]
-    …
-    # vanna_table = main_tab_components["vanna_table"]
-    # vanna_display = main_tab_components["vanna_display"]
-
-    output_query = config_components["output_query"]
-    output_language = config_components["output_language"]
-    # close_config_modal = config_components["close_config_modal_button"]
+    chatbot = main_tab_components.chatbot
+    textbox = main_tab_components.textbox
+    tabs = main_tab_components.tabs
+    sources_raw = main_tab_components.sources_raw
+    new_figures = main_tab_components.new_figures
+    current_graphs = main_tab_components.current_graphs
+    examples_hidden = main_tab_components.examples_hidden
+    sources_textbox = main_tab_components.sources_textbox
+    figures_cards = main_tab_components.figures_cards
+    gallery_component = main_tab_components.gallery_component
+    papers_direct_search = main_tab_components.papers_direct_search
+    papers_html = main_tab_components.papers_html
+    citations_network = main_tab_components.citations_network
+    papers_summary = main_tab_components.papers_summary
+    tab_recommended_content = main_tab_components.tab_recommended_content
+    tab_sources = main_tab_components.tab_sources
+    tab_figures = main_tab_components.tab_figures
+    tab_graphs = main_tab_components.tab_graphs
+    tab_papers = main_tab_components.tab_papers
+    graphs_container = main_tab_components.graph_container
+    follow_up_examples = main_tab_components.follow_up_examples
+    follow_up_examples_hidden = main_tab_components.follow_up_examples_hidden
+
+    dropdown_sources = config_components.dropdown_sources
+    dropdown_reports = config_components.dropdown_reports
+    dropdown_external_sources = config_components.dropdown_external_sources
+    search_only = config_components.search_only
+    dropdown_audience = config_components.dropdown_audience
+    after = config_components.after
+    output_query = config_components.output_query
+    output_language = config_components.output_language

     new_sources_hmtl = gr.State([])
     ttd_data = gr.State([])

-    # for button in [config_button, close_config_modal]:
-    #     button.click(
-    #         fn=update_config_modal_visibility,
-    #         inputs=[config_open],
-    #         outputs=[config_modal, config_open]
-    #     )

     if tab_name == "ClimateQ&A":
         print("chat cqa - message sent")

@@ -338,15 +299,20 @@ def event_handling(
         # Event for textbox
         (textbox
             .submit(start_chat, [textbox, chatbot, search_only], [textbox, tabs, chatbot, sources_raw], queue=False, api_name=f"start_chat_{textbox.elem_id}")
-            .then(chat, [textbox, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs], concurrency_limit=8, api_name=f"chat_{textbox.elem_id}")
+            .then(chat, [textbox, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs, follow_up_examples.dataset], concurrency_limit=8, api_name=f"chat_{textbox.elem_id}")
             .then(finish_chat, None, [textbox], api_name=f"finish_chat_{textbox.elem_id}")
         )
         # Event for examples_hidden
         (examples_hidden
             .change(start_chat, [examples_hidden, chatbot, search_only], [examples_hidden, tabs, chatbot, sources_raw], queue=False, api_name=f"start_chat_{examples_hidden.elem_id}")
-            .then(chat, [examples_hidden, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs], concurrency_limit=8, api_name=f"chat_{examples_hidden.elem_id}")
+            .then(chat, [examples_hidden, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs, follow_up_examples.dataset], concurrency_limit=8, api_name=f"chat_{examples_hidden.elem_id}")
             .then(finish_chat, None, [textbox], api_name=f"finish_chat_{examples_hidden.elem_id}")
         )
+        (follow_up_examples_hidden
+            .change(start_chat, [follow_up_examples_hidden, chatbot, search_only], [follow_up_examples_hidden, tabs, chatbot, sources_raw], queue=False, api_name=f"start_chat_{examples_hidden.elem_id}")
+            .then(chat, [follow_up_examples_hidden, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs, follow_up_examples.dataset], concurrency_limit=8, api_name=f"chat_{examples_hidden.elem_id}")
+            .then(finish_chat, None, [textbox], api_name=f"finish_chat_{follow_up_examples_hidden.elem_id}")
+        )

     elif tab_name == "Beta - POC Adapt'Action":
         print("chat poc - message sent")

@@ -362,6 +328,11 @@ def event_handling(
             .then(chat_poc, [examples_hidden, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs], concurrency_limit=8, api_name=f"chat_{examples_hidden.elem_id}")
             .then(finish_chat, None, [textbox], api_name=f"finish_chat_{examples_hidden.elem_id}")
         )
+        (follow_up_examples_hidden
+            .change(start_chat, [follow_up_examples_hidden, chatbot, search_only], [follow_up_examples_hidden, tabs, chatbot, sources_raw], queue=False, api_name=f"start_chat_{examples_hidden.elem_id}")
+            .then(chat, [follow_up_examples_hidden, chatbot, dropdown_audience, dropdown_sources, dropdown_reports, dropdown_external_sources, search_only], [chatbot, new_sources_hmtl, output_query, output_language, new_figures, current_graphs, follow_up_examples.dataset], concurrency_limit=8, api_name=f"chat_{examples_hidden.elem_id}")
            .then(finish_chat, None, [textbox], api_name=f"finish_chat_{follow_up_examples_hidden.elem_id}")
+        )

     new_sources_hmtl.change(lambda x : x, inputs = [new_sources_hmtl], outputs = [sources_textbox])
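Note on the wiring above: a gr.Examples helper is not itself a valid event output, but its underlying Dataset component is, so the chat handler refreshes the suggestion list by returning gr.Dataset(samples=...) into follow_up_examples.dataset; clicking a suggestion fills the hidden follow_up_examples_hidden textbox, whose .change event replays the normal chat pipeline. Below is a minimal, self-contained sketch of that pattern (assuming Gradio 4+; the handler, names, and strings here are illustrative, not from this commit):

    import gradio as gr

    def answer(message):
        # Hypothetical follow-ups; the real app takes them from the agent's
        # generate_follow_up node.
        suggestions = [[f"Tell me more about {message}"], ["Detail the first point"]]
        return f"You asked: {message}", gr.Dataset(samples=suggestions)

    with gr.Blocks() as demo:
        chat_out = gr.Textbox(label="Answer")
        question = gr.Textbox(label="Question")
        hidden = gr.Textbox(visible=False)  # filled when a suggestion is clicked
        follow_ups = gr.Examples(examples=[["What is climate change?"]], inputs=[hidden])
        question.submit(answer, [question], [chat_out, follow_ups.dataset])
        hidden.change(answer, [hidden], [chat_out, follow_ups.dataset])

    demo.launch()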
climateqa/chat.py
CHANGED
@@ -101,6 +101,7 @@ async def chat_stream(
     audience_prompt = init_audience(audience)
     sources = sources or ["IPCC", "IPBES"]
     reports = reports or []
+    relevant_history_discussion = history[-2:] if len(history) > 1 else []

     # Prepare inputs for agent
     inputs = {

@@ -109,7 +110,8 @@ async def chat_stream(
         "sources_input": sources,
         "relevant_content_sources_selection": relevant_content_sources_selection,
         "search_only": search_only,
-        "reports": reports
+        "reports": reports,
+        "chat_history": relevant_history_discussion,
     }

     # Get streaming events from agent

@@ -129,6 +131,7 @@ async def chat_stream(
     retrieved_contents = []
     answer_message_content = ""
     vanna_data = {}
+    follow_up_examples = gr.Dataset(samples=[])

     # Define processing steps
     steps_display = {

@@ -200,7 +203,12 @@ async def chat_stream(
                 sub_questions = [q["question"] + "-> relevant sources : " + str(q["sources"]) for q in event["data"]["output"]["questions_list"]]
                 history[-1].content += "Decompose question into sub-questions:\n\n - " + "\n - ".join(sub_questions)

-            yield history, docs_html, output_query, output_language, related_contents, graphs_html#, vanna_data
+            # Handle follow up questions
+            if event["name"] == "generate_follow_up" and event["event"] == "on_chain_end":
+                follow_up_examples = event["data"]["output"].get("follow_up_questions", [])
+                follow_up_examples = gr.Dataset(samples=[ [question] for question in follow_up_examples ])
+
+            yield history, docs_html, output_query, output_language, related_contents, graphs_html, follow_up_examples#, vanna_data

         except Exception as e:
             print(f"Event {event} has failed")

@@ -211,4 +219,4 @@ async def chat_stream(
     # Call the function to log interaction
     log_interaction_to_azure(history, output_query, sources, docs, share_client, user_id)

-    yield history, docs_html, output_query, output_language, related_contents, graphs_html#, vanna_data
+    yield history, docs_html, output_query, output_language, related_contents, graphs_html, follow_up_examples#, vanna_data
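The streaming loop above relies on the LangGraph event stream: each node emits on_chain_start/on_chain_end events keyed by its name, so the UI can pick the follow-up questions off the closing event of the generate_follow_up node and push them into the Examples dataset. A condensed sketch of that contract, assuming a compiled LangGraph agent and the v2 events API (function name and wrapper are illustrative):

    import gradio as gr

    async def collect_follow_ups(app, inputs):
        # `app` is a compiled LangGraph agent (see climateqa/engine/graph.py).
        follow_up_examples = gr.Dataset(samples=[])
        async for event in app.astream_events(inputs, version="v2"):
            if event["event"] == "on_chain_end" and event["name"] == "generate_follow_up":
                questions = event["data"]["output"].get("follow_up_questions", [])
                follow_up_examples = gr.Dataset(samples=[[q] for q in questions])
        return follow_up_examples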
climateqa/engine/chains/answer_rag.py
CHANGED
@@ -65,6 +65,7 @@ def make_rag_node(llm,with_docs = True):
     async def answer_rag(state,config):
         print("---- Answer RAG ----")
         start_time = time.time()
+        chat_history = state.get("chat_history",[])
         print("Sources used : " + "\n".join([x.metadata["short_name"] + " - page " + str(x.metadata["page_number"]) for x in state["documents"]]))

         answer = await rag_chain.ainvoke(state,config)

@@ -73,9 +74,10 @@ def make_rag_node(llm,with_docs = True):
         elapsed_time = end_time - start_time
         print("RAG elapsed time: ", elapsed_time)
         print("Answer size : ", len(answer))
-        # print(f"\n\nAnswer:\n{answer}")

-        …
+        chat_history.append({"question":state["query"],"answer":answer})
+
+        return {"answer":answer,"chat_history": chat_history}

     return answer_rag
climateqa/engine/chains/follow_up.py
ADDED
@@ -0,0 +1,32 @@
+from typing import List
+from langchain.prompts import ChatPromptTemplate
+
+
+FOLLOW_UP_TEMPLATE = """Based on the previous question and answer, generate 2-3 relevant follow-up questions that would help explore the topic further.
+
+Previous Question: {user_input}
+Previous Answer: {answer}
+
+Generate short, concise, focused follow-up questions.
+You don't need a full question, as it will later be reformulated into a standalone question with the context. E.g. "Detail the first point".
+"""
+
+def make_follow_up_node(llm):
+    prompt = ChatPromptTemplate.from_template(FOLLOW_UP_TEMPLATE)
+
+    def generate_follow_up(state):
+        if not state.get("answer"):
+            return state
+
+        response = llm.invoke(prompt.format(
+            user_input=state["user_input"],
+            answer=state["answer"]
+        ))
+
+        # Extract questions from response, one per line
+        follow_ups = [q.strip() for q in response.content.split("\n") if q.strip()]
+        state["follow_up_questions"] = follow_ups
+
+        return state
+
+    return generate_follow_up
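The node reads user_input and answer from the graph state and writes follow_up_questions back, one question per line of the model's reply. A usage sketch of that contract; FakeListChatModel and the sample strings are test-only assumptions so the snippet runs offline:

    from langchain_core.language_models.fake_chat_models import FakeListChatModel
    from climateqa.engine.chains.follow_up import make_follow_up_node

    # The fake model replays a fixed response, standing in for the real LLM.
    llm = FakeListChatModel(responses=["What drives sea level rise?\nHow fast is it happening?"])
    generate_follow_up = make_follow_up_node(llm)

    state = {
        "user_input": "What are the effects of global warming?",
        "answer": "Global warming raises average temperatures and sea levels ...",
    }
    state = generate_follow_up(state)
    print(state["follow_up_questions"])
    # ['What drives sea level rise?', 'How fast is it happening?']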
climateqa/engine/chains/retrieve_documents.py
CHANGED
@@ -621,10 +621,7 @@ def make_IPx_retriever_node(vectorstore,reranker,llm,rerank_by_question=True, k_…

 def make_POC_retriever_node(vectorstore,reranker,llm,rerank_by_question=True, k_final=15, k_before_reranking=100, k_summary=5):

-    async def retrieve_POC_docs_node(state, config):
-        if "POC region" not in state["relevant_content_sources_selection"] :
-            return {}
-
+    async def retrieve_POC_docs_node(state, config):
         source_type = "POC"
         POC_questions_index = [i for i, x in enumerate(state["questions_list"]) if x["source_type"] == "POC"]

@@ -665,10 +662,7 @@ def make_POC_by_ToC_retriever_node(
     k_summary=5,
 ):

-    async def retrieve_POC_docs_node(state, config):
-        if "POC region" not in state["relevant_content_sources_selection"] :
-            return {}
-
+    async def retrieve_POC_docs_node(state, config):
         search_figures = "Figures (IPCC/IPBES)" in state["relevant_content_sources_selection"]
         search_only = state["search_only"]
         search_only = state["search_only"]
climateqa/engine/chains/standalone_question.py
ADDED
@@ -0,0 +1,39 @@
+from langchain.prompts import ChatPromptTemplate
+
+def make_standalone_question_chain(llm):
+    prompt = ChatPromptTemplate.from_messages([
+        ("system", """You are a helpful assistant that transforms user questions into standalone questions
+        by incorporating context from the chat history if needed. The output should be a self-contained
+        question that can be understood without any additional context.
+
+        Examples:
+        Chat History: "Let's talk about renewable energy"
+        User Input: "What about solar?"
+        Output: "What are the key aspects of solar energy as a renewable energy source?"
+
+        Chat History: "What causes global warming?"
+        User Input: "And what are its effects?"
+        Output: "What are the effects of global warming on the environment and society?"
+        """),
+        ("user", """Chat History: {chat_history}
+        User Question: {question}
+
+        Transform this into a standalone question:""")
+    ])
+
+    chain = prompt | llm
+    return chain
+
+def make_standalone_question_node(llm):
+    standalone_chain = make_standalone_question_chain(llm)
+
+    def transform_to_standalone(state):
+        chat_history = state.get("chat_history", "")
+        output = standalone_chain.invoke({
+            "chat_history": chat_history,
+            "question": state["user_input"]
+        })
+        state["user_input"] = output.content
+        return state
+
+    return transform_to_standalone
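This node runs first in the graph and rewrites the raw user input in place, so every downstream node (intent categorization, retrieval, RAG) sees a self-contained question. The same offline pattern exercises it; the fake response stands in for the rewritten question:

    from langchain_core.language_models.fake_chat_models import FakeListChatModel
    from climateqa.engine.chains.standalone_question import make_standalone_question_node

    llm = FakeListChatModel(responses=["What are the effects of global warming?"])
    transform = make_standalone_question_node(llm)

    state = {
        "user_input": "And what are its effects?",
        "chat_history": [{"question": "What causes global warming?", "answer": "..."}],
    }
    state = transform(state)
    print(state["user_input"])  # now a self-contained question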
climateqa/engine/graph.py
CHANGED
@@ -23,13 +23,15 @@ from .chains.retrieve_documents import make_IPx_retriever_node, make_POC_retriev…
 from .chains.answer_rag import make_rag_node
 from .chains.graph_retriever import make_graph_retriever_node
 from .chains.chitchat_categorization import make_chitchat_intent_categorization_node
-…
+from .chains.standalone_question import make_standalone_question_node
+from .chains.follow_up import make_follow_up_node

 class GraphState(TypedDict):
     """
     Represents the state of our graph.
     """
     user_input : str
+    chat_history : str
     language : str
     intent : str
     search_graphs_chitchat : bool

@@ -49,6 +51,7 @@ class GraphState(TypedDict):
     recommended_content : List[Document] # OWID Graphs # TODO merge with related_contents
     search_only : bool = False
     reports : List[str] = []
+    follow_up_questions: List[str] = []

 def dummy(state):
     return

@@ -100,15 +103,6 @@ def route_continue_retrieve_documents(state):
     else:
         return "retrieve_documents"

-def route_continue_retrieve_local_documents(state):
-    index_question_poc = [i for i, x in enumerate(state["questions_list"]) if x["source_type"] == "POC"]
-    questions_poc_finished = all(elem in state["handled_questions_index"] for elem in index_question_poc)
-    # if questions_poc_finished and state["search_only"]:
-    #     return END
-    if questions_poc_finished or ("POC region" not in state["relevant_content_sources_selection"]):
-        return "end_retrieve_local_documents"
-    else:
-        return "retrieve_local_data"

 def route_retrieve_documents(state):
     sources_to_retrieve = []

@@ -120,6 +114,11 @@ def route_retrieve_documents(state):
         return END
     return sources_to_retrieve

+def route_follow_up(state):
+    if state["follow_up_questions"]:
+        return "process_follow_up"
+    return END
+
 def make_id_dict(values):
     return {k:k for k in values}

@@ -128,6 +127,7 @@ def make_graph_agent(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_regi…
     workflow = StateGraph(GraphState)

     # Define the node functions
+    standalone_question_node = make_standalone_question_node(llm)
     categorize_intent = make_intent_categorization_node(llm)
     transform_query = make_query_transform_node(llm)
     translate_query = make_translation_node(llm)

@@ -139,9 +139,11 @@ def make_graph_agent(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_regi…
     answer_rag = make_rag_node(llm, with_docs=True)
     answer_rag_no_docs = make_rag_node(llm, with_docs=False)
     chitchat_categorize_intent = make_chitchat_intent_categorization_node(llm)
+    generate_follow_up = make_follow_up_node(llm)

     # Define the nodes
     # workflow.add_node("set_defaults", set_defaults)
+    workflow.add_node("standalone_question", standalone_question_node)
     workflow.add_node("categorize_intent", categorize_intent)
     workflow.add_node("answer_climate", dummy)
     workflow.add_node("answer_search", answer_search)

@@ -155,9 +157,11 @@ def make_graph_agent(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_regi…
     workflow.add_node("retrieve_documents", retrieve_documents)
     workflow.add_node("answer_rag", answer_rag)
     workflow.add_node("answer_rag_no_docs", answer_rag_no_docs)
+    workflow.add_node("generate_follow_up", generate_follow_up)
+    # workflow.add_node("process_follow_up", standalone_question_node)

     # Entry point
-    workflow.set_entry_point("…
+    workflow.set_entry_point("standalone_question")

     # CONDITIONAL EDGES
     workflow.add_conditional_edges(

@@ -189,20 +193,29 @@ def make_graph_agent(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_regi…
         make_id_dict(["retrieve_graphs", END])
     )

+    # workflow.add_conditional_edges(
+    #     "generate_follow_up",
+    #     route_follow_up,
+    #     make_id_dict(["process_follow_up", END])
+    # )
+
     # Define the edges
+    workflow.add_edge("standalone_question", "categorize_intent")
     workflow.add_edge("translate_query", "transform_query")
     workflow.add_edge("transform_query", "retrieve_documents") #TODO put back
     # workflow.add_edge("transform_query", "retrieve_local_data")
     # workflow.add_edge("transform_query", END) # TODO remove

     workflow.add_edge("retrieve_graphs", END)
-    workflow.add_edge("answer_rag", …
-    workflow.add_edge("answer_rag_no_docs", …
+    workflow.add_edge("answer_rag", "generate_follow_up")
+    workflow.add_edge("answer_rag_no_docs", "generate_follow_up")
     workflow.add_edge("answer_chitchat", "chitchat_categorize_intent")
     workflow.add_edge("retrieve_graphs_chitchat", END)

     # workflow.add_edge("retrieve_local_data", "answer_search")
     workflow.add_edge("retrieve_documents", "answer_search")
+    workflow.add_edge("generate_follow_up", END)
+    # workflow.add_edge("process_follow_up", "categorize_intent")

     # Compile
     app = workflow.compile()

@@ -228,6 +241,8 @@ def make_graph_agent_poc(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_…
     workflow = StateGraph(GraphState)

     # Define the node functions
+    standalone_question_node = make_standalone_question_node(llm)
+
     categorize_intent = make_intent_categorization_node(llm)
     transform_query = make_query_transform_node(llm)
     translate_query = make_translation_node(llm)

@@ -240,9 +255,11 @@ def make_graph_agent_poc(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_…
     answer_rag = make_rag_node(llm, with_docs=True)
     answer_rag_no_docs = make_rag_node(llm, with_docs=False)
     chitchat_categorize_intent = make_chitchat_intent_categorization_node(llm)
+    generate_follow_up = make_follow_up_node(llm)

     # Define the nodes
     # workflow.add_node("set_defaults", set_defaults)
+    workflow.add_node("standalone_question", standalone_question_node)
     workflow.add_node("categorize_intent", categorize_intent)
     workflow.add_node("answer_climate", dummy)
     workflow.add_node("answer_search", answer_search)

@@ -258,9 +275,10 @@ def make_graph_agent_poc(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_…
     workflow.add_node("retrieve_documents", retrieve_documents)
     workflow.add_node("answer_rag", answer_rag)
     workflow.add_node("answer_rag_no_docs", answer_rag_no_docs)
+    workflow.add_node("generate_follow_up", generate_follow_up)

     # Entry point
-    workflow.set_entry_point("…
+    workflow.set_entry_point("standalone_question")

     # CONDITIONAL EDGES
     workflow.add_conditional_edges(

@@ -293,22 +311,21 @@ def make_graph_agent_poc(llm, vectorstore_ipcc, vectorstore_graphs, vectorstore_…
     )

     # Define the edges
+    workflow.add_edge("standalone_question", "categorize_intent")
     workflow.add_edge("translate_query", "transform_query")
     workflow.add_edge("transform_query", "retrieve_documents") #TODO put back
     workflow.add_edge("transform_query", "retrieve_local_data")
     # workflow.add_edge("transform_query", END) # TODO remove

     workflow.add_edge("retrieve_graphs", END)
-    workflow.add_edge("answer_rag", …
-    workflow.add_edge("answer_rag_no_docs", …
+    workflow.add_edge("answer_rag", "generate_follow_up")
+    workflow.add_edge("answer_rag_no_docs", "generate_follow_up")
     workflow.add_edge("answer_chitchat", "chitchat_categorize_intent")
     workflow.add_edge("retrieve_graphs_chitchat", END)

     workflow.add_edge("retrieve_local_data", "answer_search")
     workflow.add_edge("retrieve_documents", "answer_search")
-
-    # workflow.add_edge("transform_query", "retrieve_drias_data")
-    # workflow.add_edge("retrieve_drias_data", END)
+    workflow.add_edge("generate_follow_up", END)

     # Compile
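Both agents now share the same shape: the conversation enters through standalone_question, and every RAG answer is post-processed by generate_follow_up before the graph ends. A stripped-down, runnable sketch of that control flow (node bodies elided; the straight categorize_intent -> answer_rag edge stands in for the real graph's conditional routing):

    from typing import List, TypedDict
    from langgraph.graph import StateGraph, END

    class State(TypedDict, total=False):
        user_input: str
        chat_history: str
        answer: str
        follow_up_questions: List[str]

    g = StateGraph(State)
    for name in ["standalone_question", "categorize_intent", "answer_rag", "generate_follow_up"]:
        g.add_node(name, lambda state: state)        # placeholder node bodies
    g.set_entry_point("standalone_question")         # new: resolve the query against chat history first
    g.add_edge("standalone_question", "categorize_intent")
    g.add_edge("categorize_intent", "answer_rag")    # the real graph routes conditionally here
    g.add_edge("answer_rag", "generate_follow_up")   # new: suggest follow-ups after answering
    g.add_edge("generate_follow_up", END)
    app = g.compile()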
front/tabs/__init__.py
CHANGED
@@ -3,4 +3,7 @@ from .tab_examples import create_examples_tab
 from .tab_papers import create_papers_tab
 from .tab_figures import create_figures_tab
 from .chat_interface import create_chat_interface
-from .tab_about import create_about_tab
+from .tab_about import create_about_tab
+from .main_tab import MainTabPanel
+from .tab_config import ConfigPanel
+from .main_tab import cqa_tab
front/tabs/chat_interface.py
CHANGED
@@ -21,21 +21,21 @@ What do you want to learn ?
 """

 init_prompt_poc = """
-…
+Bonjour, je suis ClimateQ&A, un assistant conversationnel conçu pour vous aider à comprendre le changement climatique et la perte de biodiversité. Je réponds à vos questions en **parcourant les rapports scientifiques du GIEC et de l'IPBES, le PCAET de Paris, le Plan Biodiversité 2018-2024, et les rapports Acclimaterra de la Région Nouvelle-Aquitaine**.

-❓ …
-- **Language …
-- **Audience …
-- **Sources …
-- **Relevant content sources …
+❓ Mode d'emploi
+- **Language** : Vous pouvez me poser vos questions dans n'importe quelle langue.
+- **Audience** : Vous pouvez préciser votre public (enfants, grand public, experts) pour obtenir une réponse plus adaptée.
+- **Sources** : Vous pouvez choisir de chercher dans les rapports du GIEC ou de l'IPBES, et dans les sources POC pour les documents locaux (PCAET, Plan Biodiversité, Acclimaterra).
+- **Relevant content sources** : Vous pouvez choisir de rechercher des images, des papiers scientifiques ou des graphiques qui peuvent être pertinents pour votre question.

 ⚠️ Limitations
-*…
+*Veuillez noter que l'IA n'est pas parfaite et peut parfois donner des réponses non pertinentes. Si vous n'êtes pas satisfait de la réponse, veuillez poser une question plus précise ou nous faire part de vos commentaires pour nous aider à améliorer le système.*

-🛈 …
+🛈 Informations
+Veuillez noter que nous enregistrons vos questions à des fins de méta-analyse, évitez donc de partager toute information sensible ou personnelle.

-…
+Que voulez-vous apprendre ?
 """

@@ -54,7 +54,10 @@ def create_chat_interface(tab):
         max_height="80vh",
         height="100vh"
     )
+    with gr.Row(elem_id="follow-up-examples"):
+        follow_up_examples_hidden = gr.Textbox(visible=False, elem_id="follow-up-hidden")
+        follow_up_examples = gr.Examples(examples=[], label="Follow up questions", inputs=[follow_up_examples_hidden], elem_id="follow-up-button", run_on_click=False)
+
     with gr.Row(elem_id="input-message"):

         textbox = gr.Textbox(

@@ -68,7 +71,7 @@ def create_chat_interface(tab):

     config_button = gr.Button("", elem_id="config-button")

-    return chatbot, textbox, config_button
+    return chatbot, textbox, config_button, follow_up_examples, follow_up_examples_hidden
front/tabs/main_tab.py
CHANGED
@@ -1,8 +1,37 @@
 import gradio as gr
+from gradio.helpers import Examples
+from typing import TypedDict
 from .chat_interface import create_chat_interface
 from .tab_examples import create_examples_tab
 from .tab_papers import create_papers_tab
 from .tab_figures import create_figures_tab
+from dataclasses import dataclass
+
+@dataclass
+class MainTabPanel:
+    chatbot: gr.Chatbot
+    textbox: gr.Textbox
+    tabs: gr.Tabs
+    sources_raw: gr.State
+    new_figures: gr.State
+    current_graphs: gr.State
+    examples_hidden: gr.State
+    sources_textbox: gr.HTML
+    figures_cards: gr.HTML
+    gallery_component: gr.Gallery
+    config_button: gr.Button
+    papers_direct_search: gr.TextArea
+    papers_html: gr.HTML
+    citations_network: gr.Plot
+    papers_summary: gr.Textbox
+    tab_recommended_content: gr.Tab
+    tab_sources: gr.Tab
+    tab_figures: gr.Tab
+    tab_graphs: gr.Tab
+    tab_papers: gr.Tab
+    graph_container: gr.HTML
+    follow_up_examples: Examples
+    follow_up_examples_hidden: gr.Textbox

 def cqa_tab(tab_name):
     # State variables

@@ -11,14 +40,14 @@ def cqa_tab(tab_name):
     with gr.Row(elem_id="chatbot-row"):
         # Left column - Chat interface
         with gr.Column(scale=2):
-            chatbot, textbox, config_button = create_chat_interface(tab_name)
+            chatbot, textbox, config_button, follow_up_examples, follow_up_examples_hidden = create_chat_interface(tab_name)

         # Right column - Content panels
         with gr.Column(scale=2, variant="panel", elem_id="right-panel"):
             with gr.Tabs(elem_id="right_panel_tab") as tabs:
                 # Examples tab
                 with gr.TabItem("Examples", elem_id="tab-examples", id=0):
-                    examples_hidden …
+                    examples_hidden = create_examples_tab(tab_name)

                 # Sources tab
                 with gr.Tab("Sources", elem_id="tab-sources", id=1) as tab_sources:

@@ -34,7 +63,7 @@ def cqa_tab(tab_name):

                 # Papers subtab
                 with gr.Tab("Papers", elem_id="tab-citations", id=4) as tab_papers:
-                    papers_summary, papers_html, citations_network, papers_modal = create_papers_tab()
+                    papers_direct_search, papers_summary, papers_html, citations_network, papers_modal = create_papers_tab()

                 # Graphs subtab
                 with gr.Tab("Graphs", elem_id="tab-graphs", id=5) as tab_graphs:

@@ -42,27 +71,30 @@ def cqa_tab(tab_name):
                         "<h2>There are no graphs to be displayed at the moment. Try asking another question.</h2>",
                         elem_id="graphs-container"
                     )
-    …
+
+    return MainTabPanel(
+        chatbot=chatbot,
+        textbox=textbox,
+        tabs=tabs,
+        sources_raw=sources_raw,
+        new_figures=new_figures,
+        current_graphs=current_graphs,
+        examples_hidden=examples_hidden,
+        sources_textbox=sources_textbox,
+        figures_cards=figures_cards,
+        gallery_component=gallery_component,
+        config_button=config_button,
+        papers_direct_search=papers_direct_search,
+        papers_html=papers_html,
+        citations_network=citations_network,
+        papers_summary=papers_summary,
+        tab_recommended_content=tab_recommended_content,
+        tab_sources=tab_sources,
+        tab_figures=tab_figures,
+        tab_graphs=tab_graphs,
+        tab_papers=tab_papers,
+        graph_container=graphs_container,
+        follow_up_examples=follow_up_examples,
+        follow_up_examples_hidden=follow_up_examples_hidden
+    )
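The motivation for moving the component bundle from a dict to a dataclass: a mistyped field becomes an immediate AttributeError (and an IDE or type-checker error) instead of a KeyError that only surfaces when that event path runs. A tiny illustration of the difference, with hypothetical fields rather than the real panel:

    from dataclasses import dataclass

    @dataclass
    class Panel:               # hypothetical stand-in for MainTabPanel / ConfigPanel
        textbox: object
        config_button: object

    panel = Panel(textbox="tb", config_button="btn")
    print(panel.textbox)       # attribute access, autocompleted and type-checked
    # panel.textbpx            # AttributeError right away, vs. a dict lookup that
    #                          # only fails when that code path executes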
front/tabs/tab_config.py
CHANGED
@@ -2,8 +2,10 @@ import gradio as gr
 from gradio_modal import Modal
 from climateqa.constants import POSSIBLE_REPORTS
 from typing import TypedDict
+from dataclasses import dataclass

-class ConfigPanel(TypedDict):
+@dataclass
+class ConfigPanel:
     config_open: gr.State
     config_modal: Modal
     dropdown_sources: gr.CheckboxGroup

@@ -14,6 +16,7 @@ class ConfigPanel(TypedDict):
     after: gr.Slider
     output_query: gr.Textbox
     output_language: gr.Textbox
+    close_config_modal_button: gr.Button


 def create_config_modal():

@@ -37,9 +40,9 @@ def create_config_modal():
     )

     dropdown_external_sources = gr.CheckboxGroup(
-        choices=["Figures (IPCC/IPBES)", "Papers (OpenAlex)", "Graphs (OurWorldInData)"…
+        choices=["Figures (IPCC/IPBES)", "Papers (OpenAlex)", "Graphs (OurWorldInData)"],
         label="Select database to search for relevant content",
-        value=["Figures (IPCC/IPBES)"…
+        value=["Figures (IPCC/IPBES)"],
         interactive=True
     )

@@ -95,29 +98,16 @@ def create_config_modal():

     close_config_modal_button = gr.Button("Validate and Close", elem_id="close-config-modal")

-    return {
-        "config_open" : config_open,
-        "config_modal": config_modal,
-        "dropdown_sources": dropdown_sources,
-        "dropdown_reports": dropdown_reports,
-        "dropdown_external_sources": dropdown_external_sources,
-        "search_only": search_only,
-        "dropdown_audience": dropdown_audience,
-        "after": after,
-        "output_query": output_query,
-        "output_language": output_language,
-        "close_config_modal_button": close_config_modal_button
-    }
+    return ConfigPanel(
+        config_open=config_open,
+        config_modal=config_modal,
+        dropdown_sources=dropdown_sources,
+        dropdown_reports=dropdown_reports,
+        dropdown_external_sources=dropdown_external_sources,
+        search_only=search_only,
+        dropdown_audience=dropdown_audience,
+        after=after,
+        output_query=output_query,
+        output_language=output_language,
+        close_config_modal_button=close_config_modal_button
+    )
style.css
CHANGED
@@ -115,6 +115,11 @@ main.flex.flex-1.flex-col {
     border-radius: 40px;
     padding-left: 30px;
     resize: none;
+    background-color: #f0f8ff; /* Light blue background */
+    border: 2px solid #4b8ec3; /* Blue border */
+    font-size: 16px; /* Increase font size */
+    color: #333; /* Text color */
+    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Add shadow */
 }

 #input-message > div {

@@ -474,6 +479,18 @@ a {
     text-decoration: none !important;
 }

+/* Follow-up Examples Styles */
+#follow-up-examples {
+    height: 15vh;
+    overflow-y: auto;
+    padding: 10px 0;
+}
+
+#follow-up-button {
+    height: 100%;
+    overflow-y: auto;
+}
+
 /* Media Queries */
 /* Desktop Media Query */
 @media screen and (min-width: 1024px) {

@@ -495,6 +512,15 @@ a {
     overflow-y: scroll !important;
 }

+div#chatbot-row {
+    max-height: calc(100vh - 200px) !important;
+}
+
+div#chatbot {
+    height: 65vh !important;
+    max-height: 65vh !important;
+}
+
 div#chatbot-row {
     max-height: calc(100vh - 90px) !important;
 }

@@ -513,7 +539,11 @@ a {
 /* Mobile Media Query */
 @media screen and (max-width: 767px) {
     div#chatbot {
-        height: 500px;
+        height: 400px !important; /* Reduced from 500px */
+    }
+
+    #follow-up-examples {
+        height: 100px;
     }

     #submit-button {