timeki committed
Commit 2d6b3b9 · 1 Parent(s): 5bd3f8c

replace eval by litteral eval

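For context (not part of the commit): ast.literal_eval only parses Python literals such as lists, dicts, strings and numbers, whereas eval executes arbitrary expressions, which is risky when the string comes from an LLM. A minimal sketch with illustrative strings:

import ast

# A well-formed literal string parses into the expected Python object.
print(ast.literal_eval("['Chapter 2', 'Chapter 5']"))  # ['Chapter 2', 'Chapter 5']

# Anything that is not a plain literal is rejected instead of being executed.
try:
    ast.literal_eval("__import__('os').system('echo oops')")
except (ValueError, SyntaxError) as e:
    print(f"Rejected non-literal response: {e}")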
climateqa/engine/chains/retrieve_documents.py CHANGED
@@ -21,7 +21,7 @@ from langchain_core.prompts import ChatPromptTemplate
 from langchain_core.output_parsers import StrOutputParser
 from ..vectorstore import get_pinecone_vectorstore
 from ..embeddings import get_embeddings_function
-
+import ast
 
 import asyncio
 
@@ -580,7 +580,7 @@ async def get_relevant_toc_level_for_query(
     response = chain.invoke({"query": query, "doc_list": doc_list})
 
     try:
-        relevant_tocs = eval(response)
+        relevant_tocs = ast.literal_eval(response)
     except Exception as e:
         print(f" Failed to parse the result because of : {e}")
 
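Roughly how the patched parsing step behaves; parse_relevant_tocs below is a hypothetical stand-alone version of the snippet from get_relevant_toc_level_for_query (the empty-list fallback is an assumption, the diff does not show what follows the except block):

import ast

def parse_relevant_tocs(response: str):
    # The chain returns a string such as "[0, 2, 3]"; literal_eval converts it
    # into a Python list without executing the model output as code.
    try:
        return ast.literal_eval(response)
    except Exception as e:
        print(f" Failed to parse the result because of : {e}")
        return []  # assumed fallback, not shown in the diff

print(parse_relevant_tocs("[0, 2, 3]"))      # [0, 2, 3]
print(parse_relevant_tocs("not a literal"))  # prints the error, then []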
climateqa/engine/talk_to_data/input_processing.py CHANGED
@@ -11,6 +11,7 @@ from climateqa.engine.talk_to_data.objects.location import Location
 from climateqa.engine.talk_to_data.objects.plot import Plot
 from climateqa.engine.talk_to_data.objects.states import State
 import time
+import ast
 
 async def detect_location_with_openai(sentence: str) -> str:
     """
@@ -114,7 +115,7 @@ async def detect_year_with_openai(sentence: str) -> str:
     structured_llm = llm.with_structured_output(ArrayOutput)
     chain = prompt | structured_llm
     response: ArrayOutput = await chain.ainvoke({"sentence": sentence})
-    years_list = eval(response['array'])
+    years_list = ast.literal_eval(response['array'])
    if len(years_list) > 0:
         return years_list[0]
     else:
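Likewise for the year-detection step; the response dict below is an illustrative stand-in for the structured ArrayOutput, whose 'array' field carries a stringified list:

import ast

# Illustrative structured output: the model returns the list as a string.
response = {"array": "['2050', '2100']"}

years_list = ast.literal_eval(response['array'])
first_year = years_list[0] if len(years_list) > 0 else None
print(first_year)  # 2050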