hussamalafandi committed on
Commit
c3e3be2
·
1 Parent(s): aff055d

Refactor respond function to improve message handling and clarity

Browse files
Files changed (1) hide show
  1. app.py +11 -4
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import gradio as gr
2
  from langchain.chat_models import init_chat_model
3
- from langchain_core.messages import HumanMessage
4
 
5
  model = init_chat_model("gemini-2.0-flash", model_provider="google_genai")
6
 
@@ -16,9 +16,16 @@ def respond(
16
  """
17
  Respond to user input using the model.
18
  """
19
- response = model.invoke(
20
- dialog_history + [HumanMessage(content=user_input)],
21
- )
 
 
 
 
 
 
 
22
  return response.content
23
 
24
 
 
1
  import gradio as gr
2
  from langchain.chat_models import init_chat_model
3
+ from langchain_core.messages import HumanMessage, AIMessage
4
 
5
  model = init_chat_model("gemini-2.0-flash", model_provider="google_genai")
6
 
 
16
  """
17
  Respond to user input using the model.
18
  """
19
+ history_langchain_format = []
20
+ for msg in dialog_history:
21
+ if msg['role'] == "user":
22
+ history_langchain_format.append(
23
+ HumanMessage(content=msg['content']))
24
+ elif msg['role'] == "assistant":
25
+ history_langchain_format.append(AIMessage(content=msg['content']))
26
+ history_langchain_format.append(HumanMessage(content=user_input))
27
+ response = model.invoke(history_langchain_format)
28
+ print(history_langchain_format)
29
  return response.content
30
 
31