Abid Ali Awan committed on
Commit
baa0fe2
·
1 Parent(s): 48e84df

Refactor app.py to create a dedicated demo function for UI initialization. Update UIHandler to generate a unique user ID per session and pass it to the regulatory query processing. Enhance memory management by ensuring user-specific data is utilized. Modify LLM streaming function to include a system prompt for improved response context.

Browse files
Files changed (4) hide show
  1. agents/reg_radar.py +4 -2
  2. agents/ui_handler.py +17 -2
  3. app.py +8 -3
  4. tools/llm.py +10 -3
agents/reg_radar.py CHANGED
@@ -60,7 +60,9 @@ class RegRadarAgent:
60
  intent = call_llm(intent_prompt).strip().lower()
61
  return not intent.startswith("n")
62
 
63
- def process_regulatory_query(self, message: str, params: dict = None):
 
 
64
  """Process a regulatory query and return results"""
65
  # Determine the intended tool
66
  tool_key, tool_name = self.determine_intended_tool(message)
@@ -75,7 +77,7 @@ class RegRadarAgent:
75
  )
76
 
77
  # Check memory for similar queries
78
- memory_results = self.memory_tools.search_memory("user", message)
79
 
80
  return {
81
  "tool_name": tool_name,
 
60
  intent = call_llm(intent_prompt).strip().lower()
61
  return not intent.startswith("n")
62
 
63
+ def process_regulatory_query(
64
+ self, message: str, params: dict = None, user_id: str = "user"
65
+ ):
66
  """Process a regulatory query and return results"""
67
  # Determine the intended tool
68
  tool_key, tool_name = self.determine_intended_tool(message)
 
77
  )
78
 
79
  # Check memory for similar queries
80
+ memory_results = self.memory_tools.search_memory(user_id, message)
81
 
82
  return {
83
  "tool_name": tool_name,
agents/ui_handler.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import threading
2
  import time
3
 
@@ -12,6 +13,9 @@ from tools.llm import stream_llm
12
  class UIHandler:
13
  def __init__(self):
14
  self.agent = RegRadarAgent()
 
 
 
15
 
16
  def streaming_chatbot(self, message, history):
17
  """Process messages with tool visibility and lock input during response generation"""
@@ -82,7 +86,9 @@ class UIHandler:
82
  yield history, "", gr.update(interactive=False)
83
 
84
  # Process the regulatory query
85
- results = self.agent.process_regulatory_query(message, params)
 
 
86
  crawl_results = results["crawl_results"]
87
  memory_results = results["memory_results"]
88
 
@@ -173,7 +179,7 @@ Found {len(memory_results)} similar past queries in memory.
173
  # Save to memory in the background
174
  threading.Thread(
175
  target=self.agent.memory_tools.save_to_memory,
176
- args=("user", message, streaming_content),
177
  daemon=True,
178
  ).start()
179
 
@@ -238,6 +244,14 @@ Found {len(memory_results)} similar past queries in memory.
238
  gr.Markdown("""
239
  ### RegRadar uses these intelligent tools:
240
 
 
 
 
 
 
 
 
 
241
  **🔍 Regulatory Web Crawler**
242
  - Crawls official regulatory websites (SEC, FDA, FTC, etc.)
243
  - Searches for recent updates and compliance changes
@@ -252,6 +266,7 @@ Found {len(memory_results)} similar past queries in memory.
252
  - Remembers past queries and responses
253
  - Learns from your compliance interests
254
  - Provides context from previous interactions
 
255
 
256
  **🤖 AI Analysis Engine**
257
  - Analyzes and summarizes regulatory findings
 
1
+ import random
2
  import threading
3
  import time
4
 
 
13
  class UIHandler:
14
  def __init__(self):
15
  self.agent = RegRadarAgent()
16
+ self.user_id = (
17
+ f"user-{random.randint(1000, 9999)}" # User ID per session, small number
18
+ )
19
 
20
  def streaming_chatbot(self, message, history):
21
  """Process messages with tool visibility and lock input during response generation"""
 
86
  yield history, "", gr.update(interactive=False)
87
 
88
  # Process the regulatory query
89
+ results = self.agent.process_regulatory_query(
90
+ message, params, user_id=self.user_id
91
+ )
92
  crawl_results = results["crawl_results"]
93
  memory_results = results["memory_results"]
94
 
 
179
  # Save to memory in the background
180
  threading.Thread(
181
  target=self.agent.memory_tools.save_to_memory,
182
+ args=(self.user_id, message, streaming_content),
183
  daemon=True,
184
  ).start()
185
 
 
244
  gr.Markdown("""
245
  ### RegRadar uses these intelligent tools:
246
 
247
+ **🧠 Query Type Detection**
248
+ - Automatically detects if your message is a regulatory compliance query or a general question
249
+ - Selects the appropriate tools and response style based on your intent
250
+
251
+ **📩 Information Extraction**
252
+ - Extracts key details (industry, region, keywords) from your command
253
+ - Ensures accurate and relevant regulatory analysis
254
+
255
  **🔍 Regulatory Web Crawler**
256
  - Crawls official regulatory websites (SEC, FDA, FTC, etc.)
257
  - Searches for recent updates and compliance changes
 
266
  - Remembers past queries and responses
267
  - Learns from your compliance interests
268
  - Provides context from previous interactions
269
+ - Each session creates a new user for personalization
270
 
271
  **🤖 AI Analysis Engine**
272
  - Analyzes and summarizes regulatory findings
app.py CHANGED
@@ -7,12 +7,17 @@ compliance guidance for various industries and regions.
7
 
8
  from agents.ui_handler import UIHandler
9
 
 
 
 
 
 
 
10
  def main():
11
  """Initialize and launch the RegRadar application"""
12
- ui_handler = UIHandler()
13
- demo = ui_handler.create_ui()
14
  demo.launch()
15
 
 
16
  if __name__ == "__main__":
17
  main()
18
-
 
7
 
8
  from agents.ui_handler import UIHandler
9
 
10
+
11
+ def create_demo():
12
+ ui_handler = UIHandler() # New user for each session
13
+ return ui_handler.create_ui()
14
+
15
+
16
  def main():
17
  """Initialize and launch the RegRadar application"""
18
+ demo = create_demo()
 
19
  demo.launch()
20
 
21
+
22
  if __name__ == "__main__":
23
  main()
 
tools/llm.py CHANGED
@@ -28,12 +28,19 @@ def call_llm(prompt: str, temperature: float = DEFAULT_LLM_TEMPERATURE) -> str:
28
  return "I apologize, but I encountered an error processing your request."
29
 
30
 
31
- def stream_llm(prompt: str, temperature: float = DEFAULT_LLM_TEMPERATURE):
32
- """Stream LLM response"""
 
 
 
 
33
  try:
34
  stream = client.chat.completions.create(
35
  model=DEFAULT_LLM_MODEL,
36
- messages=[{"role": "user", "content": prompt}],
 
 
 
37
  temperature=temperature,
38
  stream=True,
39
  )
 
28
  return "I apologize, but I encountered an error processing your request."
29
 
30
 
31
+ def stream_llm(
32
+ prompt: str,
33
+ temperature: float = DEFAULT_LLM_TEMPERATURE,
34
+ system_prompt: str = "You are an expert AI assistant specializing in regulatory updates. Provide thorough, insightful, and actionable analysis based on the user's request, focusing on compliance, recent changes, and best practices.",
35
+ ):
36
+ """Stream LLM response with an expert regulatory system prompt"""
37
  try:
38
  stream = client.chat.completions.create(
39
  model=DEFAULT_LLM_MODEL,
40
+ messages=[
41
+ {"role": "system", "content": system_prompt},
42
+ {"role": "user", "content": prompt},
43
+ ],
44
  temperature=temperature,
45
  stream=True,
46
  )