Abid Ali Awan commited on
Commit
f4c443a
·
1 Parent(s): 10e2503

Refactor app.py to streamline the RegRadar application, consolidating regulatory compliance features into a cohesive UI. Enhance user interaction with improved query processing, tool detection, and memory integration. Update main function to initialize and launch the application effectively.

Browse files
agents/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Agents package initialization
2
+
agents/reg_radar.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from typing import Dict, List, Tuple
3
+ from tools.web_tools import WebTools
4
+ from tools.memory_tools import MemoryTools
5
+ from tools.llm import call_llm, stream_llm
6
+
7
class RegRadarAgent:
    """Coordinates tool selection, parameter extraction, crawling, memory
    lookup, and report generation for regulatory compliance queries."""

    def __init__(self):
        self.web_tools = WebTools()
        self.memory_tools = MemoryTools()

    def determine_intended_tool(self, message: str) -> Tuple[str, str]:
        """Determine which tool will be used based on the message.

        Returns a ``(tool_key, display_name)`` pair chosen by simple keyword
        matching; defaults to the general regulatory search.
        """
        message_lower = message.lower()

        if any(
            word in message_lower
            for word in ["crawl", "scan", "check", "latest", "update", "recent"]
        ):
            return "web_crawler", "Regulatory Web Crawler"
        elif any(
            word in message_lower for word in ["remember", "history", "past", "previous"]
        ):
            return "memory", "Memory Search"
        else:
            return "search", "Regulatory Search"

    def extract_parameters(self, message: str) -> Dict:
        """Extract industry, region, and keywords from the query.

        Asks the LLM for a JSON object. Falls back to safe defaults when the
        response is not valid JSON, and fills in any missing keys so callers
        can index ``params["industry"]`` etc. without KeyError.
        """
        extract_prompt = f"""
        Extract industry, region, and keywords from this query:
        "{message}"

        Return as JSON with keys: industry, region, keywords
        If not specified, use General/US/main topic
        """

        extraction = call_llm(extract_prompt)
        defaults = {"industry": "General", "region": "US", "keywords": message}
        try:
            params = json.loads(extraction)
        # Narrowed from a bare ``except:``: only JSON problems should trigger
        # the fallback, not KeyboardInterrupt/SystemExit.
        except (json.JSONDecodeError, TypeError):
            return defaults

        if not isinstance(params, dict):
            # LLM returned valid JSON but not an object (e.g. a list/string).
            return defaults

        # Guarantee all expected keys exist; extracted values win over defaults.
        return {**defaults, **params}

    def is_regulatory_query(self, message: str) -> bool:
        """Detect if this is a regulatory, compliance, or update-related question.

        Delegates the classification to the LLM; any answer not starting with
        'n' is treated as "yes" (optimistic default).
        """
        intent_prompt = f"""
        Is the following user message a regulatory, compliance, or update-related question (yes/no)?
        Message: {message}
        Respond with only 'yes' or 'no'.
        """

        intent = call_llm(intent_prompt).strip().lower()
        return not intent.startswith("n")

    def process_regulatory_query(self, message: str):
        """Process a regulatory query and return results.

        Returns a dict with keys: ``tool_name``, ``params``, ``crawl_results``
        and ``memory_results``.
        """
        # Determine the intended tool (display name is surfaced in the UI).
        tool_key, tool_name = self.determine_intended_tool(message)

        # Extract industry/region/keywords from the free-text query.
        params = self.extract_parameters(message)

        # Execute tool (crawl regulatory sites / general search).
        crawl_results = self.web_tools.crawl_regulatory_sites(
            params["industry"], params["region"], params["keywords"]
        )

        # Check memory for similar past queries.
        memory_results = self.memory_tools.search_memory("user", message)

        return {
            "tool_name": tool_name,
            "params": params,
            "crawl_results": crawl_results,
            "memory_results": memory_results
        }

    def generate_report(self, params, crawl_results):
        """Generate a comprehensive regulatory report.

        Returns the generator produced by ``stream_llm`` so the caller can
        stream the report chunk by chunk.
        """
        if not crawl_results["results"]:
            summary_prompt = (
                f"No regulatory updates found for {params['industry']} in {params['region']} "
                f"with keywords: {params['keywords']}. Provide helpful suggestions on where to "
                f"look or what to search for."
            )
        else:
            # Group the first few results by source so the LLM sees a compact,
            # structured view instead of a flat list.
            by_source = {}
            for result in crawl_results["results"][:8]:
                source = result.get("source", "Unknown")
                if source not in by_source:
                    by_source[source] = []
                by_source[source].append(result)

            summary_prompt = f"""
            Create a comprehensive regulatory compliance report for {params["industry"]} industry in {params["region"]} region.

            Analyze these regulatory updates:
            {json.dumps(by_source, indent=2)}

            Include:
            # 📋 Executive Summary
            (2-3 sentences overview)

            # 🔍 Key Findings
            • Finding 1
            • Finding 2
            • Finding 3

            # ⚠️ Compliance Requirements
            - List main requirements with priorities

            # ✅ Action Items
            - Specific actions with suggested timelines

            # 📚 Resources
            - Links and references

            Use emojis, bullet points, and clear formatting. Keep it professional but readable.
            """

        return stream_llm(summary_prompt)
124
+
agents/ui_handler.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import gradio as gr
3
+ from gradio import ChatMessage
4
+ from config.settings import AVATAR_IMAGES
5
+ from agents.reg_radar import RegRadarAgent
6
+ from tools.llm import stream_llm
7
+
8
class UIHandler:
    """Wires the RegRadarAgent into a Gradio chat interface."""

    def __init__(self):
        self.agent = RegRadarAgent()

    def streaming_chatbot(self, message, history):
        """Process messages with tool visibility.

        Generator that yields ``(history, textbox_value)`` after each step so
        Gradio renders live status updates while the agent works.
        """
        if not message.strip():
            return history, ""

        # Add user message
        history.append(ChatMessage(role="user", content=message))

        # Start timer so the final message can report elapsed time.
        start_time = time.time()

        # Detect if this is a regulatory query
        is_regulatory = self.agent.is_regulatory_query(message)

        if not is_regulatory:
            # General chat
            history.append(
                ChatMessage(role="assistant", content="💬 Processing general query...")
            )
            yield history, ""

            # Clear processing message and stream response
            history.pop()

            chat_prompt = (
                f"You are a friendly AI assistant. Respond conversationally to: {message}"
            )
            streaming_content = ""
            history.append(ChatMessage(role="assistant", content=""))

            for chunk in stream_llm(chat_prompt):
                streaming_content += chunk
                history[-1] = ChatMessage(role="assistant", content=streaming_content)
                yield history, ""

            return

        # Show tool detection
        tool_key, tool_name = self.agent.determine_intended_tool(message)

        # Initial processing message with tool info
        status_msg = (
            f"🔍 Using **{tool_name}** to analyze your query (estimated 10-20 seconds)..."
        )
        history.append(ChatMessage(role="assistant", content=status_msg))
        yield history, ""

        # Extract parameters and process query
        params = self.agent.extract_parameters(message)

        # Clear status and show parameter extraction
        history.pop()

        # Show tool execution steps
        tool_status = f"""
🛠️ **Tool Execution Status**

📍 **Parameters Extracted:**
- Industry: {params["industry"]}
- Region: {params["region"]}
- Keywords: {params["keywords"]}

🔄 **Executing {tool_name}...**
"""
        history.append(ChatMessage(role="assistant", content=tool_status))
        yield history, ""

        # Process the regulatory query (crawl + memory lookup)
        results = self.agent.process_regulatory_query(message)
        crawl_results = results["crawl_results"]
        memory_results = results["memory_results"]

        # Update with results count
        history[-1] = ChatMessage(
            role="assistant",
            content=tool_status
            + f"\n\n✅ **Found {crawl_results['total_found']} regulatory updates**",
        )
        yield history, ""

        # Show collapsible raw results
        if crawl_results["results"]:
            # Format results for display
            results_display = []
            for i, result in enumerate(crawl_results["results"][:5], 1):
                results_display.append(f"""
**{i}. {result["source"]}**
- Title: {result["title"][:100]}...
- URL: {result["url"]}
""")

            collapsible_results = f"""
<details>
<summary><strong>📋 Raw Regulatory Data</strong> - Click to expand</summary>

{"".join(results_display)}

</details>
"""
            history.append(ChatMessage(role="assistant", content=collapsible_results))
            yield history, ""

        # Display memory results if available
        if memory_results:
            # BUG FIX: this was a plain string, so {len(memory_results)}
            # rendered literally in the chat; it must be an f-string.
            memory_msg = f"""
<details>
<summary><strong>💾 Related Past Queries</strong> - Click to expand</summary>

Found {len(memory_results)} similar past queries in memory.

</details>
"""
            history.append(ChatMessage(role="assistant", content=memory_msg))
            yield history, ""

        # Generate final analysis
        history.append(
            ChatMessage(role="assistant", content="📝 **Generating Compliance Report...**")
        )
        yield history, ""

        # Clear generating message and stream final report
        history.pop()

        streaming_content = ""
        history.append(ChatMessage(role="assistant", content=""))

        for chunk in self.agent.generate_report(params, crawl_results):
            streaming_content += chunk
            history[-1] = ChatMessage(role="assistant", content=streaming_content)
            yield history, ""

        # Save to memory
        self.agent.memory_tools.save_to_memory("user", message, streaming_content)

        # Show completion time
        elapsed = time.time() - start_time
        history.append(
            ChatMessage(
                role="assistant", content=f"✨ **Analysis complete** ({elapsed:.1f}s)"
            )
        )
        yield history, ""

    def create_ui(self):
        """Create Gradio interface.

        Returns the (un-launched) ``gr.Blocks`` app; the caller launches it.
        """
        with gr.Blocks(
            title="RegRadar - AI Regulatory Compliance Assistant",
            theme=gr.themes.Soft(),
            css="""
            .tool-status {
                background-color: #f0f4f8;
                padding: 10px;
                border-radius: 5px;
                margin: 10px 0;
            }
            """,
        ) as demo:
            # Header
            gr.HTML("""
            <center>
            <h1 style="text-align: center;">🛡️ RegRadar</h1>
            <p><b>AI-powered regulatory compliance assistant that monitors global regulations</b></p>
            </center>
            """)

            # Main chat interface
            chatbot = gr.Chatbot(
                height=500,
                type="messages",
                avatar_images=AVATAR_IMAGES,
                show_copy_button=True,
                bubble_full_width=False,
            )

            with gr.Row(equal_height=True):
                msg = gr.Textbox(
                    placeholder="Ask about regulatory updates, compliance requirements, or any industry regulations...",
                    show_label=False,
                    scale=18,
                    autofocus=True,
                )
                submit = gr.Button("Send", variant="primary", scale=1, min_width=60)
                clear = gr.Button("Clear", scale=1, min_width=60)

            # Example queries
            example_queries = [
                "Show me the latest SEC regulations for fintech",
                "What are the new data privacy rules in the EU?",
                "Any updates on ESG compliance for energy companies?",
                "Scan for healthcare regulations in the US",
                "What are the global trends in AI regulation?",
            ]

            gr.Examples(examples=example_queries, inputs=msg, label="Example Queries")

            # Tool information panel
            with gr.Accordion("🛠️ Available Tools", open=False):
                gr.Markdown("""
                ### RegRadar uses these intelligent tools:

                **🔍 Regulatory Web Crawler**
                - Crawls official regulatory websites (SEC, FDA, FTC, etc.)
                - Searches for recent updates and compliance changes
                - Focuses on last 30 days of content

                **🌐 Regulatory Search Engine**
                - Searches across multiple sources for regulatory updates
                - Finds industry-specific compliance information
                - Aggregates results from various regulatory bodies

                **💾 Memory System**
                - Remembers past queries and responses
                - Learns from your compliance interests
                - Provides context from previous interactions

                **🤖 AI Analysis Engine**
                - Analyzes and summarizes regulatory findings
                - Generates actionable compliance recommendations
                - Creates executive summaries and action items
                """)

            # Event handlers (Enter key and Send button share one callback).
            submit_event = msg.submit(self.streaming_chatbot, [msg, chatbot], [chatbot, msg])
            click_event = submit.click(self.streaming_chatbot, [msg, chatbot], [chatbot, msg])
            clear.click(lambda: ([], ""), outputs=[chatbot, msg])

            # Footer
            gr.HTML("""
            <div style="text-align: center; padding: 20px; color: #666; font-size: 0.9rem;">
            <p>RegRadar monitors regulatory updates from SEC, FDA, FTC, EU Commission, and more.</p>
            <p>All analysis is AI-generated. Always verify with official sources.</p>
            </div>
            """)

        return demo
248
+
app.py CHANGED
@@ -1,482 +1,18 @@
1
- import hashlib
2
- import json
3
- import os
4
- import time
5
- from typing import Dict, List, Tuple
6
-
7
- import gradio as gr
8
- from gradio import ChatMessage
9
- from mem0 import MemoryClient
10
- from openai import OpenAI
11
- from tavily import TavilyClient
12
-
13
- # Initialize services
14
- tavily_client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
15
- client = OpenAI(
16
- base_url="https://api.keywordsai.co/api/",
17
- api_key=os.getenv("KEYWORDS_API_KEY"),
18
- )
19
- mem0_client = MemoryClient(api_key=os.getenv("MEM0_API_KEY"))
20
-
21
- # Regulatory websites mapping
22
- REGULATORY_SOURCES = {
23
- "US": {
24
- "SEC": "https://www.sec.gov/news/pressreleases",
25
- "FDA": "https://www.fda.gov/news-events/fda-newsroom/press-announcements",
26
- "FTC": "https://www.ftc.gov/news-events/news/press-releases",
27
- "Federal Register": "https://www.federalregister.gov/documents/current",
28
- },
29
- "EU": {
30
- "European Commission": "https://ec.europa.eu/commission/presscorner/home/en",
31
- "ESMA": "https://www.esma.europa.eu/press-news/esma-news",
32
- "EBA": "https://www.eba.europa.eu/news-press/news",
33
- },
34
- "Global": {
35
- "BIS": "https://www.bis.org/press/index.htm",
36
- "IOSCO": "https://www.iosco.org/news/",
37
- },
38
- }
39
-
40
- # Avatar configuration
41
- AVATAR_IMAGES = (
42
- None,
43
- "https://media.roboflow.com/spaces/gemini-icon.png",
44
- )
45
-
46
-
47
- class RegRadarChat:
48
- def __init__(self):
49
- self.cached_searches = {}
50
-
51
- def generate_cache_key(self, industry: str, region: str, keywords: str) -> str:
52
- """Generate a unique cache key"""
53
- key = f"{industry}:{region}:{keywords}".lower()
54
- return hashlib.md5(key.encode()).hexdigest()
55
-
56
- def call_llm(self, prompt: str, temperature: float = 0.3) -> str:
57
- """Make a call to the LLM"""
58
- try:
59
- response = client.chat.completions.create(
60
- model="gpt-4.1-mini",
61
- messages=[{"role": "user", "content": prompt}],
62
- temperature=temperature,
63
- )
64
- return response.choices[0].message.content
65
- except Exception as e:
66
- print(f"LLM call error: {e}")
67
- return "I apologize, but I encountered an error processing your request."
68
-
69
- def stream_llm(self, prompt: str, temperature: float = 0.3):
70
- """Stream LLM response"""
71
- try:
72
- stream = client.chat.completions.create(
73
- model="gpt-4.1-mini",
74
- messages=[{"role": "user", "content": prompt}],
75
- temperature=temperature,
76
- stream=True,
77
- )
78
- for chunk in stream:
79
- delta = getattr(chunk.choices[0].delta, "content", None)
80
- if delta:
81
- yield delta
82
- except Exception as e:
83
- yield f"Error: {str(e)}"
84
-
85
- def crawl_regulatory_sites(self, industry: str, region: str, keywords: str) -> Dict:
86
- """Crawl regulatory websites for updates"""
87
- # Check cache first
88
- cache_key = self.generate_cache_key(industry, region, keywords)
89
- if cache_key in self.cached_searches:
90
- return self.cached_searches[cache_key]
91
-
92
- urls_to_crawl = REGULATORY_SOURCES.get(region, REGULATORY_SOURCES["US"])
93
- all_results = []
94
-
95
- crawl_instructions = f"""
96
- Find pages about:
97
- - Recent regulatory updates for {industry}
98
- - New compliance requirements
99
- - Keywords: {keywords}
100
- - Focus on recent content (last 30 days)
101
- """
102
-
103
- # Crawl regulatory sites
104
- for source_name, url in list(urls_to_crawl.items())[:3]:
105
- try:
106
- crawl_response = tavily_client.crawl(
107
- url=url, max_depth=2, limit=5, instructions=crawl_instructions
108
- )
109
- for result in crawl_response.get("results", []):
110
- all_results.append(
111
- {
112
- "source": source_name,
113
- "url": url,
114
- "title": result.get("title", ""),
115
- "content": result.get("raw_content", "")[:1500],
116
- }
117
- )
118
- except Exception as e:
119
- print(f"Crawl error for {source_name}: {e}")
120
-
121
- # General search
122
- try:
123
- search_results = tavily_client.search(
124
- query=f"{industry} {region} regulatory updates compliance {keywords} 2024 2025",
125
- max_results=5,
126
- include_raw_content=True,
127
- )
128
- for result in search_results.get("results", []):
129
- all_results.append(
130
- {
131
- "source": "Web Search",
132
- "url": result.get("url", ""),
133
- "title": result.get("title", ""),
134
- "content": result.get("content", ""),
135
- }
136
- )
137
- except Exception as e:
138
- print(f"Search error: {e}")
139
-
140
- results = {"results": all_results, "total_found": len(all_results)}
141
- self.cached_searches[cache_key] = results
142
- return results
143
-
144
- def save_to_memory(self, user_id: str, query: str, response: str):
145
- """Save interaction to memory"""
146
- try:
147
- messages = [
148
- {"role": "user", "content": query},
149
- {"role": "assistant", "content": response},
150
- ]
151
- mem0_client.add(
152
- messages=messages,
153
- user_id=user_id,
154
- metadata={"type": "regulatory_query"},
155
- )
156
- except Exception as e:
157
- print(f"Memory save error: {e}")
158
-
159
- def search_memory(self, user_id: str, query: str) -> List[Dict]:
160
- """Search for similar past queries"""
161
- try:
162
- memories = mem0_client.search(query=query, user_id=user_id, limit=3)
163
- return memories
164
- except:
165
- return []
166
-
167
-
168
- # Initialize chat instance
169
- chat_instance = RegRadarChat()
170
-
171
-
172
- def determine_intended_tool(message: str) -> Tuple[str, str]:
173
- """Determine which tool will be used based on the message"""
174
- message_lower = message.lower()
175
-
176
- if any(
177
- word in message_lower
178
- for word in ["crawl", "scan", "check", "latest", "update", "recent"]
179
- ):
180
- return "web_crawler", "Regulatory Web Crawler"
181
- elif any(
182
- word in message_lower for word in ["remember", "history", "past", "previous"]
183
- ):
184
- return "memory", "Memory Search"
185
- else:
186
- return "search", "Regulatory Search"
187
-
188
-
189
- def streaming_chatbot(message, history):
190
- """Process messages with tool visibility"""
191
- if not message.strip():
192
- return history, ""
193
-
194
- # Add user message
195
- history.append(ChatMessage(role="user", content=message))
196
-
197
- # Start timer
198
- start_time = time.time()
199
-
200
- # Detect if this is a regulatory query
201
- intent_prompt = f"""
202
- Is the following user message a regulatory, compliance, or update-related question (yes/no)?
203
- Message: {message}
204
- Respond with only 'yes' or 'no'.
205
- """
206
-
207
- intent = chat_instance.call_llm(intent_prompt).strip().lower()
208
-
209
- if intent.startswith("n"):
210
- # General chat
211
- history.append(
212
- ChatMessage(role="assistant", content="💬 Processing general query...")
213
- )
214
- yield history, ""
215
-
216
- # Clear processing message and stream response
217
- history.pop()
218
-
219
- chat_prompt = (
220
- f"You are a friendly AI assistant. Respond conversationally to: {message}"
221
- )
222
- streaming_content = ""
223
- history.append(ChatMessage(role="assistant", content=""))
224
-
225
- for chunk in chat_instance.stream_llm(chat_prompt):
226
- streaming_content += chunk
227
- history[-1] = ChatMessage(role="assistant", content=streaming_content)
228
- yield history, ""
229
-
230
- return
231
-
232
- # Show tool detection
233
- tool_key, tool_name = determine_intended_tool(message)
234
-
235
- # Initial processing message with tool info
236
- status_msg = (
237
- f"🔍 Using **{tool_name}** to analyze your query (estimated 10-20 seconds)..."
238
- )
239
- history.append(ChatMessage(role="assistant", content=status_msg))
240
- yield history, ""
241
-
242
- # Extract parameters
243
- extract_prompt = f"""
244
- Extract industry, region, and keywords from this query:
245
- "{message}"
246
-
247
- Return as JSON with keys: industry, region, keywords
248
- If not specified, use General/US/main topic
249
- """
250
-
251
- extraction = chat_instance.call_llm(extract_prompt)
252
- try:
253
- params = json.loads(extraction)
254
- except:
255
- params = {"industry": "General", "region": "US", "keywords": message}
256
-
257
- # Clear status and show parameter extraction
258
- history.pop()
259
-
260
- # Show tool execution steps
261
- tool_status = f"""
262
- 🛠️ **Tool Execution Status**
263
-
264
- 📍 **Parameters Extracted:**
265
- - Industry: {params["industry"]}
266
- - Region: {params["region"]}
267
- - Keywords: {params["keywords"]}
268
-
269
- 🔄 **Executing {tool_name}...**
270
  """
271
- history.append(ChatMessage(role="assistant", content=tool_status))
272
- yield history, ""
273
-
274
- # Execute tool (crawl sites)
275
- crawl_results = chat_instance.crawl_regulatory_sites(
276
- params["industry"], params["region"], params["keywords"]
277
- )
278
-
279
- # Update with results count
280
- history[-1] = ChatMessage(
281
- role="assistant",
282
- content=tool_status
283
- + f"\n\n✅ **Found {crawl_results['total_found']} regulatory updates**",
284
- )
285
- yield history, ""
286
 
287
- # Show collapsible raw results
288
- if crawl_results["results"]:
289
- # Format results for display
290
- results_display = []
291
- for i, result in enumerate(crawl_results["results"][:5], 1):
292
- results_display.append(f"""
293
- **{i}. {result["source"]}**
294
- - Title: {result["title"][:100]}...
295
- - URL: {result["url"]}
296
- """)
297
-
298
- collapsible_results = f"""
299
- <details>
300
- <summary><strong>📋 Raw Regulatory Data</strong> - Click to expand</summary>
301
-
302
- {"".join(results_display)}
303
-
304
- </details>
305
- """
306
- history.append(ChatMessage(role="assistant", content=collapsible_results))
307
- yield history, ""
308
-
309
- # Check memory for similar queries
310
- memory_results = chat_instance.search_memory("user", message)
311
- if memory_results:
312
- memory_msg = """
313
- <details>
314
- <summary><strong>💾 Related Past Queries</strong> - Click to expand</summary>
315
-
316
- Found {len(memory_results)} similar past queries in memory.
317
-
318
- </details>
319
  """
320
- history.append(ChatMessage(role="assistant", content=memory_msg))
321
- yield history, ""
322
-
323
- # Generate final analysis
324
- history.append(
325
- ChatMessage(role="assistant", content="📝 **Generating Compliance Report...**")
326
- )
327
- yield history, ""
328
 
329
- # Create analysis prompt
330
- if not crawl_results["results"]:
331
- summary_prompt = f"No regulatory updates found for {params['industry']} in {params['region']} with keywords: {params['keywords']}. Provide helpful suggestions on where to look or what to search for."
332
- else:
333
- by_source = {}
334
- for result in crawl_results["results"][:8]:
335
- source = result.get("source", "Unknown")
336
- if source not in by_source:
337
- by_source[source] = []
338
- by_source[source].append(result)
339
 
340
- summary_prompt = f"""
341
- Create a comprehensive regulatory compliance report for {params["industry"]} industry in {params["region"]} region.
342
-
343
- Analyze these regulatory updates:
344
- {json.dumps(by_source, indent=2)}
345
-
346
- Include:
347
- # 📋 Executive Summary
348
- (2-3 sentences overview)
349
-
350
- # 🔍 Key Findings
351
- • Finding 1
352
- • Finding 2
353
- • Finding 3
354
-
355
- # ⚠️ Compliance Requirements
356
- - List main requirements with priorities
357
-
358
- # ✅ Action Items
359
- - Specific actions with suggested timelines
360
-
361
- # 📚 Resources
362
- - Links and references
363
-
364
- Use emojis, bullet points, and clear formatting. Keep it professional but readable.
365
- """
366
-
367
- # Clear generating message and stream final report
368
- history.pop()
369
-
370
- streaming_content = ""
371
- history.append(ChatMessage(role="assistant", content=""))
372
-
373
- for chunk in chat_instance.stream_llm(summary_prompt):
374
- streaming_content += chunk
375
- history[-1] = ChatMessage(role="assistant", content=streaming_content)
376
- yield history, ""
377
-
378
- # Save to memory
379
- chat_instance.save_to_memory("user", message, streaming_content)
380
-
381
- # Show completion time
382
- elapsed = time.time() - start_time
383
- history.append(
384
- ChatMessage(
385
- role="assistant", content=f"✨ **Analysis complete** ({elapsed:.1f}s)"
386
- )
387
- )
388
- yield history, ""
389
-
390
-
391
- # Create Gradio interface
392
- with gr.Blocks(
393
- title="RegRadar - AI Regulatory Compliance Assistant",
394
- theme=gr.themes.Soft(),
395
- css="""
396
- .tool-status {
397
- background-color: #f0f4f8;
398
- padding: 10px;
399
- border-radius: 5px;
400
- margin: 10px 0;
401
- }
402
- """,
403
- ) as demo:
404
- # Header
405
- gr.HTML("""
406
- <center>
407
- <h1 style="text-align: center;">🛡️ RegRadar</h1>
408
- <p><b>AI-powered regulatory compliance assistant that monitors global regulations</b></p>
409
- </center>
410
- """)
411
-
412
- # Main chat interface
413
- chatbot = gr.Chatbot(
414
- height=500,
415
- type="messages",
416
- avatar_images=AVATAR_IMAGES,
417
- show_copy_button=True,
418
- bubble_full_width=False,
419
- )
420
-
421
- with gr.Row(equal_height=True):
422
- msg = gr.Textbox(
423
- placeholder="Ask about regulatory updates, compliance requirements, or any industry regulations...",
424
- show_label=False,
425
- scale=18,
426
- autofocus=True,
427
- )
428
- submit = gr.Button("Send", variant="primary", scale=1, min_width=60)
429
- clear = gr.Button("Clear", scale=1, min_width=60)
430
-
431
- # Example queries
432
- example_queries = [
433
- "Show me the latest SEC regulations for fintech",
434
- "What are the new data privacy rules in the EU?",
435
- "Any updates on ESG compliance for energy companies?",
436
- "Scan for healthcare regulations in the US",
437
- "What are the global trends in AI regulation?",
438
- ]
439
-
440
- gr.Examples(examples=example_queries, inputs=msg, label="Example Queries")
441
-
442
- # Tool information panel
443
- with gr.Accordion("🛠️ Available Tools", open=False):
444
- gr.Markdown("""
445
- ### RegRadar uses these intelligent tools:
446
-
447
- **🔍 Regulatory Web Crawler**
448
- - Crawls official regulatory websites (SEC, FDA, FTC, etc.)
449
- - Searches for recent updates and compliance changes
450
- - Focuses on last 30 days of content
451
-
452
- **🌐 Regulatory Search Engine**
453
- - Searches across multiple sources for regulatory updates
454
- - Finds industry-specific compliance information
455
- - Aggregates results from various regulatory bodies
456
-
457
- **💾 Memory System**
458
- - Remembers past queries and responses
459
- - Learns from your compliance interests
460
- - Provides context from previous interactions
461
-
462
- **🤖 AI Analysis Engine**
463
- - Analyzes and summarizes regulatory findings
464
- - Generates actionable compliance recommendations
465
- - Creates executive summaries and action items
466
- """)
467
-
468
- # Event handlers
469
- submit_event = msg.submit(streaming_chatbot, [msg, chatbot], [chatbot, msg])
470
- click_event = submit.click(streaming_chatbot, [msg, chatbot], [chatbot, msg])
471
- clear.click(lambda: ([], ""), outputs=[chatbot, msg])
472
-
473
- # Footer
474
- gr.HTML("""
475
- <div style="text-align: center; padding: 20px; color: #666; font-size: 0.9rem;">
476
- <p>RegRadar monitors regulatory updates from SEC, FDA, FTC, EU Commission, and more.</p>
477
- <p>All analysis is AI-generated. Always verify with official sources.</p>
478
- </div>
479
- """)
480
 
481
  if __name__ == "__main__":
482
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  """
2
+ RegRadar - AI Regulatory Compliance Assistant
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
+ This application monitors and analyzes regulatory updates, providing
5
+ compliance guidance for various industries and regions.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  """
 
 
 
 
 
 
 
 
7
 
8
+ from agents.ui_handler import UIHandler
 
 
 
 
 
 
 
 
 
9
 
10
def main():
    """Initialize and launch the RegRadar application.

    Builds the Gradio Blocks app via UIHandler and starts serving it.
    """
    handler = UIHandler()
    app = handler.create_ui()
    app.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
# Script entry point: launch the app only when run directly, not on import.
if __name__ == "__main__":
    main()
18
+
config/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Config package initialization
2
+
config/settings.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

# API Client configurations
# Read once at import time; a missing environment variable yields None
# (client construction will then fail at first use, not here).
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
KEYWORDS_API_KEY = os.getenv("KEYWORDS_API_KEY")
MEM0_API_KEY = os.getenv("MEM0_API_KEY")

# OpenAI base URL
# OpenAI-compatible endpoint proxied through Keywords AI.
OPENAI_BASE_URL = "https://api.keywordsai.co/api/"

# Regulatory websites mapping
# region code -> {source name -> press/newsroom URL}; consumed by the
# web-crawling tool when scanning for recent regulatory updates.
REGULATORY_SOURCES = {
    "US": {
        "SEC": "https://www.sec.gov/news/pressreleases",
        "FDA": "https://www.fda.gov/news-events/fda-newsroom/press-announcements",
        "FTC": "https://www.ftc.gov/news-events/news/press-releases",
        "Federal Register": "https://www.federalregister.gov/documents/current",
    },
    "EU": {
        "European Commission": "https://ec.europa.eu/commission/presscorner/home/en",
        "ESMA": "https://www.esma.europa.eu/press-news/esma-news",
        "EBA": "https://www.eba.europa.eu/news-press/news",
    },
    "Global": {
        "BIS": "https://www.bis.org/press/index.htm",
        "IOSCO": "https://www.iosco.org/news/",
    },
}

# UI settings
# (user avatar, assistant avatar) pair for the chatbot; None keeps the default.
AVATAR_IMAGES = (
    None,
    "https://media.roboflow.com/spaces/gemini-icon.png",
)

# Default chat parameters
DEFAULT_LLM_TEMPERATURE = 0.3
DEFAULT_LLM_MODEL = "gpt-4.1-mini"
39
+
tools/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Tools package initialization
2
+
tools/llm.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from openai import OpenAI
3
+ from config.settings import KEYWORDS_API_KEY, OPENAI_BASE_URL, DEFAULT_LLM_MODEL, DEFAULT_LLM_TEMPERATURE
4
+
5
+ # Initialize OpenAI client
6
+ client = OpenAI(
7
+ base_url=OPENAI_BASE_URL,
8
+ api_key=KEYWORDS_API_KEY,
9
+ )
10
+
11
def call_llm(prompt: str, temperature: float = DEFAULT_LLM_TEMPERATURE) -> str:
    """Send *prompt* as a single user message and return the reply text.

    Best-effort: on any API failure the error is printed and a generic
    apology string is returned, so callers never see an exception.
    """
    request = {
        "model": DEFAULT_LLM_MODEL,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
    }
    try:
        completion = client.chat.completions.create(**request)
        return completion.choices[0].message.content
    except Exception as e:
        print(f"LLM call error: {e}")
        return "I apologize, but I encountered an error processing your request."
23
+
24
def stream_llm(prompt: str, temperature: float = DEFAULT_LLM_TEMPERATURE):
    """Yield the model's reply incrementally as text chunks.

    Failures are surfaced in-band as a single ``"Error: ..."`` chunk so the
    UI can render them like any other streamed content.
    """
    try:
        response = client.chat.completions.create(
            model=DEFAULT_LLM_MODEL,
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            stream=True,
        )
        for event in response:
            # Some stream events carry no text (e.g. role-only deltas).
            piece = getattr(event.choices[0].delta, "content", None)
            if piece:
                yield piece
    except Exception as e:
        yield f"Error: {str(e)}"
39
+
tools/memory_tools.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Dict
2
+ from mem0 import MemoryClient
3
+ from config.settings import MEM0_API_KEY
4
+
5
# Module-level mem0 client used by MemoryTools for long-term chat memory.
mem0_client = MemoryClient(api_key=MEM0_API_KEY)
7
+
8
class MemoryTools:
    """Best-effort persistence helpers backed by the mem0 service.

    Failures are logged and swallowed rather than raised, so memory
    problems never interrupt the chat flow.
    """

    def save_to_memory(self, user_id: str, query: str, response: str) -> None:
        """Store one query/response exchange in the user's long-term memory."""
        try:
            messages = [
                {"role": "user", "content": query},
                {"role": "assistant", "content": response},
            ]
            mem0_client.add(
                messages=messages,
                user_id=user_id,
                metadata={"type": "regulatory_query"},
            )
        except Exception as e:
            # Best-effort: a failed save must not break the session.
            print(f"Memory save error: {e}")

    def search_memory(self, user_id: str, query: str) -> List[Dict]:
        """Return up to 3 past interactions similar to *query*; [] on failure."""
        try:
            return mem0_client.search(query=query, user_id=user_id, limit=3)
        except Exception as e:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt and hid every error silently; narrow the
            # catch and surface the failure for debugging.
            print(f"Memory search error: {e}")
            return []
31
+
tools/web_tools.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import hashlib
2
+ from typing import Dict
3
+ from tavily import TavilyClient
4
+ from config.settings import TAVILY_API_KEY, REGULATORY_SOURCES
5
+
6
# Module-level Tavily client used by WebTools for crawling and web search.
tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
8
+
9
class WebTools:
    """Tavily-backed crawling and search helpers for regulatory monitoring."""

    def __init__(self):
        # Session-scoped cache: cache_key -> results dict from a prior query.
        self.cached_searches = {}

    def generate_cache_key(self, industry: str, region: str, keywords: str) -> str:
        """Return a deterministic, case-insensitive key for a query triple."""
        key = f"{industry}:{region}:{keywords}".lower()
        # MD5 is fine here: the digest is only a cache identifier,
        # not security-sensitive.
        return hashlib.md5(key.encode()).hexdigest()

    def crawl_regulatory_sites(self, industry: str, region: str, keywords: str) -> Dict:
        """Crawl regulatory sites and run a general search for recent updates.

        Returns {"results": [...], "total_found": int}; each entry carries
        source, url, title, and content keys. Results are cached per
        (industry, region, keywords) for the session.
        """
        # Serve from cache when this exact query was already answered.
        cache_key = self.generate_cache_key(industry, region, keywords)
        if cache_key in self.cached_searches:
            return self.cached_searches[cache_key]

        # Unknown regions fall back to the US source list.
        urls_to_crawl = REGULATORY_SOURCES.get(region, REGULATORY_SOURCES["US"])
        all_results = []

        crawl_instructions = f"""
        Find pages about:
        - Recent regulatory updates for {industry}
        - New compliance requirements
        - Keywords: {keywords}
        - Focus on recent content (last 30 days)
        """

        # Crawl at most 3 regulatory sites to bound latency.
        for source_name, url in list(urls_to_crawl.items())[:3]:
            try:
                crawl_response = tavily_client.crawl(
                    url=url, max_depth=2, limit=5, instructions=crawl_instructions
                )
                for result in crawl_response.get("results", []):
                    all_results.append(
                        {
                            "source": source_name,
                            "url": url,
                            "title": result.get("title", ""),
                            # raw_content may be present but None, in which
                            # case .get(..., "") still returns None and
                            # slicing would raise TypeError — guard with `or`.
                            "content": (result.get("raw_content") or "")[:1500],
                        }
                    )
            except Exception as e:
                print(f"Crawl error for {source_name}: {e}")

        # Supplement crawling with a general web search.  Derive the year
        # hint from the clock instead of the previously hard-coded
        # "2024 2025", which would silently go stale.
        from datetime import date

        this_year = date.today().year
        try:
            search_results = tavily_client.search(
                query=(
                    f"{industry} {region} regulatory updates compliance "
                    f"{keywords} {this_year - 1} {this_year}"
                ),
                max_results=5,
                include_raw_content=True,
            )
            for result in search_results.get("results", []):
                all_results.append(
                    {
                        "source": "Web Search",
                        "url": result.get("url", ""),
                        "title": result.get("title", ""),
                        # Same None-guard as above for absent content.
                        "content": result.get("content") or "",
                    }
                )
        except Exception as e:
            print(f"Search error: {e}")

        results = {"results": all_results, "total_found": len(all_results)}
        self.cached_searches[cache_key] = results
        return results
76
+