Commit 7efa4f1
Parent(s): ee8a880
workflow errors debugging v6

Files changed:
- app.py (+52 -8)
- orchestrator_engine.py (+48 -9)
app.py
CHANGED
@@ -212,18 +212,21 @@ def create_mobile_optimized_interface():
                    label="",
                    show_label=False
                )
+               interface_components['reasoning_display'] = reasoning_display

            with gr.Accordion("Agent Performance", open=False):
                performance_display = gr.JSON(
                    label="",
                    show_label=False
                )
+               interface_components['performance_display'] = performance_display

            with gr.Accordion("Session Context", open=False):
                context_display = gr.JSON(
                    label="",
                    show_label=False
                )
+               interface_components['context_display'] = context_display

        # Mobile Bottom Navigation
        with gr.Row(visible=False, elem_id="mobile_nav") as mobile_navigation:
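Note on the hunk above: each JSON display is stored in the shared interface_components dict so that wiring code later in the function, outside these Accordion blocks, can attach the components as event outputs. A minimal sketch of the registry pattern, with hypothetical names that are not from the commit:

# Sketch only; mirrors the component-registry pattern used above.
import gradio as gr

components = {}

with gr.Blocks() as demo:
    with gr.Accordion("Details", open=False):
        detail_view = gr.JSON(show_label=False)
    components['detail_view'] = detail_view   # keep a handle for wiring later

    btn = gr.Button("Go")
    # Elsewhere in the same Blocks context, the stored handle is still usable:
    btn.click(fn=lambda: {"ok": True}, inputs=None, outputs=[components['detail_view']])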
@@ -270,10 +273,19 @@ def create_mobile_optimized_interface():
        # Wire up the submit handler INSIDE the gr.Blocks context
        if 'send_btn' in interface_components and 'message_input' in interface_components and 'chatbot' in interface_components:
            # Connect the submit handler with the GPU-decorated function
+           # Include Details tab components as outputs
+           outputs = [interface_components['chatbot'], interface_components['message_input']]
+           if 'reasoning_display' in interface_components:
+               outputs.append(interface_components['reasoning_display'])
+           if 'performance_display' in interface_components:
+               outputs.append(interface_components['performance_display'])
+           if 'context_display' in interface_components:
+               outputs.append(interface_components['context_display'])
+
            interface_components['send_btn'].click(
                fn=chat_handler_fn,
                inputs=[interface_components['message_input'], interface_components['chatbot']],
-               outputs=
+               outputs=outputs
            )

    return demo, interface_components
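Gradio maps the values returned by a click handler positionally onto the outputs list, so the handler must return exactly as many values as there are output components. Because the outputs list above is built conditionally, the handler's return arity has to match whatever ends up in it. A minimal sketch of that contract in a recent Gradio version; the names and values here are illustrative, not from the commit:

import gradio as gr

def handler(message, history):
    # One return value per output component, in the same order as `outputs`.
    new_history = (history or []) + [{"role": "user", "content": message}]
    return new_history, "", {"step": "demo"}, {"latency_s": 0.0}, {"session": "demo"}

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    box = gr.Textbox()
    reasoning = gr.JSON()
    perf = gr.JSON()
    ctx = gr.JSON()
    send = gr.Button("Send")
    send.click(fn=handler, inputs=[box, chatbot],
               outputs=[chatbot, box, reasoning, perf, ctx])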
@@ -305,15 +317,16 @@ def setup_event_handlers(demo, event_handlers):

    return demo

-async def process_message_async(message: str, history: Optional[List], session_id: str) -> Tuple[List, str]:
+async def process_message_async(message: str, history: Optional[List], session_id: str) -> Tuple[List, str, dict, dict, dict]:
    """
    Process message with full orchestration system
-   Returns (updated_history, empty_string)
+   Returns (updated_history, empty_string, reasoning_data, performance_data, context_data)

    GUARANTEES:
    - Always returns a response (never None or empty)
    - Handles all error cases gracefully
    - Provides fallback responses at every level
+   - Returns metadata for Details tab
    """
    global orchestrator

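The widened return type means every caller now unpacks five values. A minimal usage sketch of the new signature; the message, session id, and wrapper function here are hypothetical:

# Hypothetical caller, not part of the commit.
import asyncio

async def demo_call():
    history, cleared_input, reasoning, performance, context = await process_message_async(
        "What can you do?", history=[], session_id="demo-session"
    )
    print(reasoning, performance, context)

# asyncio.run(demo_call())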
@@ -324,7 +337,7 @@ async def process_message_async(message: str, history: Optional[List], session_i

    if not message or not message.strip():
        logger.debug("Empty message received")
-       return history if history else [], ""
+       return history if history else [], "", {}, {}, {}

    if history is None:
        history = []
@@ -334,6 +347,11 @@ async def process_message_async(message: str, history: Optional[List], session_i
    # Add user message
    new_history.append({"role": "user", "content": message.strip()})

+   # Initialize Details tab data
+   reasoning_data = {}
+   performance_data = {}
+   context_data = {}
+
    # GUARANTEE: Always get a response
    response = "Hello! I'm processing your request..."

@@ -349,6 +367,7 @@ async def process_message_async(message: str, history: Optional[List], session_i

        # Extract response from result with multiple fallback checks
        if isinstance(result, dict):
+           # Extract the text response (not the dict)
            response = (
                result.get('response') or
                result.get('final_response') or
@@ -356,6 +375,28 @@ async def process_message_async(message: str, history: Optional[List], session_i
                result.get('original_response') or
                str(result.get('result', ''))
            )
+
+           # Extract metadata for Details tab
+           reasoning_data = {
+               "intent": result.get('metadata', {}).get('intent', 'unknown'),
+               "execution_plan": result.get('metadata', {}).get('execution_plan', {}),
+               "processing_steps": result.get('metadata', {}).get('processing_steps', [])
+           }
+
+           performance_data = {
+               "agent_trace": result.get('agent_trace', []),
+               "processing_time": result.get('metadata', {}).get('processing_time', 0),
+               "token_count": result.get('metadata', {}).get('token_count', 0),
+               "confidence_score": result.get('confidence_score', 0.7),
+               "agents_used": result.get('metadata', {}).get('agents_used', [])
+           }
+
+           context_data = {
+               "interaction_id": result.get('interaction_id', 'unknown'),
+               "session_id": session_id,
+               "timestamp": result.get('timestamp', ''),
+               "warnings": result.get('metadata', {}).get('warnings', [])
+           }
        else:
            response = str(result) if result else "Processing complete."

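The extraction above assumes the orchestrator result is a dict roughly shaped like the sketch below. The keys are inferred from the .get() calls in this hunk and from _format_final_output in orchestrator_engine.py; the values are illustrative, so treat this as a sketch rather than a schema:

# Assumed shape of `result` (keys inferred from the reads above; values invented).
result = {
    "interaction_id": "a1b2c3d4",
    "response": "Here is the answer...",
    "confidence_score": 0.7,
    "agent_trace": [],
    "timestamp": "2024-01-01T00:00:00",
    "metadata": {
        "intent": "question_answering",
        "execution_plan": {},
        "processing_steps": ["Intent recognition", "Agent execution"],
        "processing_time": 1.23,
        "token_count": 0,
        "agents_used": ["planner", "writer"],
        "warnings": [],
    },
}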
@@ -372,16 +413,18 @@ async def process_message_async(message: str, history: Optional[List], session_i
            logger.error(f"Orchestrator error: {orch_error}", exc_info=True)
            # Fallback response with error info
            response = f"I'm experiencing some technical difficulties. Your message was: '{message[:100]}...' Please try again or rephrase your question."
+           reasoning_data = {"error": str(orch_error)}
        else:
            # Fallback placeholder - always informative
            logger.info("Using placeholder response (orchestrator unavailable)")
            response = f"Hello! I received your message: '{message}'.\n\nThe full AI orchestration system is {'available but not loaded' if orchestrator_available else 'not available'}."
+           reasoning_data = {"status": "orchestrator_unavailable"}

        # Add assistant response
        new_history.append({"role": "assistant", "content": response})
        logger.info("✓ Message processing complete")

-       return new_history, ""
+       return new_history, "", reasoning_data, performance_data, context_data

    except Exception as e:
        # FINAL FALLBACK: Always return something to user
@@ -398,11 +441,12 @@ async def process_message_async(message: str, history: Optional[List], session_i
        )
        error_history.append({"role": "assistant", "content": error_message})

-       return error_history, ""
+       return error_history, "", {"error": str(e)}, {}, {}

-def process_message(message: str, history: Optional[List]) -> Tuple[List, str]:
+def process_message(message: str, history: Optional[List]) -> Tuple[List, str, dict, dict, dict]:
    """
    Synchronous wrapper for async processing
+   Returns (history, empty_string, reasoning_data, performance_data, context_data)
    """
    import asyncio

@@ -420,7 +464,7 @@ def process_message(message: str, history: Optional[List]) -> Tuple[List, str]:
        error_history = list(history) if history else []
        error_history.append({"role": "user", "content": message})
        error_history.append({"role": "assistant", "content": f"Error: {str(e)}"})
-       return error_history, ""
+       return error_history, "", {"error": str(e)}, {}, {}

# Decorate the chat handler with GPU if available
if SPACES_GPU_AVAILABLE and GPU is not None:
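The body of process_message is not shown in this diff beyond `import asyncio` and its error path. A minimal sketch of a sync wrapper that would satisfy the new five-value contract; the event-loop handling and the default session id are assumptions, not the commit's code:

def process_message(message, history):
    import asyncio
    try:
        # Assumes no event loop is already running in this thread.
        return asyncio.run(
            process_message_async(message, history, session_id="default")
        )
    except Exception as e:
        # Mirrors the diff's own error path: five values, in output order.
        error_history = list(history) if history else []
        error_history.append({"role": "user", "content": message})
        error_history.append({"role": "assistant", "content": f"Error: {str(e)}"})
        return error_history, "", {"error": str(e)}, {}, {}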
orchestrator_engine.py
CHANGED
@@ -1,6 +1,7 @@
# orchestrator_engine.py
import uuid
import logging
+import time
from datetime import datetime

logger = logging.getLogger(__name__)
@@ -20,6 +21,8 @@ class MVPOrchestrator:
        logger.info(f"Processing request for session {session_id}")
        logger.info(f"User input: {user_input[:100]}")

+       start_time = time.time()
+
        try:
            # Step 1: Generate unique interaction ID
            interaction_id = self._generate_interaction_id(session_id)
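time.time() is wall-clock time; for measuring elapsed durations, time.perf_counter() (or time.monotonic()) is usually preferred because it cannot jump backwards if the system clock changes. A drop-in sketch of that alternative, not what the commit uses:

import time

start = time.perf_counter()
# ... do work ...
processing_time = time.perf_counter() - start  # elapsed seconds, monotonic clock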
@@ -62,16 +65,40 @@ class MVPOrchestrator:
                context=context
            )

-           result = self._format_final_output(safety_checked, interaction_id)
+           processing_time = time.time() - start_time
+
+           result = self._format_final_output(safety_checked, interaction_id, {
+               'intent': intent_result.get('primary_intent', 'unknown'),
+               'execution_plan': execution_plan,
+               'processing_steps': [
+                   'Context management',
+                   'Intent recognition',
+                   'Execution planning',
+                   'Agent execution',
+                   'Response synthesis',
+                   'Safety check'
+               ],
+               'processing_time': processing_time,
+               'agents_used': list(self.agents.keys())
+           })
            logger.info(f"Request processing complete. Response length: {len(str(result.get('response', '')))}")
            return result

        except Exception as e:
            logger.error(f"Error in process_request: {e}", exc_info=True)
+           processing_time = time.time() - start_time
            return {
                "response": f"Error processing request: {str(e)}",
                "error": str(e),
-               "interaction_id": str(uuid.uuid4())[:8]
+               "interaction_id": str(uuid.uuid4())[:8],
+               "agent_trace": [],
+               "timestamp": datetime.now().isoformat(),
+               "metadata": {
+                   "agents_used": [],
+                   "processing_time": processing_time,
+                   "token_count": 0,
+                   "warnings": []
+               }
            }

    def _generate_interaction_id(self, session_id: str) -> str:
@@ -100,7 +127,7 @@ class MVPOrchestrator:
        # TODO: Implement parallel/sequential agent execution
        return {}

-   def _format_final_output(self, response: dict, interaction_id: str) -> dict:
+   def _format_final_output(self, response: dict, interaction_id: str, additional_metadata: dict = None) -> dict:
        """
        Format final output with tracing and metadata
        """
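Using `additional_metadata: dict = None` rather than `= {}` avoids Python's mutable-default-argument pitfall, and the truthiness check later in the method keeps the merge explicit. A tiny sketch of the idiom with a generic function name, not the commit's implementation:

# Sketch of the None-default idiom.
def format_output(response: dict, additional_metadata: dict = None) -> dict:
    metadata = {"warnings": response.get("warnings", [])}
    if additional_metadata:          # None and {} both skip the merge
        metadata.update(additional_metadata)
    return {"metadata": metadata}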
@@ -116,6 +143,23 @@ class MVPOrchestrator:
        if not response_text:
            response_text = "I apologize, but I'm having trouble generating a response right now. Please try again."

+       # Extract warnings from safety check result
+       warnings = []
+       if "warnings" in response:
+           warnings = response["warnings"] if isinstance(response["warnings"], list) else []
+
+       # Build metadata dict
+       metadata = {
+           "agents_used": response.get("agents_used", []),
+           "processing_time": response.get("processing_time", 0),
+           "token_count": response.get("token_count", 0),
+           "warnings": warnings
+       }
+
+       # Merge in any additional metadata
+       if additional_metadata:
+           metadata.update(additional_metadata)
+
        return {
            "interaction_id": interaction_id,
            "response": response_text,
@@ -123,12 +167,7 @@ class MVPOrchestrator:
            "confidence_score": response.get("confidence_score", 0.7),
            "agent_trace": self.execution_trace,
            "timestamp": datetime.now().isoformat(),
-           "metadata": {
-               "agents_used": response.get("agents_used", []),
-               "processing_time": response.get("processing_time", 0),
-               "token_count": response.get("token_count", 0),
-               "warnings": response.get("warnings", [])
-           }
+           "metadata": metadata
        }

    def get_execution_trace(self) -> list:
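Taken together, the two files now agree on one result shape: _format_final_output nests per-request metadata under "metadata", and process_message_async reads the same keys back out for the Details tab. A hedged smoke check of the new merge behaviour; the no-argument constructor and the use of a "response" key for the response text are assumptions, so adjust to the real class before running:

# Hypothetical smoke test of the metadata merge (assumptions noted above).
orch = MVPOrchestrator()
out = orch._format_final_output(
    {"response": "Done.", "agents_used": ["planner"], "processing_time": 0.42, "warnings": []},
    interaction_id="a1b2c3d4",
    additional_metadata={"intent": "smalltalk"},
)
assert out["metadata"]["intent"] == "smalltalk"        # merged from additional_metadata
assert out["metadata"]["agents_used"] == ["planner"]   # taken from the response dict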