import os
import re
import shutil
import tempfile
import yaml
from pathlib import Path
from typing import Generator
from smolagents import CodeAgent, LiteLLMModel, PlanningStep
from smolagents.agent_types import AgentAudio, AgentImage, AgentText
from smolagents.memory import ActionStep, FinalAnswerStep, MemoryStep
from smolagents.models import ChatMessageStreamDelta
from smolagents.utils import _is_package_available
from tools import search, scraper, FileReader, FileWriter, CommitChanges, get_repository_structure # Assuming tools.py is in the same directory
def get_step_footnote_content(step_log: MemoryStep, step_name: str) -> str:
"""Get a footnote string for a step log with duration and token information"""
step_footnote = f"**{step_name}**"
if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
token_str = f" | Input tokens: {step_log.input_token_count:,} | Output tokens: {step_log.output_token_count:,}"
step_footnote += token_str
if hasattr(step_log, "duration"):
step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
step_footnote += step_duration
step_footnote_content = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
return step_footnote_content
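# Illustrative output (hypothetical token counts; the span styling is fixed):
#   get_step_footnote_content(step_log, "Step 3")
#   -> '<span style="color: #bbbbc2; font-size: 12px;">**Step 3** | Input tokens: 1,234 | Output tokens: 567 | Duration: 1.25s</span> '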
def _clean_model_output(model_output: str) -> str:
"""
Clean up model output by removing trailing tags and extra backticks.
Args:
model_output (`str`): Raw model output.
Returns:
`str`: Cleaned model output.
"""
if not model_output:
return ""
model_output = model_output.strip()
# Remove any trailing <end_code> and extra backticks, handling multiple possible formats
model_output = re.sub(r"```\s*<end_code>", "```", model_output) # handles ```<end_code>
model_output = re.sub(r"<end_code>\s*```", "```", model_output) # handles <end_code>```
model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output) # handles ```\n<end_code>
return model_output.strip()
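# A quick sanity check of the cleanup (made-up model output):
#   _clean_model_output("Thought: print it\n```python\nprint('hi')\n```<end_code>")
#   -> "Thought: print it\n```python\nprint('hi')\n```"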
def _format_code_content(content: str) -> str:
"""
Format code content as Python code block if it's not already formatted.
Args:
content (`str`): Code content to format.
Returns:
`str`: Code content formatted as a Python code block.
"""
content = content.strip()
# Remove existing code blocks and end_code tags
content = re.sub(r"```.*?\n", "", content)
content = re.sub(r"\s*<end_code>\s*", "", content)
content = content.strip()
# Add Python code block formatting if not already present
if not content.startswith("```python"):
content = f"```python\n{content}\n```"
return content
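# Illustrative example (made-up input): fences and <end_code> tags are stripped,
# then the code is re-wrapped in a single Python block:
#   _format_code_content("```python\nprint('hi')\n```\n<end_code>")
#   -> "```python\nprint('hi')\n```"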
def _process_action_step(step_log: ActionStep, skip_model_outputs: bool = False) -> Generator:
"""
Process an [`ActionStep`] and yield appropriate Gradio ChatMessage objects.
Args:
step_log ([`ActionStep`]): ActionStep to process.
skip_model_outputs (`bool`): Whether to skip model outputs.
Yields:
`gradio.ChatMessage`: Gradio ChatMessages representing the action step.
"""
import gradio as gr
# Output the step number
step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else "Step"
if not skip_model_outputs:
yield gr.ChatMessage(role="assistant", content=f"**{step_number}**", metadata={"status": "done"})
# First yield the thought/reasoning from the LLM
if not skip_model_outputs and getattr(step_log, "model_output", ""):
model_output = _clean_model_output(step_log.model_output)
yield gr.ChatMessage(role="assistant", content=model_output, metadata={"status": "done"})
# For tool calls, create a parent message
if getattr(step_log, "tool_calls", []):
first_tool_call = step_log.tool_calls[0]
used_code = first_tool_call.name == "python_interpreter"
# Process arguments based on type
args = first_tool_call.arguments
if isinstance(args, dict):
content = str(args.get("answer", str(args)))
else:
content = str(args).strip()
# Format code content if needed
if used_code:
content = _format_code_content(content)
# Create the tool call message
parent_message_tool = gr.ChatMessage(
role="assistant",
content=content,
metadata={
"title": f"๐ ๏ธ Used tool {first_tool_call.name}",
"status": "done",
},
)
yield parent_message_tool
# Display execution logs if they exist
if getattr(step_log, "observations", "") and step_log.observations.strip():
log_content = step_log.observations.strip()
if log_content:
log_content = re.sub(r"^Execution logs:\s*", "", log_content)
            yield gr.ChatMessage(
                role="assistant",
                content=f"```bash\n{log_content}\n```",
                metadata={"title": "📝 Execution Logs", "status": "done"},
)
# Display any images in observations
if getattr(step_log, "observations_images", []):
for image in step_log.observations_images:
path_image = AgentImage(image).to_string()
yield gr.ChatMessage(
role="assistant",
content={"path": path_image, "mime_type": f"image/{path_image.split('.')[-1]}"},
metadata={"title": "๐ผ๏ธ Output Image", "status": "done"},
)
# Handle errors
if getattr(step_log, "error", None):
yield gr.ChatMessage(
role="assistant", content=str(step_log.error), metadata={"title": "๐ฅ Error", "status": "done"}
)
# Add step footnote and separator
yield gr.ChatMessage(
role="assistant", content=get_step_footnote_content(step_log, step_number), metadata={"status": "done"}
)
yield gr.ChatMessage(role="assistant", content="-----", metadata={"status": "done"})
def _process_planning_step(step_log: PlanningStep, skip_model_outputs: bool = False) -> Generator:
"""
Process a [`PlanningStep`] and yield appropriate gradio.ChatMessage objects.
Args:
        step_log ([`PlanningStep`]): PlanningStep to process.
        skip_model_outputs (`bool`): Whether to skip model outputs (e.g. when they have already been streamed).
Yields:
`gradio.ChatMessage`: Gradio ChatMessages representing the planning step.
"""
import gradio as gr
if not skip_model_outputs:
yield gr.ChatMessage(role="assistant", content="**Planning step**", metadata={"status": "done"})
yield gr.ChatMessage(role="assistant", content=step_log.plan, metadata={"status": "done"})
yield gr.ChatMessage(
role="assistant", content=get_step_footnote_content(step_log, "Planning step"), metadata={"status": "done"}
)
yield gr.ChatMessage(role="assistant", content="-----", metadata={"status": "done"})
def _process_final_answer_step(step_log: FinalAnswerStep) -> Generator:
"""
Process a [`FinalAnswerStep`] and yield appropriate gradio.ChatMessage objects.
Args:
step_log ([`FinalAnswerStep`]): FinalAnswerStep to process.
Yields:
`gradio.ChatMessage`: Gradio ChatMessages representing the final answer.
"""
import gradio as gr
final_answer = step_log.final_answer
if isinstance(final_answer, AgentText):
yield gr.ChatMessage(
role="assistant",
content=f"**Final answer:**\n{final_answer.to_string()}\n",
metadata={"status": "done"},
)
elif isinstance(final_answer, AgentImage):
yield gr.ChatMessage(
role="assistant",
content={"path": final_answer.to_string(), "mime_type": "image/png"},
metadata={"status": "done"},
)
elif isinstance(final_answer, AgentAudio):
yield gr.ChatMessage(
role="assistant",
content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
metadata={"status": "done"},
)
else:
yield gr.ChatMessage(
role="assistant", content=f"**Final answer:** {str(final_answer)}", metadata={"status": "done"}
)
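# Non-text answers are rendered as Gradio file messages, e.g. an AgentImage
# final answer becomes (illustrative path): {"path": "/tmp/out.png", "mime_type": "image/png"}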
def pull_messages_from_step(step_log: MemoryStep, skip_model_outputs: bool = False):
"""Extract ChatMessage objects from agent steps with proper nesting.
Args:
step_log: The step log to display as gr.ChatMessage objects.
skip_model_outputs: If True, skip the model outputs when creating the gr.ChatMessage objects:
This is used for instance when streaming model outputs have already been displayed.
"""
if not _is_package_available("gradio"):
raise ModuleNotFoundError(
"Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
)
if isinstance(step_log, ActionStep):
yield from _process_action_step(step_log, skip_model_outputs)
elif isinstance(step_log, PlanningStep):
yield from _process_planning_step(step_log, skip_model_outputs)
elif isinstance(step_log, FinalAnswerStep):
yield from _process_final_answer_step(step_log)
else:
raise ValueError(f"Unsupported step type: {type(step_log)}")
def stream_to_gradio(
agent,
task: str,
task_images: list | None = None,
reset_agent_memory: bool = False,
additional_args: dict | None = None,
):
"""Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
if not _is_package_available("gradio"):
raise ModuleNotFoundError(
"Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
)
intermediate_text = ""
for step_log in agent.run(
task, images=task_images, stream=True, reset=reset_agent_memory, additional_args=additional_args
):
# Track tokens if model provides them
if getattr(agent.model, "last_input_token_count", None) is not None:
if isinstance(step_log, (ActionStep, PlanningStep)):
step_log.input_token_count = agent.model.last_input_token_count
step_log.output_token_count = agent.model.last_output_token_count
if isinstance(step_log, MemoryStep):
intermediate_text = ""
for message in pull_messages_from_step(
step_log,
# If we're streaming model outputs, no need to display them twice
skip_model_outputs=getattr(agent, "stream_outputs", False),
):
yield message
elif isinstance(step_log, ChatMessageStreamDelta):
intermediate_text += step_log.content or ""
yield intermediate_text
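# Hedged usage sketch (illustrative, outside the UI): the generator yields
# gr.ChatMessage objects for completed steps and plain strings for in-progress
# streaming deltas, so a consumer must handle both.
#
#   agent = CodeAgent(model=LiteLLMModel(model_id="gemini/gemini-2.0-flash"), tools=[])
#   for msg in stream_to_gradio(agent, task="Say hello"):
#       print(type(msg).__name__)  # ChatMessage or str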
class GradioUI:
"""A one-line interface to launch your agent in Gradio"""
def __init__(self, agent_name: str = "Agent interface", agent_description: str | None = None):
if not _is_package_available("gradio"):
raise ModuleNotFoundError(
"Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
)
self.agent_name = agent_name
self.agent_description = agent_description
def _initialize_agents(self, space_id: str, temp_dir: str):
"""Initializes agents and tools for a given space_id and temporary directory."""
        # Initialize the LLM (LiteLLM reads the Gemini key from the GEMINI_API_KEY environment variable)
        model = LiteLLMModel(model_id="gemini/gemini-2.0-flash")
# Get repository structure
repo_structure = get_repository_structure(space_id=space_id)
# Load prompt templates
try:
with open("planning_agent_prompt_templates.yaml", "r") as f:
planning_agent_prompt_templates = yaml.safe_load(f)
with open("swe_agent_prompt_templates.yaml", "r") as f:
swe_agent_prompt_templates = yaml.safe_load(f)
except FileNotFoundError as e:
print(f"Error loading prompt templates: {e}")
print("Please ensure 'planning_agent_prompt_templates.yaml' and 'swe_agent_prompt_templates.yaml' are in the same directory.")
return None, None # Indicate failure
# Enhance prompts with repository structure
planning_agent_prompt_templates["system_prompt"] = planning_agent_prompt_templates["system_prompt"] + "\n\n\n" + repo_structure + "\n\n"
swe_agent_prompt_templates["system_prompt"] = swe_agent_prompt_templates["system_prompt"] + "\n\n\n" + repo_structure + "\n\n"
# Initialize tool instances with temp directory
read_file = FileReader(space_id=space_id, folder_path=temp_dir)
write_file = FileWriter(space_id=space_id, folder_path=temp_dir)
commit_changes = CommitChanges(space_id=space_id, folder_path=temp_dir)
# Initialize SWE Agent with enhanced capabilities and improved description
swe_agent = CodeAgent(
model=model,
prompt_templates=swe_agent_prompt_templates,
verbosity_level=1,
tools=[search, scraper, read_file, write_file],
name="swe_agent",
description="An expert Software Engineer capable of designing, developing, and debugging code. This agent can read and write files, search the web, and scrape web content to assist in coding tasks. It excels at implementing detailed technical specifications provided by the Planning Agent."
)
# Initialize Planning Agent with improved planning focus and description
planning_agent = CodeAgent(
model=model,
prompt_templates=planning_agent_prompt_templates,
verbosity_level=1,
tools=[search, scraper, read_file, write_file, commit_changes],
managed_agents=[swe_agent],
name="planning_agent",
description="A high-level planning agent responsible for breaking down complex user requests into actionable steps and delegating coding tasks to the Software Engineer Agent. It focuses on strategy, task decomposition, and coordinating the overall development process.",
stream_outputs=True
)
return planning_agent, swe_agent
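    # Resulting hierarchy (sketch): planning_agent delegates coding work to
    # swe_agent through smolagents' managed_agents mechanism, and keeps the
    # commit_changes tool to itself so only the planner can push to the Space.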
def _update_space_id_and_agents(self, new_space_id: str, current_state: dict):
"""Handles space_id change, cleans up old temp dir, creates new, and re-initializes agents."""
import gradio as gr
old_temp_dir = current_state.get("temp_dir")
if old_temp_dir and os.path.exists(old_temp_dir):
print(f"Cleaning up old temporary directory: {old_temp_dir}")
shutil.rmtree(old_temp_dir)
if not new_space_id:
current_state["space_id"] = None
current_state["temp_dir"] = None
current_state["agent"] = None
current_state["file_upload_folder"] = None
print("Space ID is empty. Agents are not initialized.")
return new_space_id, None, None, None, gr.Textbox("Please enter a Space ID to initialize agents.", visible=True)
# Create new temporary directory
temp_dir = tempfile.mkdtemp(prefix=f"ai_workspace_{new_space_id.replace('/', '_')}_")
file_upload_folder = Path(temp_dir) / "uploads"
file_upload_folder.mkdir(parents=True, exist_ok=True)
# Initialize agents
planning_agent, _ = self._initialize_agents(new_space_id, temp_dir)
if planning_agent is None:
# Handle initialization failure
shutil.rmtree(temp_dir) # Clean up the newly created temp dir
current_state["space_id"] = None
current_state["temp_dir"] = None
current_state["agent"] = None
current_state["file_upload_folder"] = None
return new_space_id, None, None, None, gr.Textbox("Failed to initialize agents. Check console for errors.", visible=True)
# Update session state
current_state["space_id"] = new_space_id
current_state["temp_dir"] = temp_dir
current_state["agent"] = planning_agent
current_state["file_upload_folder"] = file_upload_folder
print(f"Initialized agents for Space ID: {new_space_id} in {temp_dir}")
return new_space_id, [], [], file_upload_folder, gr.Textbox(f"Agents initialized for Space ID: {new_space_id}", visible=True)
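    # The 5-tuple returned above maps positionally onto the .change() outputs
    # wired in create_app(): (space_id_state, stored_messages, file_uploads_log,
    # file_upload_folder_state, initialization_status).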
def interact_with_agent(self, prompt, messages, session_state):
import gradio as gr
agent = session_state.get("agent")
if agent is None:
messages.append(gr.ChatMessage(role="assistant", content="Please enter a Space ID and initialize the agents first."))
yield messages
return
try:
messages.append(gr.ChatMessage(role="user", content=prompt, metadata={"status": "done"}))
yield messages
for msg in stream_to_gradio(agent, task=prompt, reset_agent_memory=False):
if isinstance(msg, gr.ChatMessage):
messages.append(msg)
                elif isinstance(msg, str):  # A streaming text delta: update or create the pending message
                    if messages and messages[-1].metadata["status"] == "pending":
                        messages[-1].content = msg
                    else:
                        messages.append(
                            gr.ChatMessage(role="assistant", content=msg, metadata={"status": "pending"})
                        )
yield messages
yield messages
except Exception as e:
print(f"Error in interaction: {str(e)}")
messages.append(gr.ChatMessage(role="assistant", content=f"Error: {str(e)}"))
yield messages
def upload_file(self, file, file_uploads_log, session_state, allowed_file_types=None):
"""
Handle file uploads, default allowed types are .pdf, .docx, and .txt
"""
import gradio as gr
file_upload_folder = session_state.get("file_upload_folder")
if file_upload_folder is None:
return gr.Textbox("Please enter a Space ID and initialize agents before uploading files.", visible=True), file_uploads_log
if file is None:
return gr.Textbox(value="No file uploaded", visible=True), file_uploads_log
if allowed_file_types is None:
allowed_file_types = [".pdf", ".docx", ".txt"]
file_ext = os.path.splitext(file.name)[1].lower()
if file_ext not in allowed_file_types:
return gr.Textbox("File type disallowed", visible=True), file_uploads_log
# Sanitize file name
original_name = os.path.basename(file.name)
sanitized_name = re.sub(
r"[^\w\-.]", "_", original_name
) # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores
# Save the uploaded file to the specified folder
        file_path = os.path.join(file_upload_folder, sanitized_name)
shutil.copy(file.name, file_path)
return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
def log_user_message(self, text_input, file_uploads_log):
import gradio as gr
return (
text_input
+ (
f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
if len(file_uploads_log) > 0
else ""
),
"",
gr.Button(interactive=False),
)
def launch(self, share: bool = True, **kwargs):
self.create_app().launch(debug=True, share=share, **kwargs)
def create_app(self):
import gradio as gr
with gr.Blocks(theme="ocean", fill_height=True) as demo:
# Add session state to store session-specific data
session_state = gr.State({})
stored_messages = gr.State([])
file_uploads_log = gr.State([])
            space_id_state = gr.State(None)  # Current space_id (temp dir and agents live in session_state)
            file_upload_folder_state = gr.State(None)  # Current file upload folder
with gr.Sidebar():
gr.Markdown(
f"# {self.agent_name.replace('_', ' ').capitalize()}"
"\n> This web ui allows you to interact with a `smolagents` agent that can use tools and execute steps to complete tasks."
+ (f"\n\n**Agent description:**\n{self.agent_description}" if self.agent_description else "")
)
# Add Space ID input
space_id_input = gr.Textbox(
label="Hugging Face Space ID",
placeholder="Enter your Space ID (e.g., username/space-name)",
interactive=True
)
initialization_status = gr.Textbox(label="Initialization Status", interactive=False, visible=True)
# Trigger agent initialization when space_id changes
space_id_input.change(
self._update_space_id_and_agents,
[space_id_input, session_state],
[space_id_state, stored_messages, file_uploads_log, file_upload_folder_state, initialization_status]
)
with gr.Group():
gr.Markdown("**Your request**", container=True)
text_input = gr.Textbox(
lines=3,
label="Chat Message",
container=False,
placeholder="Enter your prompt here and press Shift+Enter or press the button",
)
submit_btn = gr.Button("Submit", variant="primary")
gr.HTML(
"<br><br><h4><center>Powered by <a target='_blank' href='https://github.com/huggingface/smolagents'><b>smolagents</b></a></center></h4>"
)
# Main chat interface
chatbot = gr.Chatbot(
label="Agent",
type="messages",
avatar_images=(
None,
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
),
resizeable=True,
scale=1,
)
# Set up event handlers
text_input.submit(
self.log_user_message,
[text_input, file_uploads_log],
[stored_messages, text_input, submit_btn],
).then(self.interact_with_agent, [stored_messages, chatbot, session_state], [chatbot]).then(
lambda: (
gr.Textbox(
interactive=True, placeholder="Enter your prompt here and press Shift+Enter or the button"
),
gr.Button(interactive=True),
),
None,
[text_input, submit_btn],
)
submit_btn.click(
self.log_user_message,
[text_input, file_uploads_log],
[stored_messages, text_input, submit_btn],
).then(self.interact_with_agent, [stored_messages, chatbot, session_state], [chatbot]).then(
lambda: (
gr.Textbox(
interactive=True, placeholder="Enter your prompt here and press Shift+Enter or the button"
),
gr.Button(interactive=True),
),
None,
[text_input, submit_btn],
)
return demo
def run_gradio_agent():
"""Runs the Gradio UI for the agent."""
# The GradioUI class now handles space_id input and agent initialization internally
GradioUI(
agent_name="SmolAgents Gradio Interface",
agent_description="Interact with a SmolAgents planning agent capable of code generation and execution."
).launch()
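# Note: with a "gemini/..." model id, LiteLLM expects the GEMINI_API_KEY
# environment variable to be set before launching, e.g.:
#   GEMINI_API_KEY=... python app.py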
if __name__ == "__main__":
# The script now directly launches the Gradio UI
run_gradio_agent() |