David de la Iglesia Castro committed:

Use `any-agent`. (#38)

* Cleanup config
* Use any_agent lib
* Add SSH step
* Move ssh inside install
* Fix
* Fix docs and tests
- .github/workflows/docs.yaml +5 -2
- .github/workflows/tests.yaml +4 -2
- .gitignore +1 -0
- docs/api.md +0 -26
- examples/langchain_single_agent.yaml +7 -10
- examples/langchain_single_agent_vertical.yaml +9 -10
- examples/openai_multi_agent.yaml +37 -7
- examples/openai_single_agent.yaml +7 -10
- examples/openai_single_agent_vertical.yaml +7 -7
- examples/smolagents_single_agent.yaml +7 -7
- examples/smolagents_single_agent_mcp.yaml +0 -12
- examples/smolagents_single_agent_vertical.yaml +10 -10
- pyproject.toml +1 -26
- src/surf_spot_finder/agents/__init__.py +0 -30
- src/surf_spot_finder/agents/langchain.py +0 -70
- src/surf_spot_finder/agents/openai.py +0 -187
- src/surf_spot_finder/agents/smolagents.py +0 -100
- src/surf_spot_finder/cli.py +27 -68
- src/surf_spot_finder/config.py +17 -46
- src/surf_spot_finder/{prompts → instructions}/__init__.py +0 -0
- src/surf_spot_finder/{prompts → instructions}/openai.py +0 -0
- src/surf_spot_finder/instructions/shared.py +0 -0
- src/surf_spot_finder/{prompts → instructions}/smolagents.py +0 -0
- src/surf_spot_finder/prompts/shared.py +0 -5
- src/surf_spot_finder/tools/__init__.py +5 -12
- src/surf_spot_finder/tools/user_interaction.py +0 -30
- src/surf_spot_finder/tools/web_browsing.py +0 -54
- src/surf_spot_finder/tools/wrappers.py +0 -41
- src/surf_spot_finder/tracing.py +0 -114
- tests/integration/agents/test_integration_openai.py +0 -34
- tests/integration/agents/test_integration_smolagents.py +0 -67
- tests/unit/agents/test_unit_langchain.py +0 -33
- tests/unit/agents/test_unit_openai.py +0 -110
- tests/unit/agents/test_unit_smolagents.py +0 -71
- tests/unit/test_unit_tracing.py +0 -47
.github/workflows/docs.yaml
CHANGED
@@ -36,8 +36,11 @@ jobs:
       git config user.name 'github-actions[bot]'
       git config user.email 'github-actions[bot]@users.noreply.github.com'

-      - name: Install requirements
-        run:
+      - name: Install requirements # TODO: Remove SSF_SSH_TOKEN when repo becomes public
+        run: |
+          eval `ssh-agent -s`
+          ssh-add - <<< '${{ secrets.SSF_SSH_TOKEN }}'
+          pip install -e '.[docs]'

       - name: Build docs
         if: github.event_name == 'pull_request'
.github/workflows/tests.yaml
CHANGED
@@ -26,11 +26,13 @@ jobs:
         with:
           python-version: '3.11'
           cache: "pip"
-
+
+      - name: Install # TODO: Remove when repo becomes public
         run: |
           eval `ssh-agent -s`
           ssh-add - <<< '${{ secrets.SSF_SSH_TOKEN }}'
-          pip install -e '.[
+          pip install -e '.[tests]'
+

       - name: Run tests
         run: pytest -v tests
.gitignore
CHANGED
@@ -168,3 +168,4 @@ cython_debug/
 .vscode/

 output
+telemetry_output
docs/api.md
CHANGED
@@ -4,34 +4,8 @@

 ::: surf_spot_finder.config.Config

-## Agents
-
-::: surf_spot_finder.agents.RUNNERS
-
-::: surf_spot_finder.agents.langchain
-
-::: surf_spot_finder.agents.openai
-
-::: surf_spot_finder.agents.smolagents
-
 ## Tools

 ::: surf_spot_finder.tools.openmeteo

 ::: surf_spot_finder.tools.openstreetmap
-
-::: surf_spot_finder.tools.user_interaction
-
-::: surf_spot_finder.tools.web_browsing
-
-## Tracing
-
-::: surf_spot_finder.tracing
-
-## Prompts
-
-::: surf_spot_finder.prompts.openai.SINGLE_AGENT_SYSTEM_PROMPT
-
-::: surf_spot_finder.prompts.openai.MULTI_AGENT_SYSTEM_PROMPT
-
-::: surf_spot_finder.prompts.shared.INPUT_PROMPT
examples/langchain_single_agent.yaml
CHANGED
@@ -1,11 +1,8 @@
-
-
-
-
-
-
+location: Pontevedra
+date: 2025-04-02 12:00
+max_driving_hours: 2
+
+framework: langchain
+
+main_agent:
   model_id: o3-mini
-  agent_type: langchain
-  tools:
-    - "surf_spot_finder.tools.search_web"
-    - "surf_spot_finder.tools.visit_webpage"
examples/langchain_single_agent_vertical.yaml
CHANGED
@@ -1,17 +1,16 @@
-
-
-
-
-
-
+location: Pontevedra
+date: 2025-04-02 12:00
+max_driving_hours: 2
+
+framework: langchain
+
+main_agent:
   model_id: o3-mini
-  # model_id: ollama/llama3.2:3b
-  agent_type: langchain
   tools:
     - "surf_spot_finder.tools.driving_hours_to_meters"
     - "surf_spot_finder.tools.get_area_lat_lon"
     - "surf_spot_finder.tools.get_surfing_spots"
     - "surf_spot_finder.tools.get_wave_forecast"
     - "surf_spot_finder.tools.get_wind_forecast"
-    - "
-    - "
+    - "any_agent.tools.search_web"
+    - "any_agent.tools.visit_webpage"
examples/openai_multi_agent.yaml
CHANGED
@@ -1,8 +1,38 @@
-
-
-
-
-
+location: Pontevedra
+date: 2025-04-02 12:00
+max_driving_hours: 2
+
+framework: openai
+
+main_agent:
+  name: main_agent
   model_id: o3-mini
-
-#
+  instructions: >
+    # System context\n
+    You are part of a multi-agent system called the Agents SDK, designed to make agent coordination and execution easy.
+    Agents uses two primary abstraction: **Agents** and **Handoffs**.
+    An agent encompasses instructions and tools and can hand off a conversation to another agent when appropriate.
+    Handoffs are achieved by calling a handoff function, generally named `transfer_to_<agent_name>`.
+    Transfers between agents are handled seamlessly in the background; do not mention or draw attention to these transfers in your conversation with the user.\n
+
+managed_agents:
+
+  - name: user-verifiaction-agent
+    model_id: gpt-4o-mini
+    instructions: Ask users to verify a step, plan or answer.
+    tools:
+      - any_agent.tools.ask_user_verification
+
+  - name: general-web-search-agent
+    model_id: gpt-4o-mini
+    instructions: Search the web and visit webpages to find answers.
+    tools:
+      - any_agent.tools.search_web
+      - any_agent.tools.visit_webpage
+
+  - name: user-communication-agent
+    model_id: gpt-4o-mini
+    instructions: Communicates to the user
+    handoff: True
+    tools:
+      - any_agent.tools.show_final_answer
examples/openai_single_agent.yaml
CHANGED
@@ -1,11 +1,8 @@
-
-
-
-
-
-
+location: Pontevedra
+date: 2025-04-02 12:00
+max_driving_hours: 2
+
+framework: openai
+
+main_agent:
   model_id: o3-mini
-  agent_type: openai
-  tools:
-    - "surf_spot_finder.tools.search_web"
-    - "surf_spot_finder.tools.visit_webpage"
examples/openai_single_agent_vertical.yaml
CHANGED
@@ -1,11 +1,11 @@
-
-
-
-
-
-
+location: Pontevedra
+date: 2025-04-02 12:00
+max_driving_hours: 2
+
+framework: openai
+
+main_agent:
   model_id: o3-mini
-  agent_type: openai
   tools:
     - "surf_spot_finder.tools.driving_hours_to_meters"
     - "surf_spot_finder.tools.get_area_lat_lon"
examples/smolagents_single_agent.yaml
CHANGED
@@ -1,9 +1,9 @@
-
-
-
-
-
-
+location: Pontevedra
+date: 2025-04-02 12:00
+max_driving_hours: 2
+
+framework: smolagents
+
+main_agent:
   model_id: openai/o3-mini
   api_key_var: OPENAI_API_KEY
-  agent_type: smolagents
examples/smolagents_single_agent_mcp.yaml
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
input:
|
2 |
-
location: Pontevedra
|
3 |
-
date: 2025-03-27 12:00
|
4 |
-
max_driving_hours: 2
|
5 |
-
# input_prompt_template:
|
6 |
-
agent:
|
7 |
-
model_id: openai/gpt-3.5-turbo
|
8 |
-
api_key_var: OPENAI_API_KEY
|
9 |
-
agent_type: smolagents
|
10 |
-
tools:
|
11 |
-
- "smolagents.DuckDuckGoSearchTool"
|
12 |
-
- "mcp/fetch"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
examples/smolagents_single_agent_vertical.yaml
CHANGED
@@ -1,19 +1,19 @@
-
-
-
-
-
-
-
+location: Pontevedra
+date: 2025-04-02 12:00
+max_driving_hours: 2
+
+framework: smolagents
+
+main_agent:
+  model_id: openai/o3-mini
   api_key_var: OPENAI_API_KEY
-  agent_type: smolagents
   tools:
     - "surf_spot_finder.tools.driving_hours_to_meters"
     - "surf_spot_finder.tools.get_area_lat_lon"
     - "surf_spot_finder.tools.get_surfing_spots"
     - "surf_spot_finder.tools.get_wave_forecast"
     - "surf_spot_finder.tools.get_wind_forecast"
-    - "
-    - "
+    - "any_agent.tools.search_web"
+    - "any_agent.tools.visit_webpage"
     - "smolagents.PythonInterpreterTool"
     - "smolagents.FinalAnswerTool"
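The dotted tool paths used throughout these example configs ("surf_spot_finder.tools.*", "any_agent.tools.*", "smolagents.*") are resolved to callables at runtime. Below is a minimal sketch of that resolution, mirroring the import_and_wrap_tools helper removed from src/surf_spot_finder/tools/wrappers.py later in this diff; the equivalent logic now lives inside any_agent, so treat this as an illustration rather than the library's actual API.

import importlib
import inspect
from collections.abc import Callable


def resolve_tool(path: str) -> Callable:
    # Split "any_agent.tools.search_web" into module and attribute.
    module_name, attr = path.rsplit(".", 1)
    module = importlib.import_module(module_name)
    tool = getattr(module, attr)
    # Tool classes such as smolagents.PythonInterpreterTool are instantiated;
    # plain functions are returned as-is.
    if inspect.isclass(tool):
        tool = tool()
    return tool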
pyproject.toml
CHANGED
@@ -13,30 +13,10 @@ dependencies = [
     "fire",
     "loguru",
     "pydantic",
+    "pyyaml",
 ]

 [project.optional-dependencies]
-langchain = [
-    "langchain",
-    "langgraph",
-    "langchain-openai>=0.3.9",
-    "langchain-ollama>=0.3.0",
-    "openinference-instrumentation-langchain"
-]
-smolagents = [
-    "smolagents[litellm]>=1.10.0",
-    "openinference-instrumentation-smolagents>=0.1.4"
-]
-
-openai = [
-    "openai-agents",
-    "openinference-instrumentation-openai-agents>=0.1.2"
-]
-
-mcp = [
-    "mcp==1.3.0",
-]
-
 demo = [
     "gradio",
     "spaces"
@@ -55,11 +35,6 @@ tests = [
     "evaluate>=0.4.3",
 ]

-# TODO maybe we don't want to keep this, or we want to swap this to Lumigator SDK
-arize = [
-    "arize-phoenix>=8.12.1",
-]
-
 [project.urls]
 Documentation = "https://mozilla-ai.github.io/surf-spot-finder/"
 Issues = "https://github.com/mozilla-ai/surf-spot-finder/issues"
src/surf_spot_finder/agents/__init__.py
DELETED
@@ -1,30 +0,0 @@
from enum import Enum
from .langchain import run_lanchain_agent
from .openai import run_openai_agent, run_openai_multi_agent
from .smolagents import run_smolagent


# Define the available agent type enums
class AgentType(str, Enum):
    LANGCHAIN = "langchain"
    OPENAI = "openai"
    OPENAI_MULTI_AGENT = "openai_multi_agent"
    SMOLAGENTS = "smolagents"


RUNNERS = {
    AgentType.LANGCHAIN: run_lanchain_agent,
    AgentType.OPENAI: run_openai_agent,
    AgentType.SMOLAGENTS: run_smolagent,
    AgentType.OPENAI_MULTI_AGENT: run_openai_multi_agent,
}


def validate_agent_type(value: str) -> str:
    try:
        agent_type = AgentType(value)
        if agent_type not in RUNNERS:
            raise ValueError(f"agent_type {value} is valid but has no runner")
        return value
    except ValueError:
        raise ValueError(f"agent_type must be one of {[e.value for e in AgentType]}")
src/surf_spot_finder/agents/langchain.py
DELETED
@@ -1,70 +0,0 @@
import inspect
import importlib

from loguru import logger

try:
    from langchain.chat_models import init_chat_model
    from langchain_core.messages import HumanMessage
    from langchain_core.tools import BaseTool, tool
    from langgraph.checkpoint.memory import MemorySaver
    from langgraph.prebuilt import create_react_agent

    langchain_available = True
except ImportError:
    langchain_available = False

DEFAULT_RECURSION_LIMIT = 50


@logger.catch(reraise=True)
def run_lanchain_agent(
    model_id: str, prompt: str, tools: list[str] | None = None, **kwargs
):
    """Runs an langchain ReAct agent with the given prompt and configuration.

    Uses [create_react_agent](https://langchain-ai.github.io/langgraph/reference/prebuilt/#langgraph.prebuilt.chat_agent_executor.create_react_agent).

    Args:
        model_id: The ID of the model to use.
            See [init_chat_model](https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html).
        prompt: The prompt to be given to the agent.
    """
    if not langchain_available:
        raise ImportError(
            "You need to `pip install langchain langgraph` to use this agent"
        )

    if tools is None:
        tools = [
            "surf_spot_finder.tools.search_web",
            "surf_spot_finder.tools.visit_webpage",
        ]

    imported_tools = []
    for imported_tool in tools:
        module, func = imported_tool.rsplit(".", 1)
        module = importlib.import_module(module)
        imported_tool = getattr(module, func)
        if inspect.isclass(imported_tool):
            imported_tool = imported_tool()
        if not isinstance(imported_tool, BaseTool):
            imported_tool = tool(imported_tool)
        imported_tools.append((imported_tool))
    if "/" in model_id:
        model_provider, model_id = model_id.split("/")
        model = init_chat_model(model_id, model_provider=model_provider)
    else:
        model = init_chat_model(model_id)
    agent = create_react_agent(
        model=model, tools=imported_tools, checkpointer=MemorySaver()
    )
    for step in agent.stream(
        {"messages": [HumanMessage(content=prompt)]},
        {
            "configurable": {"thread_id": "abc123"},
            "recursion_limit": DEFAULT_RECURSION_LIMIT,
        },
        stream_mode="values",
    ):
        step["messages"][-1].pretty_print()
src/surf_spot_finder/agents/openai.py
DELETED
@@ -1,187 +0,0 @@
import os
from typing import Optional

from loguru import logger

from surf_spot_finder.prompts.openai import (
    SINGLE_AGENT_SYSTEM_PROMPT,
    MULTI_AGENT_SYSTEM_PROMPT,
)
from surf_spot_finder.tools.wrappers import import_and_wrap_tools, wrap_tool_openai


try:
    from agents import (
        Agent,
        AsyncOpenAI,
        OpenAIChatCompletionsModel,
        Runner,
        RunResult,
        function_tool,
    )

    agents_available = True
except ImportError:
    agents_available = None

DEFAULT_MAX_TURNS = 20


@logger.catch(reraise=True)
def run_openai_agent(
    model_id: str,
    prompt: str,
    name: str = "surf-spot-finder",
    instructions: Optional[str] = SINGLE_AGENT_SYSTEM_PROMPT,
    api_key_var: Optional[str] = None,
    api_base: Optional[str] = None,
    tools: Optional[list[str]] = None,
    max_turns: Optional[int] = DEFAULT_MAX_TURNS,
) -> RunResult:
    """Runs an OpenAI agent with the given prompt and configuration.

    It leverages the 'agents' library to create and manage the agent
    execution.

    See https://openai.github.io/openai-agents-python/ref/agent/ for more details.

    Args:
        model_id (str): The ID of the OpenAI model to use (e.g., "gpt4o").
            See https://platform.openai.com/docs/api-reference/models.
        prompt (str): The prompt to be given to the agent.
        name (str, optional): The name of the agent. Defaults to "surf-spot-finder".
        instructions (Optional[str], optional): Initial instructions to give the agent.
            Defaults to [SINGLE_AGENT_SYSTEM_PROMPT][surf_spot_finder.prompts.openai.SINGLE_AGENT_SYSTEM_PROMPT].
        api_key_var (Optional[str], optional): The name of the environment variable
            containing the OpenAI API key. If provided, along with `base_url`, an
            external OpenAI client will be used. Defaults to None.
        api_base (Optional[str], optional): The base URL for the OpenAI API.
            Required if `api_key_var` is provided to use an external OpenAI client.
            Defaults to None.

    Returns:
        RunResult: A RunResult object containing the output of the agent run.
            See https://openai.github.io/openai-agents-python/ref/result/#agents.result.RunResult.
    """
    if not agents_available:
        raise ImportError("You need to `pip install openai-agents` to use this agent")

    if tools is None:
        tools = [
            "surf_spot_finder.tools.search_web",
            "surf_spot_finder.tools.visit_webpage",
        ]

    imported_tools = import_and_wrap_tools(tools, wrap_tool_openai)

    if api_key_var and api_base:
        external_client = AsyncOpenAI(
            api_key=os.environ[api_key_var],
            base_url=api_base,
        )
        agent = Agent(
            name=name,
            instructions=instructions,
            model=OpenAIChatCompletionsModel(
                model=model_id,
                openai_client=external_client,
            ),
            tools=imported_tools,
        )
    else:
        agent = Agent(
            model=model_id,
            instructions=instructions,
            name=name,
            tools=imported_tools,
        )
    result = Runner.run_sync(starting_agent=agent, input=prompt, max_turns=max_turns)
    logger.info(result.final_output)
    return result


@logger.catch(reraise=True)
def run_openai_multi_agent(
    model_id: str,
    prompt: str,
    name: str = "surf-spot-finder",
    instructions: Optional[str] = MULTI_AGENT_SYSTEM_PROMPT,
    max_turns: Optional[int] = DEFAULT_MAX_TURNS,
    **kwargs,
) -> RunResult:
    """Runs multiple OpenAI agents orchestrated by a main agent.

    It leverages the 'agents' library to create and manage the agent
    execution.

    See https://openai.github.io/openai-agents-python/ref/agent/ for more details.

    Args:
        model_id (str): The ID of the OpenAI model to use (e.g., "gpt4o").
            See https://platform.openai.com/docs/api-reference/models.
        prompt (str): The prompt to be given to the agent.
        name (str, optional): The name of the main agent. Defaults to "surf-spot-finder".
        instructions (Optional[str], optional): Initial instructions to give the agent.
            Defaults to [MULTI_AGENT_SYSTEM_PROMPT][surf_spot_finder.prompts.openai.MULTI_AGENT_SYSTEM_PROMPT].

    Returns:
        RunResult: A RunResult object containing the output of the agent run.
            See https://openai.github.io/openai-agents-python/ref/result/#agents.result.RunResult.
    """
    if not agents_available:
        raise ImportError("You need to `pip install openai-agents` to use this agent")

    from surf_spot_finder.tools import (
        ask_user_verification,
        show_final_answer,
        show_plan,
        search_web,
        visit_webpage,
    )

    user_verification_agent = Agent(
        model=model_id,
        instructions="Interact with the user by showing information and asking for verification.",
        name="user-verification-agent",
        tools=[function_tool(ask_user_verification), function_tool(show_plan)],
    )

    search_web_agent = Agent(
        model=model_id,
        instructions="Find relevant information about the provided task by using your tools.",
        name="search-web-agent",
        tools=[function_tool(search_web), function_tool(visit_webpage)],
    )

    communication_agent = Agent(
        model=model_id,
        instructions="Communicate the final answer to the user.",
        name="communication-agent",
        tools=[function_tool(show_final_answer)],
    )

    main_agent = Agent(
        model=model_id,
        instructions=instructions,
        name=name,
        handoffs=[communication_agent],
        tools=[
            search_web_agent.as_tool(
                tool_name="search_web_with_agent",
                tool_description=search_web_agent.instructions,
            ),
            user_verification_agent.as_tool(
                tool_name="ask_user_verification_with_agent",
                tool_description=user_verification_agent.instructions,
            ),
        ],
    )

    result = Runner.run_sync(
        starting_agent=main_agent, input=prompt, max_turns=max_turns
    )
    logger.info(result.final_output)
    return result
src/surf_spot_finder/agents/smolagents.py
DELETED
@@ -1,100 +0,0 @@
import os
from typing import Optional

from loguru import logger

from surf_spot_finder.prompts.smolagents import SYSTEM_PROMPT
from surf_spot_finder.tools.wrappers import import_and_wrap_tools, wrap_tool_smolagents


try:
    from smolagents import (
        CodeAgent,
        LiteLLMModel,
    )

    smolagents_available = True
except ImportError:
    smolagents_available = None


@logger.catch(reraise=True)
def run_smolagent(
    model_id: str,
    prompt: str,
    api_key_var: Optional[str] = None,
    api_base: Optional[str] = None,
    tools: Optional[list[str]] = None,
) -> CodeAgent:
    """
    Create and configure a Smolagents CodeAgent with the specified model.

    See https://docs.litellm.ai/docs/providers for details on available LiteLLM providers.

    Args:
        model_id (str): Model identifier using LiteLLM syntax (e.g., 'openai/o1', 'anthropic/claude-3-sonnet')
        prompt (str): Prompt to provide to the model
        api_key_var (Optional[str]): Name of environment variable containing the API key
        api_base (Optional[str]): Custom API base URL, if needed for non-default endpoints

    Returns:
        CodeAgent: Configured agent ready to process requests

    Example:

        >>> agent = run_smolagent("anthropic/claude-3-haiku", "my prompt here", "ANTHROPIC_API_KEY", None, None)
        >>> agent.run("Find surf spots near San Diego")
    """
    if not smolagents_available:
        raise ImportError("You need to `pip install smolagents` to use this agent")

    if tools is None:
        tools = [
            "smolagents.DuckDuckGoSearchTool",
            "smolagents.VisitWebpageTool",
            "smolagents.PythonInterpreterTool",
        ]

    mcp_tool = None
    for tool in tools:
        if "mcp" in tool:
            mcp_tool = tool
            tools.remove(tool)
    imported_tools = import_and_wrap_tools(tools, wrap_tool_smolagents)

    model = LiteLLMModel(
        model_id=model_id,
        api_base=api_base if api_base else None,
        api_key=os.environ[api_key_var] if api_key_var else None,
    )

    if mcp_tool:
        from mcp import StdioServerParameters
        from smolagents import ToolCollection

        # We could easily use any of the MCPs at https://github.com/modelcontextprotocol/servers
        # or at https://glama.ai/mcp/servers
        # or at https://smithery.ai/
        server_parameters = StdioServerParameters(
            command="docker",
            args=["run", "-i", "--rm", mcp_tool],
            env={**os.environ},
        )
        # https://huggingface.co/docs/smolagents/v1.10.0/en/reference/tools#smolagents.ToolCollection.from_mcp
        with ToolCollection.from_mcp(server_parameters) as tool_collection:
            agent = CodeAgent(
                tools=imported_tools + tool_collection.tools,
                prompt_templates={"system_prompt": SYSTEM_PROMPT},
                model=model,
                add_base_tools=False,  # Turn this on if you want to let it run python code as it sees fit
            )
            agent.run(prompt)
    else:
        agent = CodeAgent(
            tools=imported_tools,
            prompt_templates={"system_prompt": SYSTEM_PROMPT},
            model=model,
        )
        agent.run(prompt)

    return agent
src/surf_spot_finder/cli.py
CHANGED
@@ -1,4 +1,5 @@
-
+import yaml
+from pathlib import Path

 from fire import Fire
 from loguru import logger
@@ -6,88 +7,46 @@ from loguru import logger
 from surf_spot_finder.config import (
     Config,
 )
-from
-from
-from surf_spot_finder.tracing import get_tracer_provider, setup_tracing
+from any_agent import load_agent, run_agent
+from any_agent.tracing import get_tracer_provider, setup_tracing


 @logger.catch(reraise=True)
 def find_surf_spot(
-
-    date: Optional[str] = None,
-    max_driving_hours: Optional[int] = None,
-    model_id: Optional[str] = None,
-    agent_type: str = "smolagents",
-    api_key_var: Optional[str] = None,
-    input_prompt_template: str = INPUT_PROMPT,
-    json_tracer: bool = True,
-    api_base: Optional[str] = None,
-    tools: Optional[list[dict]] = None,
-    from_config: Optional[str] = None,
+    config_file: str,
 ) -> str:
     """Find the best surf spot based on the given criteria.

     Args:
-
-
-        date (str): The date to search for.
-            Required if `from_config` is not provided.
-        max_driving_hours (int): The maximum driving hours from the location.
-            Required if `from_config` is not provided.
-        model_id (str): The ID of the model to use.
-            Required if `from_config` is not provided.
+        config_file: Path to a YAML config file.
+            See [Config][surf_spot_finder.config.Config]

-            If using `agent_type=smolagents`, use LiteLLM syntax (e.g., 'openai/o1', 'anthropic/claude-3-sonnet').
-            If using `agent_type={openai,openai_multi_agent}`, use OpenAI syntax (e.g., 'o1').
-        agent_type (str, optional): The type of agent to use.
-            Must be one of the supported types in [RUNNERS][surf_spot_finder.agents.RUNNERS].
-        api_key_var (Optional[str], optional): The name of the environment variable containing the API key.
-        input_prompt_template (str, optional): The template for the imput_prompt.
-
-            Must contain the following placeholders: `{LOCATION}`, `{MAX_DRIVING_HOURS}`, and `{DATE}`.
-        json_tracer (bool, optional): Whether to use the custom JSON file exporter.
-        api_base (Optional[str], optional): The base URL for the API.
-        from_config (Optional[str], optional): Path to a YAML config file.
-
-            If provided, all other arguments will be ignored.
     """
-
-
-        config = Config.from_yaml(from_config)
-    else:
-        config = Config(
-            location=location,
-            date=date,
-            max_driving_hours=max_driving_hours,
-            model_id=model_id,
-            agent_type=agent_type,
-            api_key_var=api_key_var,
-            prompt=input_prompt_template,
-            json_tracer=json_tracer,
-            api_base=api_base,
-            tools=tools,
-        )
+    logger.info(f"Loading {config_file}")
+    config = Config.model_validate(yaml.safe_load(Path(config_file).read_text()))

     logger.info("Setting up tracing")
-    tracer_provider, tracing_path = get_tracer_provider(
-
-
-
+    tracer_provider, tracing_path = get_tracer_provider(project_name="surf-spot-finder")
+    setup_tracing(tracer_provider, config.framework)
+
+    logger.info(f"Loading {config.framework} agent")
+    logger.info(f"{config.managed_agents}")
+    agent = load_agent(
+        framework=config.framework,
+        main_agent=config.main_agent,
+        managed_agents=config.managed_agents,
     )
-    setup_tracing(tracer_provider, config.agent_type)

-
-
-
-
-        LOCATION=config.location,
-        MAX_DRIVING_HOURS=config.max_driving_hours,
-        DATE=config.date,
-        ),
-        api_base=config.api_base,
-        api_key_var=config.api_key_var,
-        tools=config.tools,
+    query = config.input_prompt_template.format(
+        LOCATION=config.location,
+        MAX_DRIVING_HOURS=config.max_driving_hours,
+        DATE=config.date,
     )
+    logger.info(f"Running agent with query:\n{query}")
+    run_agent(agent, query)
+
+    logger.success("Done!")
+
     return tracing_path

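With this change the CLI collapses to a single config-file argument. A minimal usage sketch, assuming the package is installed, one of the example configs above is present, and the relevant API key environment variables are set (the programmatic call below is an assumption; the Fire-based command line wraps the same function):

from surf_spot_finder.cli import find_surf_spot

# Runs the configured agent and returns the path of the JSON telemetry file.
tracing_path = find_surf_spot("examples/openai_single_agent.yaml")
print(f"Telemetry written to: {tracing_path}")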
src/surf_spot_finder/config.py
CHANGED
@@ -1,8 +1,14 @@
-from typing import Annotated
+from typing import Annotated
+
+from any_agent.schema import AgentSchema
 from pydantic import AfterValidator, BaseModel, FutureDatetime, PositiveInt
-import yaml

-
+
+INPUT_PROMPT_TEMPLATE = """
+According to the forecast, what will be the best spot to surf around {LOCATION},
+in a {MAX_DRIVING_HOURS} hour driving radius,
+at {DATE}?"
+""".strip()


 def validate_prompt(value) -> str:
@@ -12,50 +18,15 @@ def validate_prompt(value) -> str:
     return value


-def validate_agent_type(value) -> str:
-    from surf_spot_finder.agents import validate_agent_type
-
-    validate_agent_type(value)
-    return value
-
-
 class Config(BaseModel):
-    input_prompt_template: Annotated[str, AfterValidator(validate_prompt)] = (
-        INPUT_PROMPT
-    )
     location: str
     max_driving_hours: PositiveInt
     date: FutureDatetime
-
-
-
-
-
-
-
-
-    def from_yaml(cls, yaml_path: str) -> "Config":
-        """
-        Create a Config instance from a YAML file.
-
-        Args:
-            yaml_path: Path to the YAML configuration file
-
-        Returns:
-            Config: A new Config instance populated with values from the YAML file
-        """
-        with open(yaml_path, "r") as f:
-            data = yaml.safe_load(f)
-
-        # Extract and flatten the nested structure
-        config_dict = {}
-
-        # Add input parameters
-        if "input" in data:
-            config_dict.update(data["input"])
-
-        # Add agent parameters
-        if "agent" in data:
-            config_dict.update(data["agent"])
-        # Create instance from the flattened dictionary
-        return cls(**config_dict)
+    input_prompt_template: Annotated[str, AfterValidator(validate_prompt)] = (
+        INPUT_PROMPT_TEMPLATE
+    )
+
+    framework: str
+
+    main_agent: AgentSchema
+    managed_agents: list[AgentSchema] | None = None
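Because Config is now a plain Pydantic model fed by any of the example YAML files, it can be exercised directly. A minimal sketch, assuming any_agent.schema.AgentSchema exposes the model_id field used in the examples and that the date in the chosen file is still in the future (the field is declared FutureDatetime, so past dates fail validation):

import yaml
from pathlib import Path

from surf_spot_finder.config import Config

# Validate one of the example configs shipped with this PR.
raw = yaml.safe_load(Path("examples/smolagents_single_agent.yaml").read_text())
config = Config.model_validate(raw)

print(config.framework)            # "smolagents"
print(config.main_agent.model_id)  # assumed AgentSchema field: "openai/o3-mini"
print(config.input_prompt_template.format(
    LOCATION=config.location,
    MAX_DRIVING_HOURS=config.max_driving_hours,
    DATE=config.date,
))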
src/surf_spot_finder/{prompts → instructions}/__init__.py
RENAMED
File without changes

src/surf_spot_finder/{prompts → instructions}/openai.py
RENAMED
File without changes

src/surf_spot_finder/instructions/shared.py
ADDED
File without changes

src/surf_spot_finder/{prompts → instructions}/smolagents.py
RENAMED
File without changes
src/surf_spot_finder/prompts/shared.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
INPUT_PROMPT = """
|
2 |
-
According to the forecast, what will be the best spot to surf around {LOCATION},
|
3 |
-
in a {MAX_DRIVING_HOURS} hour driving radius,
|
4 |
-
at {DATE}?"
|
5 |
-
""".strip()
|
|
|
|
|
|
|
|
|
|
|
|
src/surf_spot_finder/tools/__init__.py
CHANGED
@@ -1,17 +1,10 @@
 from .openmeteo import get_wave_forecast, get_wind_forecast
 from .openstreetmap import driving_hours_to_meters, get_area_lat_lon, get_surfing_spots
-from .user_interaction import show_final_answer, show_plan, ask_user_verification
-from .web_browsing import search_web, visit_webpage

 __all__ = [
-    driving_hours_to_meters,
-    get_area_lat_lon,
-    get_surfing_spots,
-    get_wave_forecast,
-    get_wind_forecast,
-    search_web,
-    show_final_answer,
-    show_plan,
-    ask_user_verification,
-    visit_webpage,
+    "driving_hours_to_meters",
+    "get_area_lat_lon",
+    "get_surfing_spots",
+    "get_wave_forecast",
+    "get_wind_forecast",
 ]
src/surf_spot_finder/tools/user_interaction.py
DELETED
@@ -1,30 +0,0 @@
from loguru import logger


def show_plan(plan: str) -> None:
    """Show the current plan to the user.

    Args:
        plan: The current plan.
    """
    logger.info(f"Current plan: {plan}")
    return plan


def show_final_answer(answer: str) -> None:
    """Show the final answer to the user.

    Args:
        answer: The final answer.
    """
    logger.info(f"Final answer: {answer}")
    return answer


def ask_user_verification(query: str) -> str:
    """Asks user to verify the given `query`.

    Args:
        query: The question that requires verification.
    """
    return input(f"{query} => Type your answer here:")
src/surf_spot_finder/tools/web_browsing.py
DELETED
@@ -1,54 +0,0 @@
import re

import requests
from duckduckgo_search import DDGS
from markdownify import markdownify
from requests.exceptions import RequestException


def _truncate_content(content: str, max_length: int) -> str:
    if len(content) <= max_length:
        return content
    else:
        return (
            content[: max_length // 2]
            + f"\n..._This content has been truncated to stay below {max_length} characters_...\n"
            + content[-max_length // 2 :]
        )


def search_web(query: str) -> str:
    """Performs a duckduckgo web search based on your query (think a Google search) then returns the top search results.

    Args:
        query: The search query to perform.

    Returns:
        The top search results.
    """
    ddgs = DDGS()
    results = ddgs.text(query, max_results=10)
    return "\n".join(
        f"[{result['title']}]({result['href']})\n{result['body']}" for result in results
    )


def visit_webpage(url: str) -> str:
    """Visits a webpage at the given url and reads its content as a markdown string. Use this to browse webpages.

    Args:
        url: The url of the webpage to visit.
    """
    try:
        response = requests.get(url)
        response.raise_for_status()

        markdown_content = markdownify(response.text).strip()

        markdown_content = re.sub(r"\n{2,}", "\n", markdown_content)

        return _truncate_content(markdown_content, 10000)
    except RequestException as e:
        return f"Error fetching the webpage: {str(e)}"
    except Exception as e:
        return f"An unexpected error occurred: {str(e)}"
src/surf_spot_finder/tools/wrappers.py
DELETED
@@ -1,41 +0,0 @@
import inspect
import importlib
from collections.abc import Callable


def import_and_wrap_tools(tools: list[str], wrapper: Callable) -> list[Callable]:
    imported_tools = []
    for tool in tools:
        module, func = tool.rsplit(".", 1)
        module = importlib.import_module(module)
        imported_tool = getattr(module, func)
        if inspect.isclass(imported_tool):
            imported_tool = imported_tool()
        imported_tools.append(wrapper(imported_tool))
    return imported_tools


def wrap_tool_openai(tool):
    from agents import function_tool, FunctionTool

    if not isinstance(tool, FunctionTool):
        return function_tool(tool)
    return tool


def wrap_tool_langchain(tool):
    from langchain_core.tools import BaseTool
    from langchain_core.tools import tool as langchain_tool

    if not isinstance(tool, BaseTool):
        return langchain_tool(tool)
    return tool


def wrap_tool_smolagents(tool):
    from smolagents import Tool, tool as smolagents_tool

    if not isinstance(tool, Tool):
        return smolagents_tool(tool)

    return tool
src/surf_spot_finder/tracing.py
DELETED
@@ -1,114 +0,0 @@
import os
import json
from datetime import datetime
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter

from surf_spot_finder.agents import AgentType


class JsonFileSpanExporter(SpanExporter):
    def __init__(self, file_name: str):
        self.file_name = file_name
        # Initialize with an empty array if file doesn't exist
        if not os.path.exists(self.file_name):
            with open(self.file_name, "w") as f:
                json.dump([], f)

    def export(self, spans) -> None:
        # Read existing spans
        try:
            with open(self.file_name, "r") as f:
                all_spans = json.load(f)
        except (json.JSONDecodeError, FileNotFoundError):
            all_spans = []

        # Add new spans
        for span in spans:
            try:
                # Try to parse the span data from to_json() if it returns a string
                span_data = json.loads(span.to_json())
            except (json.JSONDecodeError, TypeError, AttributeError):
                # If span.to_json() doesn't return valid JSON string
                span_data = {"error": "Could not serialize span", "span_str": str(span)}

            all_spans.append(span_data)

        # Write all spans back to the file as a proper JSON array
        with open(self.file_name, "w") as f:
            json.dump(all_spans, f, indent=2)

    def shutdown(self):
        pass


def get_tracer_provider(
    project_name: str,
    json_tracer: bool,
    agent_type: AgentType,
    output_dir: str = "output",
) -> tuple[TracerProvider, str | None]:
    """
    Create a tracer_provider based on the selected mode.

    Args:
        project_name: Name of the project for tracing
        json_tracer: Whether to use the custom JSON file exporter (True) or Phoenix (False)
        agent_type: The type of agent being used.
        output_dir: The directory where the telemetry output will be stored.
            Only used if `json_tracer=True`.
            Defaults to "output".

    Returns:
        tracer_provider: The configured tracer provider
        file_name: The name of the JSON file where telemetry will be stored
    """
    if json_tracer:
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

        tracer_provider = TracerProvider()
        file_name = f"{output_dir}/{agent_type}-{project_name}-{timestamp}.json"
        json_file_exporter = JsonFileSpanExporter(file_name=file_name)
        span_processor = SimpleSpanProcessor(json_file_exporter)
        tracer_provider.add_span_processor(span_processor)
    else:
        from phoenix.otel import register

        tracer_provider = register(
            project_name=project_name, set_global_tracer_provider=True
        )
        file_name = None

    return tracer_provider, file_name


def setup_tracing(tracer_provider: TracerProvider, agent_type: str) -> None:
    """Setup tracing for `agent_type` by instrumenting `trace_provider`.

    Args:
        tracer_provider (TracerProvider): The configured tracer provider from
            [get_tracer_provider][surf_spot_finder.tracing.get_tracer_provider].
        agent_type (str): The type of agent being used.
            Must be one of the supported types in [RUNNERS][surf_spot_finder.agents.RUNNERS].
    """
    from surf_spot_finder.agents import validate_agent_type

    validate_agent_type(agent_type)

    if "openai" in agent_type:
        from openinference.instrumentation.openai_agents import (
            OpenAIAgentsInstrumentor as Instrumentor,
        )
    elif agent_type == "smolagents":
        from openinference.instrumentation.smolagents import (
            SmolagentsInstrumentor as Instrumentor,
        )
    elif agent_type == "langchain":
        from openinference.instrumentation.langchain import (
            LangChainInstrumentor as Instrumentor,
        )
    else:
        raise ValueError(f"Unsupported agent type: {agent_type}")
    Instrumentor().instrument(tracer_provider=tracer_provider)
tests/integration/agents/test_integration_openai.py
DELETED
@@ -1,34 +0,0 @@
import os
import pytest

from surf_spot_finder.agents.openai import run_openai_agent

INTEGRATION_MODEL = "gpt-4o-mini"
API_KEY_VAR = "OPENAI_API_KEY"


@pytest.mark.skipif(
    "INTEGRATION_TESTS" not in os.environ,
    reason="Integration tests require INTEGRATION_TESTS env var",
)
def test_openai_real_execution():
    """
    Tests the actual execution of the agent against real APIs.

    WARNING: This will make actual API calls and incur costs.
    Only run when explicitly needed for full system testing.

    Requires:
        - OPENAI_API_KEY in environment variables
        - INTEGRATION_TESTS env var to be set
    """
    from agents import RunResult, ToolCallItem

    result = run_openai_agent(
        INTEGRATION_MODEL,
        "What will be the best surf spot around Vigo, in a 2 hour driving radius, tomorrow?",
        api_key_var=API_KEY_VAR,
    )

    assert isinstance(result, RunResult)
    assert any(isinstance(item, ToolCallItem) for item in result.new_items)
tests/integration/agents/test_integration_smolagents.py
DELETED
@@ -1,67 +0,0 @@
import os
import pytest
from unittest.mock import patch

from surf_spot_finder.agents.smolagents import run_smolagent

# TODO I'd rather not use openai
INTEGRATION_MODEL = "openai/gpt-3.5-turbo"
API_KEY_VAR = "OPENAI_API_KEY"


@pytest.mark.skipif(
    "INTEGRATION_TESTS" not in os.environ,
    reason="Integration tests require INTEGRATION_TESTS env var",
)
def test_smolagent_integration():
    """
    Full integration test of the smolagent functionality.

    Requires:
        - Docker to be running
        - OPENAI_API_KEY in environment variables
        - INTEGRATION_TESTS env var to be set
    """
    with patch("smolagents.CodeAgent") as MockCodeAgent:
        # Create a mock agent that returns itself from run()
        mock_agent = MockCodeAgent.return_value
        mock_agent.run.return_value = mock_agent

        # Run the agent
        result = run_smolagent(
            INTEGRATION_MODEL,
            "Find popular surf spots in California",
            api_key_var=API_KEY_VAR,
        )

        # Verify the agent was created and run
        MockCodeAgent.assert_called_once()
        mock_agent.run.assert_called_once_with("Find popular surf spots in California")
        assert result is mock_agent


@pytest.mark.skipif(
    "INTEGRATION_TESTS" not in os.environ,
    reason="Full integration tests require INTEGRATION_TESTS env var",
)
def test_smolagent_real_execution():
    """
    Tests the actual execution of the agent against real APIs.

    WARNING: This will make actual API calls and incur costs.
    Only run when explicitly needed for full system testing.

    Requires:
        - Docker to be running
        - OPENAI_API_KEY in environment variables
        - INTEGRATION_TESTS env var to be set
    """
    # Run with a simple, inexpensive request
    agent = run_smolagent(
        INTEGRATION_MODEL,
        "What are three popular surf spots in California?",
        api_key_var=API_KEY_VAR,
    )

    # Basic verification that we got an agent back
    assert agent is not None
tests/unit/agents/test_unit_langchain.py
DELETED
@@ -1,33 +0,0 @@
from unittest.mock import patch, MagicMock

from surf_spot_finder.agents.langchain import (
    run_lanchain_agent,
)
from surf_spot_finder.tools import (
    search_web,
    visit_webpage,
)


def test_run_langchain_agent_default():
    model_mock = MagicMock()
    create_mock = MagicMock()
    agent_mock = MagicMock()
    create_mock.return_value = agent_mock
    memory_mock = MagicMock()
    tool_mock = MagicMock()

    with (
        patch("surf_spot_finder.agents.langchain.create_react_agent", create_mock),
        patch("surf_spot_finder.agents.langchain.init_chat_model", model_mock),
        patch("surf_spot_finder.agents.langchain.MemorySaver", memory_mock),
        patch("surf_spot_finder.agents.langchain.tool", tool_mock),
    ):
        run_lanchain_agent("gpt-4o", "Test prompt")
        model_mock.assert_called_once_with("gpt-4o")
        create_mock.assert_called_once_with(
            model=model_mock.return_value,
            tools=[tool_mock(search_web), tool_mock(visit_webpage)],
            checkpointer=memory_mock.return_value,
        )
        agent_mock.stream.assert_called_once()
tests/unit/agents/test_unit_openai.py
DELETED
@@ -1,110 +0,0 @@
import os
import pytest
from unittest.mock import patch, MagicMock, ANY

from surf_spot_finder.agents.openai import (
    run_openai_agent,
    run_openai_multi_agent,
)
from surf_spot_finder.tools import (
    show_final_answer,
    show_plan,
    ask_user_verification,
    search_web,
    visit_webpage,
)
from surf_spot_finder.prompts.openai import (
    SINGLE_AGENT_SYSTEM_PROMPT,
    MULTI_AGENT_SYSTEM_PROMPT,
)


def test_run_openai_agent_default():
    mock_agent = MagicMock()

    with (
        patch("surf_spot_finder.agents.openai.Agent", mock_agent),
        patch("surf_spot_finder.agents.openai.Runner", MagicMock()),
    ):
        run_openai_agent("gpt-4o", "Test prompt")
        mock_agent.assert_called_once_with(
            model="gpt-4o",
            instructions=SINGLE_AGENT_SYSTEM_PROMPT,
            name="surf-spot-finder",
            tools=ANY,
        )


def test_run_openai_agent_base_url_and_api_key_var():
    async_openai_mock = MagicMock()
    openai_chat_completions_model = MagicMock()
    with (
        patch("surf_spot_finder.agents.openai.Agent", MagicMock()),
        patch("surf_spot_finder.agents.openai.Runner", MagicMock()),
        patch("surf_spot_finder.agents.openai.AsyncOpenAI", async_openai_mock),
        patch(
            "surf_spot_finder.agents.openai.OpenAIChatCompletionsModel",
            openai_chat_completions_model,
        ),
        patch.dict(os.environ, {"TEST_API_KEY": "test-key-12345"}),
    ):
        run_openai_agent(
            "gpt-4o", "Test prompt", api_base="FOO", api_key_var="TEST_API_KEY"
        )
        async_openai_mock.assert_called_once_with(
            api_key="test-key-12345",
            base_url="FOO",
        )
        openai_chat_completions_model.assert_called_once()


def test_run_openai_environment_error():
    with patch.dict(os.environ, {}, clear=True):
        with pytest.raises(KeyError, match="MISSING_KEY"):
            run_openai_agent(
                "test-model", "Test prompt", api_base="FOO", api_key_var="MISSING_KEY"
            )


def test_run_openai_multiagent():
    mock_agent = MagicMock()
    mock_function_tool = MagicMock()

    with (
        patch("surf_spot_finder.agents.openai.Agent", mock_agent),
        patch("surf_spot_finder.agents.openai.Runner", MagicMock()),
        patch("surf_spot_finder.agents.openai.function_tool", mock_function_tool),
    ):
        run_openai_multi_agent("gpt-4o", "Test prompt")
        mock_agent.assert_any_call(
            model="gpt-4o",
            instructions="Interact with the user by showing information and asking for verification.",
            name="user-verification-agent",
            tools=[
                mock_function_tool(show_plan),
                mock_function_tool(ask_user_verification),
            ],
        )

        mock_agent.assert_any_call(
            model="gpt-4o",
            instructions="Find relevant information about the provided task by using your tools.",
            name="search-web-agent",
            tools=[mock_function_tool(search_web), mock_function_tool(visit_webpage)],
        )

        mock_agent.assert_any_call(
            model="gpt-4o",
            instructions="Communicate the final answer to the user.",
            name="communication-agent",
            tools=[mock_function_tool(show_final_answer)],
        )

        mock_agent.assert_any_call(
            model="gpt-4o",
            instructions=MULTI_AGENT_SYSTEM_PROMPT,
            name="surf-spot-finder",
            # TODO: add more elaborated checks
            handoffs=ANY,
            tools=ANY,
        )
tests/unit/agents/test_unit_smolagents.py
DELETED
@@ -1,71 +0,0 @@
import os
import pytest
from unittest.mock import patch, MagicMock
import contextlib

from surf_spot_finder.agents.smolagents import run_smolagent


@pytest.fixture
def common_patches():
    litellm_model_mock = MagicMock()
    code_agent_mock = MagicMock()
    patch_context = contextlib.ExitStack()
    patch_context.enter_context(
        patch("surf_spot_finder.agents.smolagents.CodeAgent", code_agent_mock)
    )
    patch_context.enter_context(
        patch("surf_spot_finder.agents.smolagents.LiteLLMModel", litellm_model_mock)
    )
    yield patch_context, litellm_model_mock, code_agent_mock
    patch_context.close()


def test_run_smolagent_with_api_key_var(common_patches):
    patch_context, litellm_model_mock, code_agent_mock = common_patches

    with patch_context, patch.dict(os.environ, {"TEST_API_KEY": "test-key-12345"}):
        run_smolagent("openai/gpt-4", "Test prompt", api_key_var="TEST_API_KEY")

        litellm_model_mock.assert_called()
        model_call_kwargs = litellm_model_mock.call_args[1]
        assert model_call_kwargs["model_id"] == "openai/gpt-4"
        assert model_call_kwargs["api_key"] == "test-key-12345"
        assert model_call_kwargs["api_base"] is None

        code_agent_mock.assert_called_once()
        code_agent_mock.return_value.run.assert_called_once_with("Test prompt")


def test_run_smolagent_with_custom_api_base(common_patches):
    patch_context, litellm_model_mock, *_ = common_patches

    with patch_context, patch.dict(os.environ, {"TEST_API_KEY": "test-key-12345"}):
        run_smolagent(
            "anthropic/claude-3-sonnet",
            "Test prompt",
            api_key_var="TEST_API_KEY",
            api_base="https://custom-api.example.com",
        )
        last_call = litellm_model_mock.call_args_list[-1]

        assert last_call[1]["model_id"] == "anthropic/claude-3-sonnet"
        assert last_call[1]["api_key"] == "test-key-12345"
        assert last_call[1]["api_base"] == "https://custom-api.example.com"


def test_run_smolagent_without_api_key(common_patches):
    patch_context, litellm_model_mock, *_ = common_patches

    with patch_context:
        run_smolagent("ollama_chat/deepseek-r1", "Test prompt")

        last_call = litellm_model_mock.call_args_list[-1]
        assert last_call[1]["model_id"] == "ollama_chat/deepseek-r1"
        assert last_call[1]["api_key"] is None


def test_run_smolagent_environment_error():
    with patch.dict(os.environ, {}, clear=True):
        with pytest.raises(KeyError, match="MISSING_KEY"):
            run_smolagent("test-model", "Test prompt", api_key_var="MISSING_KEY")
tests/unit/test_unit_tracing.py
DELETED
@@ -1,47 +0,0 @@
from unittest.mock import patch, MagicMock

import pytest

from surf_spot_finder.agents import AgentType
from surf_spot_finder.tracing import get_tracer_provider, setup_tracing


@pytest.mark.parametrize("json_tracer", [True, False])
def test_get_tracer_provider(tmp_path, json_tracer):
    mock_tracer_provider = MagicMock()
    mock_register = MagicMock()

    with (
        patch("surf_spot_finder.tracing.TracerProvider", mock_tracer_provider),
        patch("phoenix.otel.register", mock_register),
    ):
        get_tracer_provider(
            project_name="test_project",
            json_tracer=json_tracer,
            agent_type=AgentType.SMOLAGENTS,
            output_dir=tmp_path / "telemetry",
        )
        assert (tmp_path / "telemetry").exists() == json_tracer
        if not json_tracer:
            mock_register.assert_called_once_with(
                project_name="test_project", set_global_tracer_provider=True
            )


@pytest.mark.parametrize(
    "agent_type,instrumentor",
    [
        ("openai", "openai_agents.OpenAIAgentsInstrumentor"),
        ("openai_multi_agent", "openai_agents.OpenAIAgentsInstrumentor"),
        ("smolagents", "smolagents.SmolagentsInstrumentor"),
    ],
)
def test_setup_tracing(agent_type, instrumentor):
    with patch(f"openinference.instrumentation.{instrumentor}") as mock_instrumentor:
        setup_tracing(MagicMock(), agent_type)
        mock_instrumentor.assert_called_once()


def test_invalid_agent_type():
    with pytest.raises(ValueError, match="agent_type must be one of"):
        setup_tracing(MagicMock(), "invalid_agent_type")