Spaces:
Runtime error
Runtime error
Complete up to external env config
Browse files
- .gitignore +3 -0
- app.py +48 -5
- retriever.py +35 -0
- tools.py +87 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
.env
__pycache__
*.pyc
app.py
CHANGED
@@ -1,9 +1,52 @@
|
|
1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
|
|
|
3 |
|
4 |
-
|
5 |
-
|
|
|
6 |
|
|
|
7 |
|
8 |
-
|
9 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Integrates all components into an agent.
#
# One AgentWorkflow ("alfred") is built from the guest retriever plus the
# auxiliary tools, driven by a HuggingFace Inference API LLM.

# TODO: wire up the gradio UI (see the commented-out demo at file bottom)
# import gradio as gr
import asyncio

from dotenv import load_dotenv
from llama_index.core.workflow import Context
from llama_index.core.agent.workflow import AgentWorkflow, ToolCallResult, AgentStream
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI

from retriever import guest_info_retriever
from tools import get_weather_info, get_hub_stats, google_search

# Load .env BEFORE instantiating the LLM: HuggingFaceInferenceAPI reads its
# token from the environment, and this module may be imported (e.g. by a
# Space runner) without ever reaching the __main__ guard, in which case the
# credentials were previously never loaded.
load_dotenv()

llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")

# Single agent wired with every available tool.
alfred = AgentWorkflow.from_tools_or_functions(
    [guest_info_retriever, get_weather_info, get_hub_stats, google_search], llm=llm
)

# Shared context: lets a follow-up query refer back to earlier turns.
ctx = Context(alfred)
|
18 |
|
19 |
+
|
20 |
+
async def _stream_query(query: str, label: str) -> None:
    """Run one query against alfred, streaming events as they arrive.

    Args:
        query: Natural-language question for the agent.
        label: Heading printed before the final response.
    """
    handler = alfred.run(query, ctx=ctx)
    async for ev in handler.stream_events():
        if isinstance(ev, ToolCallResult):
            print("")
            print("Called tool: ", ev.tool_name, ev.tool_kwargs, "=>", ev.tool_output)
        elif isinstance(ev, AgentStream):  # showing the thought process
            print(ev.delta, end="", flush=True)
    print(label)
    response = await handler
    print(response)


async def main():
    """Demo: two sequential queries sharing one Context, so the pronoun in
    the second question ("she") resolves against the first answer.

    The original body duplicated the whole streaming loop for each query;
    both runs now go through _stream_query.
    """
    await _stream_query("Tell me about Lady Ada Lovelace.", "🎩 Alfred's Response:")
    await _stream_query(
        "What projects is she currently working on?",
        "🎩 Alfred's Second Response:",
    )
|
45 |
+
|
46 |
+
|
47 |
+
# Gradio UI — not wired up yet.
# demo = gr.Interface(fn=greet, inputs="text", outputs="text")
# demo.launch()

if __name__ == "__main__":
    # Pull credentials from .env before the agent makes any API calls.
    load_dotenv()
    asyncio.run(main())
|
retriever.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Implements retrieval functions to support knowledge access.

import datasets
from llama_index.core.schema import Document
from llama_index.retrievers.bm25 import BM25Retriever


def _guest_to_document(guest) -> Document:
    """Render one dataset row as a Document: one "Field: value" line each,
    with the guest's name kept in metadata for later identification."""
    body = "\n".join(
        [
            f"Name: {guest['name']}",
            f"Relation: {guest['relation']}",
            f"Description: {guest['description']}",
            f"Email: {guest['email']}",
        ]
    )
    return Document(text=body, metadata={"name": guest["name"]})


# Load dataset (runs at import time; needs network access on first run).
guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train")

# Convert dataset entries into Document objects
docs = [_guest_to_document(guest) for guest in guest_dataset]

bm25_retriever = BM25Retriever.from_defaults(nodes=docs)
|
27 |
+
|
28 |
+
|
29 |
+
def guest_info_retriever(query: str) -> str:
    """Retrieves detailed info about gala guests based on their name or relation"""
    # NOTE: the docstring above doubles as the agent-visible tool description.
    matches = bm25_retriever.retrieve(query)
    if not matches:
        return "No matching guest information found."
    # At most the three best-scoring guests, separated by blank lines.
    return "\n\n".join(node.text for node in matches[:3])
|
tools.py
ADDED
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Provides auxiliary tools for agent
import asyncio
import json
import os
import random

import aiohttp
from dotenv import load_dotenv
from huggingface_hub import list_models
from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec

# DuckDuckGo search, exposed as a plain callable the agent can invoke.
tool_spec = DuckDuckGoSearchToolSpec()

search_tool = tool_spec.duckduckgo_full_search
|
14 |
+
|
15 |
+
|
16 |
+
async def google_search(query: str, num_results: int = 5) -> str:
    """Search the web via the Google Custom Search JSON API.

    Args:
        query: Search query.
        num_results: Max results to return.

    Returns:
        str: Human-readable block of "Link/Title/Snippet" entries, or a
        "no results" message.  (The original docstring claimed a dict —
        the function has always returned a string.)

    Raises:
        ValueError: If the required API credentials are missing.
        aiohttp.ClientResponseError: On a non-2xx HTTP status.
    """
    api_key = os.getenv("GOOGLE_API_KEY")
    cse_id = os.getenv("GOOGLE_CSE_ID")

    if not api_key or not cse_id:
        raise ValueError(
            "GOOGLE_API_KEY and GOOGLE_CSE_ID must be set in environment variables."
        )

    url = "https://www.googleapis.com/customsearch/v1"
    params = {"key": api_key, "cx": cse_id, "q": query}

    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as response:
            response.raise_for_status()
            data = await response.json()

    # The API omits the "items" key entirely when there are no hits —
    # guard the lookup instead of letting a KeyError escape into the agent.
    items = data.get("items", [])
    if not items:
        return f"No web search results found for: {query}"

    # Debug print removed; the caller decides what to display.
    return "Web Search results:\n\n" + "\n\n".join(
        f"Link:{item['link']}\nTitle:{item['title']}\nSnippet:{item['snippet']}"
        for item in items[:num_results]
    )
|
47 |
+
|
48 |
+
|
49 |
+
def get_weather_info(location: str) -> str:
    """Fetches dummy weather information for a given location"""
    # Dummy weather data — a fixed pool of (condition, temperature) pairs;
    # no real weather service is consulted.
    condition_pool = [
        {"condition": "Rainy", "temp_c": 15},
        {"condition": "Clear", "temp_c": 25},
        {"condition": "Windy", "temp_c": 20},
    ]
    # Randomly select a weather condition
    picked = random.choice(condition_pool)
    return f"Weather in {location}: {picked['condition']}, {picked['temp_c']}°C"
|
60 |
+
|
61 |
+
|
62 |
+
def get_hub_stats(author: str) -> str:
    """Fetches the most downloaded model from a specific author on the Hugging Face Hub."""
    try:
        # Ask the Hub for at most one model, pre-sorted by download count.
        top_models = list(
            list_models(author=author, sort="downloads", direction=-1, limit=1)
        )
        if not top_models:
            return f"No models found for author {author}."
        best = top_models[0]
        return f"The most downloaded model by {author} is {best.id} with {best.downloads:,} downloads."
    except Exception as e:
        # Report failures as text so the agent can keep going (deliberate
        # best-effort boundary — same contract as the original).
        return f"Error fetching models for {author}: {str(e)}"
|
77 |
+
|
78 |
+
|
79 |
+
async def main():
    """Manual smoke test: run one live web search and print the result."""
    result = await google_search("Who's the current President of France 2025?")
    print(result)


if __name__ == "__main__":
    # Credentials come from .env when run as a standalone script.
    load_dotenv()
    asyncio.run(main())
    # print(get_hub_stats("facebook"))
|