mbudisic committed on
Commit 72b38a2
·
1 Parent(s): d3e86a1

Agents now work in Chainlit
app.py CHANGED
@@ -1,8 +1,11 @@
 
 import asyncio
 import json
 import os
 from dataclasses import dataclass
-from typing import Any, Dict, List

 import chainlit as cl
 from dotenv import load_dotenv
@@ -14,20 +17,59 @@ from langchain_openai.embeddings import OpenAIEmbeddings
 from langchain_qdrant import QdrantVectorStore
 from qdrant_client import QdrantClient

 import pstuts_rag.datastore
 import pstuts_rag.rag
 from pstuts_rag.loader import load_json_files


 @dataclass
 class ApplicationParameters:
     filename = [f"data/{f}.json" for f in ["dev"]]
     embedding_model = "text-embedding-3-small"
     n_context_docs = 2
-    llm_model = "gpt-4.1-mini"


 def set_api_key_if_not_present(key_name, prompt_message=""):
     if len(prompt_message) == 0:
         prompt_message = key_name
     if key_name not in os.environ or not os.environ[key_name]:
@@ -35,6 +77,25 @@ def set_api_key_if_not_present(key_name, prompt_message=""):


 class ApplicationState:
     embeddings: OpenAIEmbeddings = None
     docs: List[Document] = []
     qdrant_client: QdrantClient = None
@@ -44,82 +105,284 @@ class ApplicationState:
     llm: BaseChatModel
     rag_chain: Runnable

     hasLoaded: asyncio.Event = asyncio.Event()
     pointsLoaded: int = 0

     def __init__(self) -> None:
         load_dotenv()
         set_api_key_if_not_present("OPENAI_API_KEY")


-state = ApplicationState()
 params = ApplicationParameters()


-async def fill_the_db():
     if state.datastore_manager.count_docs() == 0:
         data: List[Dict[str, Any]] = await load_json_files(params.filename)
         state.pointsLoaded = await state.datastore_manager.populate_database(
             raw_docs=data
         )
         await cl.Message(
-            content=f"✅ The database has been loaded with {state.pointsLoaded} elements!"
         ).send()


 async def build_the_chain():
-    state.rag_factory = pstuts_rag.rag.RAGChainFactory(
-        retriever=state.datastore_manager.get_retriever()
     )
-    state.llm = ChatOpenAI(model=params.llm_model, temperature=0)
-    state.rag_chain = state.rag_factory.get_rag_chain(state.llm)
-    pass


 @cl.on_chat_start
 async def on_chat_start():
-    state.qdrant_client = QdrantClient(":memory:")

-    state.datastore_manager = pstuts_rag.datastore.DatastoreManager(
-        qdrant_client=state.qdrant_client, name="local_test"
     )
-    asyncio.run(main=fill_the_db())
-    asyncio.run(main=build_the_chain())


 @cl.on_message
 async def main(message: cl.Message):
-    # Send a response back to the user
-    msg = cl.Message(content="")
-    response = await state.rag_chain.ainvoke({"question": message.content})

-    text, references = pstuts_rag.rag.RAGChainFactory.unpack_references(
-        response.content
-    )
-    if isinstance(text, str):
-        for token in [char for char in text]:
-            await msg.stream_token(token)
-
-    await msg.send()
-
-    references = json.loads(references)
-    print(references)
-
-    msg_references = [
-        (
-            f"Watch {ref["title"]} from timestamp "
-            f"{round(ref["start"] // 60)}m:{round(ref["start"] % 60)}s",
-            cl.Video(
-                name=ref["title"],
-                url=f"{ref["source"]}#t={ref["start"]}",
-                display="side",
-            ),
-        )
-        for ref in references
-    ]
-    await cl.Message(content="Related videos").send()
-    for e in msg_references:
-        await cl.Message(content=e[0], elements=[e[1]]).send()


 if __name__ == "__main__":
 
+import requests
 import asyncio
 import json
 import os
+import getpass
 from dataclasses import dataclass
+from typing import Any, Dict, List, Tuple
+import re

 import chainlit as cl
 from dotenv import load_dotenv

 from langchain_qdrant import QdrantVectorStore
 from qdrant_client import QdrantClient

+from langchain_core.messages import HumanMessage, BaseMessage
+import langgraph.graph
+
+from pstuts_rag.agents import PsTutsTeamState, create_team_supervisor
+from pstuts_rag.agent_tavily import create_tavily_node
+
 import pstuts_rag.datastore
 import pstuts_rag.rag
+
+from pstuts_rag.agent_rag import create_rag_node
+
 from pstuts_rag.loader import load_json_files
+from pstuts_rag.prompt_templates import SUPERVISOR_SYSTEM
+
+import nest_asyncio
+from uuid import uuid4
+
+# Apply nest_asyncio to enable nested event loops
+nest_asyncio.apply()
+
+# Generate a unique ID for this application instance
+unique_id = uuid4().hex[0:8]
+
+VIDEOARCHIVE = "VideoArchiveSearch"
+ADOBEHELP = "AdobeHelp"


 @dataclass
 class ApplicationParameters:
+    """
+    Configuration parameters for the application.
+
+    Attributes:
+        filename: List of JSON file paths to load data from
+        embedding_model: Name of the OpenAI embedding model to use
+        n_context_docs: Number of context documents to retrieve
+        tool_calling_model: Name of the OpenAI model to use for tool calling
+    """
+
     filename = [f"data/{f}.json" for f in ["dev"]]
     embedding_model = "text-embedding-3-small"
     n_context_docs = 2
+    tool_calling_model = "gpt-4.1-mini"


 def set_api_key_if_not_present(key_name, prompt_message=""):
+    """
+    Sets an API key in the environment if it's not already present.
+
+    Args:
+        key_name: Name of the environment variable to set
+        prompt_message: Custom prompt message for getpass (defaults to key_name)
+    """
     if len(prompt_message) == 0:
         prompt_message = key_name
     if key_name not in os.environ or not os.environ[key_name]:


 class ApplicationState:
+    """
+    Maintains the state of the application and its components.
+
+    Attributes:
+        embeddings: OpenAI embeddings model for vector operations
+        docs: List of loaded documents
+        qdrant_client: Client for the Qdrant vector database
+        vector_store: Vector store for document retrieval
+        datastore_manager: Manager for data storage and retrieval
+        rag_factory: Factory for creating RAG chains
+        llm: Language model instance
+        rag_chain: Retrieval-augmented generation chain
+        ai_graph: Compiled AI agent graph
+        ai_graph_sketch: State graph for AI agent orchestration
+        tasks: List of asyncio tasks
+        hasLoaded: Event to track when loading is complete
+        pointsLoaded: Number of data points loaded into the database
+    """
+
     embeddings: OpenAIEmbeddings = None
     docs: List[Document] = []
     qdrant_client: QdrantClient = None

     llm: BaseChatModel
     rag_chain: Runnable

+    ai_graph: Runnable
+    ai_graph_sketch: langgraph.graph.StateGraph
+
+    tasks: List[asyncio.Task] = []
+
     hasLoaded: asyncio.Event = asyncio.Event()
     pointsLoaded: int = 0

     def __init__(self) -> None:
+        """
+        Initialize the application state and set up environment variables.
+        """
         load_dotenv()
         set_api_key_if_not_present("OPENAI_API_KEY")
+        set_api_key_if_not_present("TAVILY_API_KEY")
+        os.environ["LANGCHAIN_TRACING_V2"] = "true"
+        os.environ["LANGCHAIN_PROJECT"] = (
+            f"AIE - MBUDISIC - HF - CERT - {unique_id}"
+        )
+        set_api_key_if_not_present("LANGCHAIN_API_KEY")


+# Initialize global application state
+app_state = ApplicationState()
 params = ApplicationParameters()
+ai_state = PsTutsTeamState(
+    messages=[],
+    team_members=[VIDEOARCHIVE, ADOBEHELP],
+    next="START",
+)
+

+async def fill_the_db(
+    state: ApplicationState,
+):
+    """
+    Populates the vector database with document data if it's empty.
+
+    Args:
+        state: Application state containing the datastore manager
+
+    Returns:
+        0 if the database already has documents, otherwise None
+    """
     if state.datastore_manager.count_docs() == 0:
         data: List[Dict[str, Any]] = await load_json_files(params.filename)
         state.pointsLoaded = await state.datastore_manager.populate_database(
             raw_docs=data
         )
         await cl.Message(
+            content=f"✅ The database has been loaded with {app_state.pointsLoaded} elements!"
         ).send()
+    else:
+        return 0


 async def build_the_chain():
+    """
+    Builds the RAG chain using the application state components.
+
+    Sets up the retrieval-augmented generation factory, initializes the
+    language model, and creates the RAG chain.
+    """
+    app_state.rag_factory = pstuts_rag.rag.RAGChainFactory(
+        retriever=app_state.datastore_manager.get_retriever()
+    )
+    app_state.llm = ChatOpenAI(model=params.tool_calling_model, temperature=0)
+    app_state.rag_chain = app_state.rag_factory.get_rag_chain(app_state.llm)
+
+
+async def build_the_graph(current_state: ApplicationState):
+    """
+    Builds the agent graph for routing user queries.
+
+    Creates the necessary nodes (Adobe help, RAG search, supervisor), defines
+    their connections, and compiles the graph into a runnable chain.
+
+    Args:
+        current_state: Current application state with required components
+    """
+    adobe_help_node, _, _ = create_tavily_node(
+        llm=app_state.llm, name=ADOBEHELP
+    )
+
+    rag_node, _ = create_rag_node(
+        retriever=app_state.datastore_manager.get_retriever(),
+        llm=app_state.llm,
+        name=VIDEOARCHIVE,
+    )
+
+    supervisor_agent = create_team_supervisor(
+        app_state.llm,
+        SUPERVISOR_SYSTEM,
+        [VIDEOARCHIVE, ADOBEHELP],
+    )
+
+    ai_graph = langgraph.graph.StateGraph(PsTutsTeamState)
+
+    ai_graph.add_node(VIDEOARCHIVE, rag_node)
+    ai_graph.add_node(ADOBEHELP, adobe_help_node)
+    ai_graph.add_node("supervisor", supervisor_agent)
+
+    edges = [
+        [VIDEOARCHIVE, "supervisor"],
+        [ADOBEHELP, "supervisor"],
+    ]
+
+    [ai_graph.add_edge(*p) for p in edges]
+
+    ai_graph.add_conditional_edges(
+        "supervisor",
+        lambda x: x["next"],
+        {
+            VIDEOARCHIVE: VIDEOARCHIVE,
+            ADOBEHELP: ADOBEHELP,
+            "FINISH": langgraph.graph.END,
+        },
     )
+
+    ai_graph.set_entry_point("supervisor")
+    app_state.ai_graph_sketch = ai_graph
+    app_state.ai_graph = enter_chain | ai_graph.compile()
+
+
+def enter_chain(message: str):
+    """
+    Entry point for the agent graph chain.
+
+    Transforms a user message into the state format expected by the agent graph.
+
+    Args:
+        message: User's input message
+
+    Returns:
+        Dictionary with the message and team members information
+    """
+    results = {
+        "messages": [HumanMessage(content=message)],
+        "team_members": [VIDEOARCHIVE, ADOBEHELP],
+    }
+    return results


 @cl.on_chat_start
 async def on_chat_start():
+    """
+    Initializes the application when a new chat session starts.
+
+    Sets up the language model, vector database components, and spawns tasks
+    for database population and graph building.
+    """
+    app_state.llm = ChatOpenAI(model=params.tool_calling_model, temperature=0)
+    app_state.qdrant_client = QdrantClient(":memory:")

+    app_state.embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
+
+    app_state.datastore_manager = pstuts_rag.datastore.DatastoreManager(
+        qdrant_client=app_state.qdrant_client,
+        name="local_test",
+        embeddings=app_state.embeddings,
     )
+
+    app_state.tasks.append(asyncio.create_task(fill_the_db(app_state)))
+
+    app_state.tasks.append(asyncio.create_task(build_the_graph(app_state)))
+
+
+def process_response(
+    response_message: BaseMessage,
+) -> Tuple[str, List[cl.Message]]:
+    """
+    Processes a response from the AI agents.
+
+    Extracts the main text and video references from the response,
+    and creates message elements for displaying video content.
+
+    Args:
+        response_message: Response object from the AI agent
+
+    Returns:
+        Tuple containing the text response and a list of message elements with video references
+    """
+    streamed_text = f"[_from: {response_message.name}_]\n"
+    msg_references = []
+
+    if response_message.name == VIDEOARCHIVE:
+        text, references = pstuts_rag.rag.RAGChainFactory.unpack_references(
+            str(response_message.content)
+        )
+        streamed_text += text
+
+        if len(references) > 0:
+            references = json.loads(references)
+            print(references)
+
+            for ref in references:
+                msg_references.append(
+                    cl.Message(
+                        content=(
+                            f"Watch {ref['title']} from timestamp "
+                            f"{round(ref['start'] // 60)}m:{round(ref['start'] % 60)}s"
+                        ),
+                        elements=[
+                            cl.Video(
+                                name=ref["title"],
+                                url=f"{ref['source']}#t={ref['start']}",
+                                display="side",
+                            )
+                        ],
+                    )
+                )
+    else:
+        streamed_text += str(response_message.content)
+
+        # Find all URLs in the content
+        urls = re.findall(
+            r"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*(?:\?[/\w\.-=&%]*)?",
+            str(response_message.content),
+        )
+        print(urls)
+        links = []
+        # Create a list of unique URLs
+        for idx, u in enumerate(list(set(urls))):
+
+            url = "https://api.microlink.io"
+            params = {
+                "url": u,
+                "screenshot": True,
+            }
+
+            payload = requests.get(url, params)
+
+            if payload:
+                print(f"Successful screenshot\n{payload.json()}")
+                links.append(
+                    cl.Image(
+                        name=f"Website {idx} Preview: {u}",
+                        display="side",  # Show in the sidebar
+                        url=payload.json()["data"]["screenshot"]["url"],
+                    )
+                )
+
+        print(links)
+        msg_references.append(
+            cl.Message(
+                content="\n".join([l.url for l in links]), elements=links
+            )
+        )
+
+    return streamed_text, msg_references


 @cl.on_message
 async def main(message: cl.Message):
+    """
+    Processes incoming user messages and sends responses.
+
+    Streams the AI agent's response, processes it to extract text and video
+    references, and sends the content back to the user's chat interface.
+
+    Args:
+        message: User's input message
+    """
+    for s in app_state.ai_graph.stream(
+        message.content, {"recursion_limit": 20}
+    ):
+        if "__end__" not in s and "supervisor" not in s.keys():
+            for [node_type, node_response] in s.items():
+                print(f"Processing {node_type} messages")
+                for message in node_response["messages"]:
+                    print(f"Message {message}")
+                    msg = cl.Message(content="")
+                    text, references = process_response(message)
+                    for token in [char for char in text]:
+                        await msg.stream_token(token)
+                    await msg.send()
+                    for m in references:
+                        await m.send()


 if __name__ == "__main__":
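Editor's note: a minimal sketch of what the new `enter_chain | ai_graph.compile()` pipeline consumes. Piping a plain question string through `enter_chain` wraps it in the team state that the supervisor routes on (illustrative only; it reuses the names defined in app.py above):

    state_in = enter_chain("How do I crop a layer?")
    assert state_in["team_members"] == [VIDEOARCHIVE, ADOBEHELP]
    assert state_in["messages"][0].content == "How do I crop a layer?"
    # The compiled graph then streams one dict per node hop, e.g.
    # {"supervisor": {"next": "VideoArchiveSearch"}}, until it routes FINISH.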
notebooks/transcript_agents.ipynb CHANGED
@@ -9,7 +9,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 282,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -22,7 +22,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 283,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -31,18 +31,9 @@
 },
 {
 "cell_type": "code",
- "execution_count": 284,
 "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "The autoreload extension is already loaded. To reload it, use:\n",
- " %reload_ext autoreload\n"
- ]
- }
- ],
 "source": [
 "%load_ext autoreload\n",
 "%autoreload 2\n"
@@ -50,7 +41,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 285,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -75,7 +66,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 286,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -92,7 +83,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 287,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -105,7 +96,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 288,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -115,7 +106,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 289,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -131,7 +122,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 290,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -143,9 +134,17 @@
 },
 {
 "cell_type": "code",
- "execution_count": 291,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
@@ -154,7 +153,7 @@
 " 'content': 'Cropping is the easiest way to remove unwanted objects or people at the edges of a photograph. Anything outside the crop boundary will disappear from your image',\n",
 " 'score': 0.585789},\n",
 " {'title': 'Crop, resize, and resample images in Photoshop Elements',\n",
- " 'url': 'https://helpx.adobe.com/au/photoshop-elements/kb/crop-resize-resample-photoshop-elements.html',\n",
 " 'content': 'When you crop an image, you trim away material from the edges to show a smaller area, often for artistic reasons.',\n",
 " 'score': 0.585789},\n",
 " {'title': 'How to crop and straighten photos in Photoshop',\n",
@@ -167,11 +166,11 @@
 " 'score': 0.5367749},\n",
 " {'title': 'Crop a photo using the Crop tool',\n",
 " 'url': 'https://helpx.adobe.com/photoshop/using/tool-techniques/crop-tool.html',\n",
- " 'content': 'The Crop tool allows you to select an area of a photo and remove or crop everything outside the selected area. Photoshop Crop Tool.',\n",
- " 'score': 0.49567938}]"
 ]
 },
- "execution_count": 291,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -182,7 +181,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 292,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -191,35 +190,42 @@
 },
 {
 "cell_type": "code",
- "execution_count": 293,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
- "('To create and use clipping masks in Adobe Photoshop, follow these steps:\\n'\n",
 " '\\n'\n",
- " '1. Arrange your layers in the Layers panel so that the layer you want to use '\n",
- " 'as a mask is directly below the layer you want to mask.\\n'\n",
 " '\\n'\n",
- " '2. Select the top layer (the one you want to be clipped) in the Layers '\n",
- " 'panel.\\n'\n",
 " '\\n'\n",
- " '3. Choose Layer > Create Clipping Mask from the menu. Alternatively, you can '\n",
- " 'right-click the top layer and select \"Create Clipping Mask.\"\\n'\n",
 " '\\n'\n",
- " '4. The top layer will now be clipped to the shape and transparency of the '\n",
- " 'layer below it, meaning it will only show through where the bottom layer has '\n",
- " 'pixels.\\n'\n",
 " '\\n'\n",
- " 'You can use two or more layers as a clipping mask, and you can release the '\n",
- " 'clipping mask by selecting the clipped layer and choosing Layer > Release '\n",
- " 'Clipping Mask.\\n'\n",
 " '\\n'\n",
- " \"For more detailed instructions, you can visit Adobe's official help page on \"\n",
- " 'clipping masks:\\n'\n",
- " 'https://helpx.adobe.com/photoshop/using/revealing-layers-clipping-masks.html')\n"
 ]
 }
 ],
@@ -230,7 +236,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 294,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -239,7 +245,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 295,
 "metadata": {},
 "outputs": [
 {
@@ -249,19 +255,22 @@
 "================================\u001b[1m Human Message \u001b[0m=================================\n",
 "Name: AdobeHelp\n",
 "\n",
- "Layers in Adobe Photoshop are fundamental elements that allow you to work on different parts of an image independently without affecting other parts. Think of layers as stacked, transparent sheets of glass, where each layer can contain images, text, effects, or objects. You can edit, move, and apply changes to one layer without altering the content on other layers.\n",
 "\n",
- "Layers help you make nondestructive edits by stacking and managing images, text, and graphics separately. They are arranged in a stack in the Layers panel, usually located in the bottom right of the workspace. You can add multiple layers to composite images, add text, apply filters, and create complex designs.\n",
 "\n",
- "The bottommost layer is often the Background layer, which is locked by default but can be converted to a regular layer for more flexibility.\n",
 "\n",
- "In summary, layers let you:\n",
- "- Work on different elements independently\n",
- "- Apply effects and adjustments to specific parts\n",
- "- Rearrange content by changing the stacking order\n",
- "- Control opacity and blending modes for creative effects\n",
 "\n",
- "For more details, you can visit Adobe's official help page about layers: \n",
 "https://helpx.adobe.com/photoshop/web/edit-images/manage-layers/about-layers.html\n"
 ]
 }
@@ -282,7 +291,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 296,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -304,7 +313,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 297,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -327,7 +336,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 298,
 "metadata": {},
 "outputs": [
 {
@@ -350,7 +359,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 299,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -360,7 +369,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 300,
 "metadata": {},
 "outputs": [
 {
@@ -378,7 +387,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 301,
 "metadata": {},
 "outputs": [
 {
@@ -388,7 +397,7 @@
 "================================\u001b[1m Human Message \u001b[0m=================================\n",
 "Name: VideoArchiveSearch\n",
 "\n",
- "Layers are the building blocks of any image in Photoshop CC. You can think of layers like separate flat panes of glass stacked on top of each other, with each layer containing separate pieces of content. Some parts of a layer can be transparent, allowing you to see through to the layers below. This setup lets you edit parts of an image independently without affecting the rest of the image. You work with layers in the Layers panel, where you can toggle their visibility on and off using the Eye icon. (See explanation around 0:28–2:00 and 1:25–2:32 in the video) 🎨🖼️\n",
 "**REFERENCES**\n",
 "[\n",
 " {\n",
@@ -421,9 +430,18 @@
 },
 {
 "cell_type": "code",
- "execution_count": 302,
 "metadata": {},
- "outputs": [],
 "source": [
 "from pstuts_rag.agents import create_team_supervisor\n",
 "from pstuts_rag.prompt_templates import SUPERVISOR_SYSTEM\n",
@@ -437,16 +455,16 @@
 },
 {
 "cell_type": "code",
- "execution_count": 303,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
- "<langgraph.graph.state.StateGraph at 0x71685a01e120>"
 ]
 },
- "execution_count": 303,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -478,7 +496,34 @@
 },
 {
 "cell_type": "code",
- "execution_count": 304,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -488,7 +533,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 305,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -498,7 +543,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 306,
 "metadata": {},
 "outputs": [
 {
@@ -513,10 +558,10 @@
 " * \n",
 " +------------+ \n",
 " | supervisor | \n",
- " .....+------------+..... \n",
- " .... . .... \n",
- " ..... . ..... \n",
- " ... . ... \n",
 "+-----------+ +--------------------+ +---------+ \n",
 "| AdobeHelp | | VideoArchiveSearch | | __end__ | \n",
 "+-----------+ +--------------------+ +---------+ \n"
@@ -532,7 +577,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 307,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -556,27 +601,24 @@
 " for response in s.values():\n",
 " for msg in response['messages']:\n",
 " msg.pretty_print()\n",
- " else:\n",
- " print(s)\n",
 " print(\"---\")\n",
 " \n"
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 308,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
- "{'supervisor': {'next': 'VideoArchiveSearch'}}\n",
 "---\n",
 "================================\u001b[1m Human Message \u001b[0m=================================\n",
 "Name: VideoArchiveSearch\n",
 "\n",
- "Layers are the building blocks of any image in Photoshop CC. You can think of layers like separate flat panes of glass stacked on top of each other, where each layer contains separate pieces of content. Some parts of a layer can be transparent, allowing you to see through to the layers below. This setup lets you edit parts of an image independently without affecting the rest of the image. You work with layers in the Layers panel, where you can toggle their visibility on and off to see what each layer contains (explained around 0:28 to 1:03 and 1:25 to 2:33 in the video).\n",
 "**REFERENCES**\n",
 "[\n",
 " {\n",
@@ -593,7 +635,6 @@
 " }\n",
 "]\n",
 "---\n",
- "{'supervisor': {'next': 'FINISH'}}\n",
 "---\n"
 ]
 }
@@ -604,40 +645,41 @@
 },
 {
 "cell_type": "code",
- "execution_count": 309,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
- "{'supervisor': {'next': 'VideoArchiveSearch'}}\n",
 "---\n",
 "================================\u001b[1m Human Message \u001b[0m=================================\n",
 "Name: VideoArchiveSearch\n",
 "\n",
 "I don't know. This isn’t covered in the training videos.\n",
 "---\n",
- "{'supervisor': {'next': 'AdobeHelp'}}\n",
 "---\n",
 "================================\u001b[1m Human Message \u001b[0m=================================\n",
 "Name: AdobeHelp\n",
 "\n",
- "To crop a layer in Adobe Photoshop, you can use the Crop tool in a way that targets the active layer or a selection within that layer. Here's how to do it:\n",
 "\n",
 "1. Select the layer you want to crop by clicking its thumbnail in the Layers panel.\n",
 "2. Choose the Crop tool from the toolbar.\n",
- "3. A bounding box will appear around the active layer or your selection.\n",
 "4. Drag the handles of the bounding box to set the crop size.\n",
 "5. You can also rotate the layer by moving the rotation dial below the bounding box if needed.\n",
- "6. Once you have the desired crop area, apply the crop.\n",
 "\n",
 "This method allows you to crop and rotate an active layer or the contents of a selection non-destructively.\n",
 "\n",
- "For more details, you can visit Adobe's official help page on cropping and rotating layers:\n",
 "https://helpx.adobe.com/photoshop/using/crop-move-rotate-photos.html\n",
 "---\n",
- "{'supervisor': {'next': 'FINISH'}}\n",
 "---\n"
 ]
 }
 
 },
 {
 "cell_type": "code",
+ "execution_count": 1,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 2,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 3,
 "metadata": {},
+ "outputs": [],
 "source": [
 "%load_ext autoreload\n",
 "%autoreload 2\n"

 },
 {
 "cell_type": "code",
+ "execution_count": 4,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 5,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 6,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 7,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 8,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 9,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 10,
 "metadata": {},
 "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/tmp/ipykernel_7954/3445616894.py:1: LangChainDeprecationWarning: The method `BaseTool.__call__` was deprecated in langchain-core 0.1.47 and will be removed in 1.0. Use :meth:`~invoke` instead.\n",
+ " adobe_search(\"What is crop?\")\n"
+ ]
+ },
 {
 "data": {
 "text/plain": [

 " 'content': 'Cropping is the easiest way to remove unwanted objects or people at the edges of a photograph. Anything outside the crop boundary will disappear from your image',\n",
 " 'score': 0.585789},\n",
 " {'title': 'Crop, resize, and resample images in Photoshop Elements',\n",
+ " 'url': 'https://helpx.adobe.com/photoshop-elements/kb/crop-resize-resample-photoshop-elements.html',\n",
 " 'content': 'When you crop an image, you trim away material from the edges to show a smaller area, often for artistic reasons.',\n",
 " 'score': 0.585789},\n",
 " {'title': 'How to crop and straighten photos in Photoshop',\n",

 " 'score': 0.5367749},\n",
 " {'title': 'Crop a photo using the Crop tool',\n",
 " 'url': 'https://helpx.adobe.com/photoshop/using/tool-techniques/crop-tool.html',\n",
+ " 'content': 'The Crop tool allows you to select an area of a photo and remove or crop everything outside the selected area.',\n",
+ " 'score': 0.49465415}]"
 ]
 },
+ "execution_count": 10,
 "metadata": {},
 "output_type": "execute_result"
 }

 },
 {
 "cell_type": "code",
+ "execution_count": 11,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 12,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
+ "('To create a new layer in Adobe Photoshop, you can do any of the following:\\n'\n",
 " '\\n'\n",
+ " '1. Click the \"Create a New Layer\" button at the bottom of the Layers panel. '\n",
+ " 'This creates a new transparent layer above the currently selected layer.\\n'\n",
 " '\\n'\n",
+ " '2. Choose Layer > New > Layer from the top menu. This opens a dialog box '\n",
+ " 'where you can name the layer and set options before creating it.\\n'\n",
 " '\\n'\n",
+ " '3. Alt-click (Windows) or Option-click (Mac) the \"Create a New Layer\" button '\n",
+ " 'to open the New Layer dialog box and set layer options.\\n'\n",
 " '\\n'\n",
+ " '4. Ctrl-click (Windows) or Command-click (Mac) the \"Create a New Layer\" '\n",
+ " 'button to add a new layer below the currently selected layer.\\n'\n",
 " '\\n'\n",
+ " 'Additionally, you can create a new layer from a selection by making a '\n",
+ " 'selection and then choosing:\\n'\n",
+ " '- Layer > New > Layer Via Copy (copies the selection to a new layer)\\n'\n",
+ " '- Layer > New > Layer Via Cut (cuts the selection and pastes it into a new '\n",
+ " 'layer)\\n'\n",
 " '\\n'\n",
+ " 'A new layer appears above the selected layer or within the selected group in '\n",
+ " 'the Layers panel.\\n'\n",
+ " '\\n'\n",
+ " \"For more details, you can visit Adobe's official help page on creating and \"\n",
+ " 'managing layers:\\n'\n",
+ " 'https://helpx.adobe.com/photoshop/using/create-layers-groups.html\\n'\n",
+ " '\\n'\n",
+ " '**URL**\\n'\n",
+ " 'https://helpx.adobe.com/photoshop/using/create-layers-groups.html')\n"
 ]
 }
 ],

 },
 {
 "cell_type": "code",
+ "execution_count": 13,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 14,
 "metadata": {},
 "outputs": [
 {

 "================================\u001b[1m Human Message \u001b[0m=================================\n",
 "Name: AdobeHelp\n",
 "\n",
+ "Layers in Adobe Photoshop are fundamental elements used for editing and composing images. They are like stacked, transparent sheets of glass, each containing images, text, effects, or objects. Layers allow you to work on different parts of an image independently without affecting other layers. This makes editing nondestructive and flexible.\n",
+ "\n",
+ "You can think of layers as separate pieces of content stacked on top of each other. The Layers panel in Photoshop shows these layers arranged in a stack, where you can manage their order, visibility, and properties such as opacity and blending modes. Layers enable you to composite multiple images, add text, apply filters, and make adjustments selectively.\n",
 "\n",
+ "The bottommost layer is usually the Background layer, which is locked by default but can be converted into a regular layer for more editing options.\n",
 "\n",
+ "In summary, layers help you:\n",
+ "- Edit parts of an image independently\n",
+ "- Stack multiple images or elements\n",
+ "- Apply effects and adjustments nondestructively\n",
+ "- Manage complex compositions easily\n",
 "\n",
+ "For more detailed information, you can visit Adobe's official help page on layers:\n",
+ "https://helpx.adobe.com/photoshop/web/edit-images/manage-layers/about-layers.html\n",
 "\n",
+ "**URL**\n",
 "https://helpx.adobe.com/photoshop/web/edit-images/manage-layers/about-layers.html\n"
 ]
 }

 },
 {
 "cell_type": "code",
+ "execution_count": 15,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 16,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 17,
 "metadata": {},
 "outputs": [
 {

 },
 {
 "cell_type": "code",
+ "execution_count": 18,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 19,
 "metadata": {},
 "outputs": [
 {

 },
 {
 "cell_type": "code",
+ "execution_count": 20,
 "metadata": {},
 "outputs": [
 {

 "================================\u001b[1m Human Message \u001b[0m=================================\n",
 "Name: VideoArchiveSearch\n",
 "\n",
+ "Layers are the building blocks of any image in Photoshop CC. You can think of layers like separate flat panes of glass stacked on top of each other, with each layer containing separate pieces of content. Some parts of a layer can be transparent, allowing you to see through to the layers below. This setup lets you edit parts of an image independently without affecting the rest of the image. You manage and work with layers in the Layers panel, where you can toggle their visibility on and off using the Eye icon. (See explanation around 0:28 to 1:00 and 1:25 to 2:32) 🎨🖼️\n",
 "**REFERENCES**\n",
 "[\n",
 " {\n",

 },
 {
 "cell_type": "code",
+ "execution_count": 21,
 "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/home/mbudisic/Documents/PsTuts-RAG/pstuts_rag/pstuts_rag/agents.py:79: LangChainDeprecationWarning: The method `BaseChatOpenAI.bind_functions` was deprecated in langchain-openai 0.2.1 and will be removed in 1.0.0. Use :meth:`~langchain_openai.chat_models.base.ChatOpenAI.bind_tools` instead.\n",
+ " | llm.bind_functions(functions=[function_def], function_call=\"route\")\n"
+ ]
+ }
+ ],
 "source": [
 "from pstuts_rag.agents import create_team_supervisor\n",
 "from pstuts_rag.prompt_templates import SUPERVISOR_SYSTEM\n",

 },
 {
 "cell_type": "code",
+ "execution_count": 22,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
+ "<langgraph.graph.state.StateGraph at 0x794d6582ecf0>"
 ]
 },
+ "execution_count": 22,
 "metadata": {},
 "output_type": "execute_result"
 }

 },
 {
 "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "dict_keys(['VideoArchiveSearch', 'AdobeHelp', 'supervisor'])"
+ ]
+ },
+ "execution_count": 32,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "adobe_help_graph.nodes.keys()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 24,
 "metadata": {},
 "outputs": [],
 "source": [

 },
 {
 "cell_type": "code",
+ "execution_count": 25,
 "metadata": {},
 "outputs": [
 {

 " * \n",
 " +------------+ \n",
 " | supervisor | \n",
+ " *****+------------+..... \n",
+ " **** . .... \n",
+ " ***** . ..... \n",
+ " *** . ... \n",
 "+-----------+ +--------------------+ +---------+ \n",
 "| AdobeHelp | | VideoArchiveSearch | | __end__ | \n",
 "+-----------+ +--------------------+ +---------+ \n"

 },
 {
 "cell_type": "code",
+ "execution_count": 26,
 "metadata": {},
 "outputs": [],
 "source": [

 " for response in s.values():\n",
 " for msg in response['messages']:\n",
 " msg.pretty_print()\n",
 " print(\"---\")\n",
 " \n"
 ]
 },
 {
 "cell_type": "code",
+ "execution_count": 27,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [

 "---\n",
 "================================\u001b[1m Human Message \u001b[0m=================================\n",
 "Name: VideoArchiveSearch\n",
 "\n",
+ "Layers are the building blocks of any image in Photoshop CC. You can think of layers like separate flat panes of glass stacked on top of each other, where each layer contains separate pieces of content. Some parts of a layer can be transparent, allowing you to see through to the layers below. This setup lets you edit parts of an image independently without affecting the rest of the image. You manage and work with layers in the Layers panel, where you can toggle their visibility on and off using the Eye icon. (See 0:28–1:03 and 1:25–2:32) 🎨🖼️\n",
 "**REFERENCES**\n",
 "[\n",
 " {\n",

 " }\n",
 "]\n",
 "---\n",

 "---\n"
 ]
 }

 },
 {
 "cell_type": "code",
+ "execution_count": 28,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [

 "---\n",
 "================================\u001b[1m Human Message \u001b[0m=================================\n",
 "Name: VideoArchiveSearch\n",
 "\n",
 "I don't know. This isn’t covered in the training videos.\n",
 "---\n",

 "---\n",
 "================================\u001b[1m Human Message \u001b[0m=================================\n",
 "Name: AdobeHelp\n",
 "\n",
+ "To crop a layer in Adobe Photoshop, you can use the Crop tool in a way that targets the active layer or a selection within that layer. Here's how you can do it:\n",
 "\n",
 "1. Select the layer you want to crop by clicking its thumbnail in the Layers panel.\n",
 "2. Choose the Crop tool from the toolbar.\n",
+ "3. A bounding box will appear around the active layer or the selection.\n",
 "4. Drag the handles of the bounding box to set the crop size.\n",
 "5. You can also rotate the layer by moving the rotation dial below the bounding box if needed.\n",
+ "6. Apply touch gestures or mouse actions to pan, zoom, or reset the crop area.\n",
+ "7. When satisfied, confirm the crop to apply it to the layer.\n",
 "\n",
 "This method allows you to crop and rotate an active layer or the contents of a selection non-destructively.\n",
 "\n",
+ "For more detailed instructions, you can visit Adobe's official help page on cropping and rotating layers:\n",
+ "https://helpx.adobe.com/photoshop/using/crop-move-rotate-photos.html\n",
+ "\n",
+ "**URL**\n",
 "https://helpx.adobe.com/photoshop/using/crop-move-rotate-photos.html\n",
 "---\n",

 "---\n"
 ]
 }
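Editor's note: the notebook's driver loop, reconstructed as a standalone sketch. It mirrors the cell source shown in the diff above; `compiled_graph` is an assumed name for the enter_chain-piped graph built in the preceding cells:

    for s in compiled_graph.stream("What are layers?", {"recursion_limit": 20}):
        # Worker nodes carry messages; bare supervisor routing dicts are skipped.
        if "__end__" not in s and "supervisor" not in s:
            for response in s.values():
                for msg in response["messages"]:
                    msg.pretty_print()
        print("---")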
pstuts_rag/pstuts_rag/agents.py CHANGED
@@ -19,13 +19,18 @@ class PsTutsTeamState(TypedDict):
 next: str


-def agent_node(state, agent, name, outputfield: str = "output"):
     """agent_node calls the invoke function of the agent Runnable"""
     # Initialize team_members if it's not already in the state
     if "team_members" not in state:
         state["team_members"] = []
     result = agent.invoke(state)
-    return {"messages": [HumanMessage(content=result[outputfield], name=name)]}


 def create_agent(
@@ -47,7 +52,7 @@ def create_agent(
     return executor


-def create_team_supervisor(llm: ChatOpenAI, system_prompt, members):
     """An LLM-based router."""
     options = ["FINISH"] + members
     function_def = {

 next: str


+def agent_node(
+    state: PsTutsTeamState,
+    agent: Runnable,
+    name: str,
+    output_field: str = "output",
+):
     """agent_node calls the invoke function of the agent Runnable"""
     # Initialize team_members if it's not already in the state
     if "team_members" not in state:
         state["team_members"] = []
     result = agent.invoke(state)
+    return {"messages": [AIMessage(content=result[output_field], name=name)]}


 def create_agent(

     return executor


+def create_team_supervisor(llm: BaseChatModel, system_prompt, members):
     """An LLM-based router."""
     options = ["FINISH"] + members
     function_def = {
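Editor's note: a sketch of how the retyped agent_node is typically bound into a graph node, using functools.partial to fix the agent and name so the node becomes a single-argument callable over the team state. The agent and node names below are hypothetical, not taken from this commit:

    import functools

    # video_agent is any Runnable whose .invoke(state) returns {"output": ...}
    video_node = functools.partial(
        agent_node, agent=video_agent, name="VideoArchiveSearch"
    )
    # graph.add_node("VideoArchiveSearch", video_node)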
pstuts_rag/pstuts_rag/prompt_templates.py CHANGED
@@ -67,9 +67,6 @@ for Adobe Photoshop help topics using the tavily search engine.
 Users may provide you with partial questions - try your best to determine their intent.

 If Tavily provides no references, respond with "I don't know".
-
-IMPORTANT: Include ALL urls from all references Tavily provides.
-Separate them from the rest of the text using a line containing "**URL**"
 """

 SUPERVISOR_SYSTEM = """You are the Supervisor for an agentic RAG system. Your job is to

 Users may provide you with partial questions - try your best to determine their intent.

 If Tavily provides no references, respond with "I don't know".
 """

 SUPERVISOR_SYSTEM = """You are the Supervisor for an agentic RAG system. Your job is to
pstuts_rag/pstuts_rag/rag.py CHANGED
@@ -119,9 +119,7 @@ class RAGChainFactory:
             return text, references

         else:
-            raise ValueError(
-                f"No '**References:**' section found in input:\n{content}"
-            )

     def __init__(
         self,

             return text, references

         else:
+            return content, ""

     def __init__(
         self,
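Editor's note: a quick behavior sketch of the changed fallback. Input without a references section now round-trips instead of raising (hypothetical strings; uses the class above):

    text, refs = RAGChainFactory.unpack_references("Plain answer, no references.")
    assert text == "Plain answer, no references."
    assert refs == ""  # this input raised ValueError before this commit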
pyproject.toml CHANGED
@@ -92,3 +92,8 @@ extension-pkg-allow-list = "numpy, torch" # compiled deps that astroid cannot p

 [tool.pylint.TYPECHECK]
 ignored-modules = "pkg_resources" # suppress noisy vendored imports


 [tool.pylint.TYPECHECK]
 ignored-modules = "pkg_resources" # suppress noisy vendored imports
+
+[dependency-groups]
+dev = [
+    "ipdb>=0.13.13",
+]
test_screenshot.py ADDED
@@ -0,0 +1,11 @@
+import requests
+
+url = "https://api.microlink.io"
+params = {
+    "url": "https://www.framatome.com",
+    "screenshot": True,
+}
+
+response = requests.get(url, params)
+
+print(response.json())
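Editor's note: the same Microlink call as used by process_response() in app.py, extended to pull out the screenshot URL. A sketch; it assumes the request succeeds and the standard Microlink "data" payload shape:

    import requests

    response = requests.get(
        "https://api.microlink.io",
        params={"url": "https://www.framatome.com", "screenshot": True},
    )
    if response.ok:
        # Microlink returns {"status": ..., "data": {"screenshot": {"url": ...}}}
        print(response.json()["data"]["screenshot"]["url"])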
uv.lock CHANGED
@@ -940,6 +940,19 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050 },
 ]

 [[package]]
 name = "ipykernel"
 version = "6.29.5"
@@ -2370,6 +2383,11 @@ dev = [
     { name = "pytest" },
 ]

 [package.metadata]
 requires-dist = [
     { name = "aiohttp", specifier = ">=3.8.0" },
@@ -2412,6 +2430,9 @@ requires-dist = [
 ]
 provides-extras = ["dev"]

 [[package]]
 name = "psutil"
 version = "7.0.0"

     { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050 },
 ]

+[[package]]
+name = "ipdb"
+version = "0.13.13"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "decorator" },
+    { name = "ipython" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3d/1b/7e07e7b752017f7693a0f4d41c13e5ca29ce8cbcfdcc1fd6c4ad8c0a27a0/ipdb-0.13.13.tar.gz", hash = "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726", size = 17042 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/0c/4c/b075da0092003d9a55cf2ecc1cae9384a1ca4f650d51b00fc59875fe76f6/ipdb-0.13.13-py3-none-any.whl", hash = "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4", size = 12130 },
+]
+
 [[package]]
 name = "ipykernel"
 version = "6.29.5"

     { name = "pytest" },
 ]

+[package.dev-dependencies]
+dev = [
+    { name = "ipdb" },
+]
+
 [package.metadata]
 requires-dist = [
     { name = "aiohttp", specifier = ">=3.8.0" },

 ]
 provides-extras = ["dev"]

+[package.metadata.requires-dev]
+dev = [{ name = "ipdb", specifier = ">=0.13.13" }]
+
 [[package]]
 name = "psutil"
 version = "7.0.0"