tonko22 committed
Commit 23f553c · 1 Parent(s): 83c3279

feat: update formatting tools to match new analysis structure


- Remove handling of deprecated `significant_lines` field
- Improve rich formatting of lyrics using Panel
- Add `conclusion` style for conclusion section
- Fix error with missing `neutral` style
- Remove unused Markdown import
- Enhance lyrics formatting while preserving `quote` style

Gradio_UI.py CHANGED
@@ -268,7 +268,7 @@ class GradioUI:
         import gradio as gr
 
         # Define instruction text
-        instructions = """
+        ui_tip_text = """
         # 🎵 Song Meaning Bot 🎶
         ### How to Use:
         1. Paste song title in the input field.
@@ -277,7 +277,7 @@ class GradioUI:
         """
 
         with gr.Blocks(fill_height=True) as demo:
-            gr.Markdown(instructions)
+            gr.Markdown(ui_tip_text)
             stored_messages = gr.State([])
             file_uploads_log = gr.State([])
             chatbot = gr.Chatbot(
app.py CHANGED
@@ -36,7 +36,7 @@ def main():
 
     # If using Ollama, we need to specify the API base URL
     # Initialize the LLM model based on configuration
-    model_id = get_model_id(is_test=is_test)
+    model_id = get_model_id(use_local=is_test)
    logger.info(f"Initializing with model: {model_id}")
    if is_test:
        api_base = get_ollama_api_base()
config.py CHANGED
@@ -24,13 +24,28 @@ def load_api_keys():
     """Load API keys from environment variables."""
     # Gemini API is the default
     os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY")
+    os.environ["OPENROUTER_API_KEY"] = os.getenv("OPENROUTER_API_KEY")
 
-def get_model_id(is_test=True):
-    """Get the appropriate model ID based on configuration."""
-    if is_test:
-        return "ollama/gemma3:4b"  # Using local Ollama with Gemma 3:4B instead of Claude
-    else:
+def get_model_id(use_local=True, provider="ollama"):
+    """Get the appropriate model ID based on configuration.
+
+    Args:
+        use_local: If True, use test configuration (local development).
+                   If False, use production configuration.
+        provider: Model provider ('ollama', 'gemini', 'openrouter')
+
+    Returns:
+        String with model ID for the specified provider.
+    """
+    if provider == "ollama":
+        return "ollama/gemma3:4b"  # Using local Ollama with Gemma 3:4B
+    elif provider == "gemini":
         return "gemini/gemini-2.0-flash"
+    elif provider == "openrouter":
+        return "openrouter/google/gemini-2.0-flash-lite-preview-02-05:free"  # Free Gemini 2.0 Flash Lite via OpenRouter
+    else:
+        # Default fallback
+        return "ollama/gemma3:4b" if use_local else "gemini/gemini-2.0-flash"
 
 def get_ollama_api_base():
     """Get the API base URL for Ollama."""
prompts/prompts_hf.yaml CHANGED
@@ -1,4 +1,4 @@
-"system_prompt": |-
+system_prompt: |-
   You are an expert assistant who can solve any task using code blobs. You will be given a task to solve as best you can.
   To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code.
   To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences.
@@ -172,8 +172,8 @@
   10. Don't give up! You're in charge of solving the task, not providing directions to solve it.
 
   Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000.
-"planning":
-  "initial_facts": |-
+planning:
+  initial_facts: |-
     Below I will present you a task.
 
     You will now build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need.
@@ -196,7 +196,13 @@
     ### 2. Facts to look up
     ### 3. Facts to derive
     Do not add anything else.
-  "initial_plan": |-
+
+    Here is the task:
+    ```
+    {{task}}
+    ```
+    Now begin!
+  initial_plan : |-
     You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools.
 
     Now for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts.
@@ -234,7 +240,7 @@
     ```
 
     Now begin! Write your plan below.
-  "update_facts_pre_messages": |-
+  update_facts_pre_messages: |-
     You are a world expert at gathering known and unknown facts based on a conversation.
     Below you will find a task, and a history of attempts made to solve the task. You will have to produce a list of these:
     ### 1. Facts given in the task
@@ -242,7 +248,7 @@
     ### 3. Facts still to look up
     ### 4. Facts still to derive
     Find the task and history below:
-  "update_facts_post_messages": |-
+  update_facts_post_messages: |-
     Earlier we've built a list of facts.
     But since in your previous steps you may have learned useful new facts or invalidated some false ones.
     Please update your list of facts based on the previous history, and provide these headings:
@@ -252,7 +258,7 @@
     ### 4. Facts still to derive
 
     Now write your new list of facts below.
-  "update_plan_pre_messages": |-
+  update_plan_pre_messages: |-
    You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools.
 
    You have been given a task:
@@ -263,7 +269,7 @@
    Find below the record of what has been tried so far to solve it. Then you will be asked to make an updated plan to solve the task.
    If the previous tries so far have met some success, you can make an updated plan based on these actions.
    If you are stalled, you can make a completely new plan starting from scratch.
-  "update_plan_post_messages": |-
+  update_plan_post_messages: |-
    You're still working towards solving this task:
    ```
    {{task}}
@@ -299,23 +305,29 @@
   After writing the final step of the plan, write the '\n<end_plan>' tag and stop there.
 
   Now write your new plan below.
-"managed_agent":
-  "task": |-
-    You're a helpful agent named '{{name}}'.
-    You have been submitted this task by your manager.
-    ---
-    Task:
-    {{task}}
-    ---
-    You're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible to give them a clear understanding of the answer.
-
-    Your final_answer WILL HAVE to contain these parts:
-    ### 1. Task outcome (short version):
-    ### 2. Task outcome (extremely detailed version):
-    ### 3. Additional context (if relevant):
-
-    Put all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost.
-    And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback.
-  "report": |-
-    Here is the final answer from your managed agent '{{name}}':
-    {{final_answer}}
+managed_agent:
+  task: |-
+    You're a helpful agent named '{{name}}'.
+    You have been submitted this task by your manager.
+    ---
+    Task:
+    {{task}}
+    ---
+    You're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible to give them a clear understanding of the answer.
+
+    Your final_answer WILL HAVE to contain these parts:
+    ### 1. Task outcome (short version):
+    ### 2. Task outcome (extremely detailed version):
+    ### 3. Additional context (if relevant):
+
+    Put all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost.
+    And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback.
+  report: |-
+    Here is the final answer from your managed agent '{{name}}':
+    {{final_answer}}
+final_answer:
+  pre_messages: |-
+    An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:
+  post_messages: |-
+    Based on the above, please provide an answer to the following user request:
+    {{task}}
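
As a side note, a minimal sketch (assumed, not shown in this commit) of how a prompt file with these keys is typically loaded with PyYAML, which is already listed in pyproject.toml; the key names match the file above:

import yaml

# Load the prompt templates and access the re-keyed sections shown above.
with open("prompts/prompts_hf.yaml", "r", encoding="utf-8") as f:
    prompts = yaml.safe_load(f)

planning_prompts = prompts["planning"]          # initial_facts, initial_plan, update_* templates
final_answer_prompts = prompts["final_answer"]  # new pre_messages / post_messages templates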
pyproject.toml CHANGED
@@ -11,4 +11,5 @@ dependencies = [
     "loguru>=0.7.3",
     "pyyaml>=6.0.2",
     "smolagents>=1.9.2",
+    "tenacity>=9.0.0",
 ]
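
tenacity is added as a dependency here, but this diff does not show where it is used; a minimal, hypothetical sketch of the kind of retry wrapper it enables (function name and backoff values are illustrative only):

# Hypothetical sketch; the repo's actual retry usage is not shown in this commit.
from tenacity import retry, stop_after_attempt, wait_exponential

@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
def flaky_call():
    # Placeholder for an operation that can fail transiently (e.g. a lyrics fetch).
    ...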
run_single_agent.py ADDED
@@ -0,0 +1,41 @@
+import os
+
+from smolagents import LiteLLMModel
+
+from agents.single_agent import create_single_agent
+from loguru import logger
+from config import get_model_id, get_ollama_api_base, setup_logger, load_api_keys
+
+setup_logger()
+load_api_keys()
+
+# Set environment variables for API keys if needed
+os.environ["GEMINI_API_KEY"] = str(os.getenv("GEMINI_API_KEY"))
+
+
+use_local = False
+
+# If using Ollama, we need to specify the API base URL
+# Initialize the LLM model based on configuration
+model_id = "openrouter/google/gemini-2.0-flash-lite-preview-02-05:free"
+logger.info(f"Initializing with model: {model_id}")
+if use_local:
+    api_base = get_ollama_api_base()
+    logger.info(f"Using Ollama API base: {api_base}")
+    model = LiteLLMModel(model_id=model_id, api_base=api_base)
+else:
+    model = LiteLLMModel(model_id=model_id)
+
+# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
+# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
+
+# Song to analyze (hard-coded for now)
+song_data = "RCHP - On Mercury"
+
+agent = create_single_agent(model)
+
+# Agent execution
+agent.run(f"""
+1. Find and extract the lyrics of the song, {song_data}. Don't try to scrape from azlyrics.com or genius.com, others are ok.
+2. Perform deep lyrics analysis and return full lyrics and analysis results in a pretty human-readable format.
+""")
tools/formatting_tools.py CHANGED
@@ -9,7 +9,6 @@ from rich.console import Console
 from rich.panel import Panel
 from rich.text import Text
 from rich.table import Table
-from rich.markdown import Markdown
 from rich.theme import Theme
 from rich.box import ROUNDED
 from rich.console import Group
@@ -37,6 +36,22 @@ class FormatAnalysisResultsTool(Tool):
         Returns:
             A formatted string representation of the analysis
         """
+        # Expected JSON structure from analysis_tools.py:
+        # {
+        #   "summary": "Overall analysis of the song vibes, meaning and mood",
+        #   "main_themes": ["theme1", "theme2", ...],
+        #   "mood": "The overall mood/emotion of the song",
+        #   "sections_analysis": [
+        #     {
+        #       "section_type": "verse/chorus/bridge/etc.",
+        #       "section_number": 1,
+        #       "lines": ["line1", "line2", ...],
+        #       "analysis": "Analysis of this section with respect to the overall theme"
+        #     },
+        #     ...
+        #   ],
+        #   "conclusion": "The song vibes and concepts of the underlying meaning"
+        # }
         try:
             # Parse the JSON string into a Python dictionary if it's a string
             if isinstance(analysis_json, str):
@@ -59,7 +74,8 @@ class FormatAnalysisResultsTool(Tool):
             "negative": "red",
             "neutral": "magenta",
             "quote": "italic yellow",
-            "metadata": "dim white"
+            "metadata": "dim white",
+            "conclusion": "bold magenta"  # Add style for conclusion
         })
 
         # Apply the theme to our console
@@ -103,9 +119,19 @@ class FormatAnalysisResultsTool(Tool):
                 section_content = []
 
                 if lines:
-                    lyrics_text = "\n".join([f"> [quote]{line}[/]" for line in lines])
+                    # Format lyrics in a more readable way
                     section_content.append(Text("Lyrics:", style="bold blue"))
-                    section_content.append(Markdown(lyrics_text))
+                    # Format each lyric line with the quote style
+                    lyrics_lines = []
+                    for line in lines:
+                        lyrics_lines.append(f"[quote]{line}[/]")
+
+                    lyrics_panel = Panel(
+                        "\n".join(lyrics_lines),
+                        border_style="blue",
+                        padding=(1, 2)
+                    )
+                    section_content.append(lyrics_panel)
 
                 section_content.append(Text("Analysis:", style="bold blue"))
                 section_content.append(Text(section_analysis))
@@ -117,25 +143,12 @@ class FormatAnalysisResultsTool(Tool):
                     border_style="cyan"
                 ))
 
-            # Significant lines
-            sig_lines = analysis.get("significant_lines", [])
-            if sig_lines:
-                console.print("\n[heading]Significant Lines[/]")
-
-                for i, line_data in enumerate(sig_lines):
-                    line = line_data.get("line", "")
-                    significance = line_data.get("significance", "")
-
-                    console.print(Panel(
-                        f"[quote]\"{line}\"[/]\n\n[bold blue]Significance:[/] {significance}",
-                        title=f"[highlight]Key Line #{i+1}[/]",
-                        border_style="highlight"
-                    ))
+            # We no longer have significant_lines in the new format
 
             # Conclusion
             conclusion = analysis.get("conclusion", "No conclusion available")
             console.print("\n[heading]Conclusion[/]")
-            console.print(Panel(conclusion, border_style="neutral"))
+            console.print(Panel(conclusion, border_style="magenta"))
 
             # Export the rich text as a string
             return console.export_text()
@@ -198,20 +211,7 @@ class FormatAnalysisResultsTool(Tool):
                 formatted_text.append(section_analysis)
                 formatted_text.append("")
 
-            # Significant lines
-            sig_lines = analysis.get("significant_lines", [])
-            if sig_lines:
-                formatted_text.append("SIGNIFICANT LINES")
-                formatted_text.append("=================")
-
-                for i, line_data in enumerate(sig_lines):
-                    line = line_data.get("line", "")
-                    significance = line_data.get("significance", "")
-
-                    formatted_text.append(f"Key Line #{i+1}:")
-                    formatted_text.append(f'"{line}"')
-                    formatted_text.append(f"Significance: {significance}")
-                    formatted_text.append("")
+            # We no longer have significant_lines in the new format
 
             # Conclusion
             conclusion = analysis.get("conclusion", "No conclusion available")
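
To make the new contract concrete, a small illustrative payload (made-up values) shaped like the structure documented in the comment added above; this is the form the formatter now expects instead of the removed significant_lines field:

# Illustrative payload only; field names follow the documented structure.
sample_analysis = {
    "summary": "Overall read of the song's mood and meaning.",
    "main_themes": ["longing", "motion"],
    "mood": "bittersweet",
    "sections_analysis": [
        {
            "section_type": "chorus",
            "section_number": 1,
            "lines": ["first lyric line", "second lyric line"],
            "analysis": "How this chorus relates to the overall theme."
        }
    ],
    "conclusion": "What the song ultimately conveys."
}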