Create app.py
app.py
ADDED
@@ -0,0 +1,803 @@
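# app.py: a Gradio Space that turns a topic into a narrated lecture. A Swarm of
# AutoGen agents researches the topic via SerpApi, drafts slides and narration
# scripts as JSON, renders the slides to a PDF, and voices each script with the
# Coqui XTTS-v2 CLI using an uploaded speaker sample.
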
import os
import json
import re
import gradio as gr
import asyncio
import logging
import subprocess
from serpapi import GoogleSearch
from pydantic import BaseModel
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination
from autogen_agentchat.teams import Swarm
from autogen_agentchat.ui import Console
from autogen_agentchat.messages import TextMessage, HandoffMessage, StructuredMessage
from autogen_ext.models.anthropic import AnthropicChatCompletionClient
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_ext.models.ollama import OllamaChatCompletionClient
from markdown_pdf import MarkdownPdf, Section
import traceback
import soundfile as sf
import tempfile
from pydub import AudioSegment

# Set up logging
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler("lecture_generation.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Set up environment
OUTPUT_DIR = "outputs"
os.makedirs(OUTPUT_DIR, exist_ok=True)
XTTS_MODEL_DIR = "XTTS-v2"

# Define Pydantic model for slide data
class Slide(BaseModel):
    title: str
    content: str

class SlidesOutput(BaseModel):
    slides: list[Slide]

# Define search_web tool using SerpApi
def search_web(query: str, serpapi_key: str) -> str:
    try:
        params = {
            "q": query,
            "engine": "google",
            "api_key": serpapi_key,
            "num": 5
        }
        search = GoogleSearch(params)
        results = search.get_dict()

        if "error" in results:
            logger.error("SerpApi error: %s", results["error"])
            return f"Error during search: {results['error']}"

        if "organic_results" not in results or not results["organic_results"]:
            logger.info("No search results found for query: %s", query)
            return f"No results found for query: {query}"

        formatted_results = []
        for item in results["organic_results"][:5]:
            title = item.get("title", "No title")
            snippet = item.get("snippet", "No snippet")
            link = item.get("link", "No link")
            formatted_results.append(f"Title: {title}\nSnippet: {snippet}\nLink: {link}\n")

        formatted_output = "\n".join(formatted_results)
        logger.info("Successfully retrieved search results for query: %s", query)
        return f"Search results for {query}:\n{formatted_output}"

    except Exception as e:
        logger.error("Unexpected error during search: %s", str(e))
        return f"Unexpected error during search: {str(e)}"

# Define helper function for progress HTML
def html_with_progress(label, progress):
    return f"""
    <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
        <div style="width: 100%; background-color: #FFFFFF; border-radius: 10px; overflow: hidden; margin-bottom: 20px;">
            <div style="width: {progress}%; height: 30px; background-color: #4CAF50; border-radius: 10px;"></div>
        </div>
        <h2 style="font-style: italic; color: #555;">{label}</h2>
    </div>
    """

# Function to get model client based on selected service
def get_model_client(service, api_key):
    if service == "OpenAI-gpt-4o-2024-08-06":
        return OpenAIChatCompletionClient(model="gpt-4o-2024-08-06", api_key=api_key)
    elif service == "Anthropic-claude-3-sonnet-20240229":
        return AnthropicChatCompletionClient(model="claude-3-sonnet-20240229", api_key=api_key)
    elif service == "Google-gemini-1.5-flash":
        return OpenAIChatCompletionClient(model="gemini-1.5-flash", api_key=api_key)
    elif service == "Ollama-llama3.2":
        return OllamaChatCompletionClient(model="llama3.2")
    else:
        raise ValueError("Invalid service")

# Helper function to clean script text
def clean_script_text(script):
    if not script or not isinstance(script, str):
        logger.error("Invalid script input: %s", script)
        return None
    script = re.sub(r"\*\*Slide \d+:.*?\*\*", "", script)
    script = re.sub(r"\*\*|\*|\s*-\s*", "", script)
    script = re.sub(r"\[.*?\]", "", script)
    script = re.sub(r"Title:.*?\n|Content:.*?\n", "", script)
    script = re.sub(r"-", " ", script)
    script = script.replace("humanlike", "human like").replace("problemsolving", "problem solving")
    script = re.sub(r"\s+", " ", script).strip()
    script = re.sub(r"[^\w\s.,!?']", "", script)
    if len(script) < 10 or len(script) > 500:
        logger.error("Cleaned script length invalid (%d characters): %s", len(script), script)
        return None
    sentences = re.split(r"[.!?]+", script)
    sentences = [s.strip() for s in sentences if s.strip()]
    if len(sentences) < 1 or len(sentences) > 5:
        logger.error("Cleaned script has invalid sentence count (%d): %s", len(sentences), script)
        return None
    return script

# Helper function to validate and convert speaker audio (MP3 or WAV)
async def validate_and_convert_speaker_audio(speaker_audio):
    if not os.path.exists(speaker_audio):
        logger.error("Speaker audio file does not exist: %s", speaker_audio)
        return None

    try:
        # Check file extension
        ext = os.path.splitext(speaker_audio)[1].lower()
        if ext == ".mp3":
            logger.info("Converting MP3 to WAV: %s", speaker_audio)
            audio = AudioSegment.from_mp3(speaker_audio)
            # Convert to mono, 22050 Hz
            audio = audio.set_channels(1).set_frame_rate(22050)
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
                audio.export(temp_file.name, format="wav")
                speaker_wav = temp_file.name
        elif ext == ".wav":
            speaker_wav = speaker_audio
        else:
            logger.error("Unsupported audio format: %s", ext)
            return None

        # Validate WAV file
        data, samplerate = sf.read(speaker_wav)
        if samplerate < 16000 or samplerate > 48000:
            logger.error("Invalid sample rate for %s: %d Hz", speaker_wav, samplerate)
            return None
        if len(data) < 16000:
            logger.error("Speaker audio too short: %d frames", len(data))
            return None
        if data.ndim == 2:
            logger.info("Converting stereo WAV to mono: %s", speaker_wav)
            data = data.mean(axis=1)
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
                sf.write(temp_file.name, data, samplerate)
                speaker_wav = temp_file.name

        logger.info("Validated speaker audio: %s", speaker_wav)
        return speaker_wav

    except Exception as e:
        logger.error("Failed to validate or convert speaker audio %s: %s", speaker_audio, str(e))
        return None

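# Note: narration is produced by shelling out to the Coqui "tts" CLI below, so the
# TTS package is expected to be installed in the Space and the XTTS-v2 checkpoint
# available locally under XTTS_MODEL_DIR.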
# Helper function to generate audio using XTTS-v2 CLI
def generate_xtts_audio(text, speaker_wav, output_path):
    try:
        cmd = [
            "tts",
            "--model_name", "tts_models/multilingual/multi-dataset/xtts_v2",
            "--encoder_path", os.path.join(XTTS_MODEL_DIR, "model_se.pth.tar"),
            "--encoder_config", os.path.join(XTTS_MODEL_DIR, "config_se.json"),
            "--speaker_wav", speaker_wav,
            "--text", text,
            "--out_path", output_path,
            "--language_idx", "en"
        ]
        logger.debug("Executing tts command: %s", " ".join(cmd))
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            check=True
        )
        logger.info("tts command succeeded for %s: %s", output_path, result.stdout)
        return True
    except subprocess.CalledProcessError as e:
        logger.error("tts command failed for %s: %s\n%s", output_path, e.stderr, e.stdout)
        return False
    except Exception as e:
        logger.error("Unexpected error running tts for %s: %s", output_path, str(e))
        return False

# Helper function to extract JSON from messages
def extract_json_from_message(message):
    if isinstance(message, TextMessage):
        content = message.content
        logger.debug("Extracting JSON from TextMessage: %s", content)
        if not isinstance(content, str):
            logger.warning("TextMessage content is not a string: %s", content)
            return None
        # Try standard JSON block
        pattern = r"```json\s*(.*?)\s*```"
        match = re.search(pattern, content, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError as e:
                logger.error("Failed to parse JSON from TextMessage: %s, Content: %s", e, content)
        # Fallback: Try raw JSON array
        json_pattern = r"\[\s*\{.*?\}\s*\]"
        match = re.search(json_pattern, content, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(0))
            except json.JSONDecodeError as e:
                logger.error("Failed to parse fallback JSON from TextMessage: %s, Content: %s", e, content)
        # Fallback: Try any JSON-like structure
        try:
            parsed = json.loads(content)
            if isinstance(parsed, (list, dict)):
                logger.info("Parsed JSON from raw content: %s", parsed)
                return parsed
        except json.JSONDecodeError:
            pass
        logger.warning("No JSON found in TextMessage content: %s", content)
        return None

    elif isinstance(message, StructuredMessage):
        content = message.content
        logger.debug("Extracting JSON from StructuredMessage: %s", content)
        try:
            if isinstance(content, BaseModel):
                content_dict = content.dict()
                return content_dict.get("slides", content_dict)
            return content
        except Exception as e:
            logger.error("Failed to extract JSON from StructuredMessage: %s, Content: %s", e, content)
            return None

    elif isinstance(message, HandoffMessage):
        logger.debug("Extracting JSON from HandoffMessage context")
        for ctx_msg in message.context:
            if hasattr(ctx_msg, "content"):
                content = ctx_msg.content
                logger.debug("Handoff context message content: %s", content)
                if isinstance(content, str):
                    pattern = r"```json\s*(.*?)\s*```"
                    match = re.search(pattern, content, re.DOTALL)
                    if match:
                        try:
                            return json.loads(match.group(1))
                        except json.JSONDecodeError as e:
                            logger.error("Failed to parse JSON from HandoffMessage context: %s, Content: %s", e, content)
                    json_pattern = r"\[\s*\{.*?\}\s*\]"
                    match = re.search(json_pattern, content, re.DOTALL)
                    if match:
                        try:
                            return json.loads(match.group(0))
                        except json.JSONDecodeError as e:
                            logger.error("Failed to parse fallback JSON from HandoffMessage context: %s, Content: %s", e, content)
                    try:
                        parsed = json.loads(content)
                        if isinstance(parsed, (list, dict)):
                            logger.info("Parsed JSON from raw HandoffMessage context: %s", parsed)
                            return parsed
                    except json.JSONDecodeError:
                        pass
                elif isinstance(content, dict):
                    return content.get("slides", content)
        logger.warning("No JSON found in HandoffMessage context")
        return None

    logger.warning("Unsupported message type for JSON extraction: %s", type(message))
    return None

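# Each slide is added as its own PDF section; the pdf.js viewer further down maps
# slide N to page N of this document.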
# Function to generate Markdown and convert to PDF (landscape, centered)
def generate_slides_pdf(slides):
    pdf = MarkdownPdf(page_size="A4", orientation="landscape")
    for slide in slides:
        content_lines = slide['content'].replace('\n', '\n\n')
        markdown_content = f"""
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; text-align: center; padding: 20px;">
# {slide['title']}

*Prof. AI Feynman*
*Princeton University, April 26th, 2025*

{content_lines}
</div>

---
"""
        pdf.add_section(Section(markdown_content, toc=False))

    pdf_file = os.path.join(OUTPUT_DIR, "slides.pdf")
    pdf.save(pdf_file)

    logger.info("Generated PDF slides (landscape): %s", pdf_file)
    return pdf_file

# Async function to update audio preview
async def update_audio_preview(audio_file):
    if audio_file:
        logger.info("Updating audio preview for file: %s", audio_file)
        return audio_file
    return None

# Async function to generate lecture materials and audio
async def on_generate(api_service, api_key, serpapi_key, title, topic, instructions, lecture_type, speaker_audio, num_slides):
    if not serpapi_key:
        yield f"""
        <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
            <h2 style="color: #d9534f;">SerpApi key required</h2>
            <p style="margin-top: 20px;">Please provide a valid SerpApi key and try again.</p>
        </div>
        """
        return

    model_client = get_model_client(api_service, api_key)

    # Named wrapper so the search tool registers with a usable name and docstring
    # (a bare lambda exposes "<lambda>" as the tool name in the function schema).
    def search_web_tool(query: str) -> str:
        """Search the web for the given query using SerpApi."""
        return search_web(query, serpapi_key)

    research_agent = AssistantAgent(
        name="research_agent",
        model_client=model_client,
        handoffs=["slide_agent"],
        system_message="You are a Research Agent. Use the search_web tool to gather information on the topic and keywords from the initial message. Summarize the findings concisely in a single message, then use the handoff_to_slide_agent tool to pass the task to the Slide Agent. Do not produce any other output.",
        tools=[search_web_tool]
    )
    slide_agent = AssistantAgent(
        name="slide_agent",
        model_client=model_client,
        handoffs=["script_agent"],
        system_message=f"""
You are a Slide Agent. Using the research from the conversation history and the number of content slides ({num_slides}) specified in the initial message, generate exactly {num_slides} content slides, then add a quiz slide, an assignment slide, and a thank-you slide. Output ONLY a JSON array wrapped in ```json ... ``` in a TextMessage, where each slide is an object with 'title' and 'content' keys. Do not include any explanatory text, comments, or other messages. Ensure the JSON is valid and complete before proceeding. After outputting the JSON, use the handoff_to_script_agent tool to pass the task to the Script Agent.
Example output:
```json
[
    {{"title": "Slide 1", "content": "Content for slide 1"}},
    {{"title": "Quiz", "content": "Quiz questions"}},
    {{"title": "Assignment", "content": "Assignment details"}},
    {{"title": "Thank You", "content": "Thank you message"}}
]
```""",
        output_content_type=None,
        reflect_on_tool_use=False
    )
    script_agent = AssistantAgent(
        name="script_agent",
        model_client=model_client,
        handoffs=["feynman_agent"],
        system_message=f"""
You are a Script Agent. Access the JSON array of {num_slides + 3} slides from the conversation history. Generate a narration script (1-2 sentences) for each slide, summarizing its content. Output ONLY a JSON array wrapped in ```json ... ``` with exactly {num_slides + 3} strings, one script per slide, in the same order. Ensure the JSON is valid and complete. After outputting, use the handoff_to_feynman_agent tool. If scripts cannot be generated, retry once.
Example for 1 content slide:
```json
[
    "This slide covers the main topic.",
    "Answer these quiz questions.",
    "Complete this assignment.",
    "Thank you for attending."
]
```""",
        output_content_type=None,
        reflect_on_tool_use=False
    )
    feynman_agent = AssistantAgent(
        name="feynman_agent",
        model_client=model_client,
        handoffs=[],
        system_message="You are Agent Feynman. Review the slides and scripts from the conversation history to ensure coherence and completeness. Output a confirmation message summarizing the number of slides and scripts received. If slides or scripts are missing or invalid, report the issue clearly. Use 'TERMINATE' to signal completion. Example: 'Received 5 slides and 5 scripts. Lecture is coherent. TERMINATE'"
    )

    swarm = Swarm(
        participants=[research_agent, slide_agent, script_agent, feynman_agent],
        termination_condition=HandoffTermination(target="user") | TextMentionTermination("TERMINATE")
    )

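    # Pipeline: research_agent -> slide_agent -> script_agent -> feynman_agent.
    # Progress is streamed to the UI as HTML: 25% after the research handoff, 50% once
    # slides are parsed, 75% once scripts are parsed, and 100% after the Feynman review.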
    progress = 0
    label = "Research: in progress..."
    yield html_with_progress(label, progress)
    await asyncio.sleep(0.1)

    initial_message = f"""
    Lecture Title: {title}
    Topic: {topic}
    Additional Instructions: {instructions}
    Audience: {lecture_type}
    Number of Content Slides: {num_slides}
    Please start by researching the topic.
    """
    logger.info("Starting lecture generation for topic: %s", topic)

    slides = None
    scripts = None
    error_html = """
    <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
        <h2 style="color: #d9534f;">Failed to generate lecture materials</h2>
        <p style="margin-top: 20px;">Please try again with different parameters or a different model.</p>
    </div>
    """

    try:
        logger.info("Research Agent starting...")
        task_result = await Console(swarm.run_stream(task=initial_message))
        logger.info("Swarm execution completed")

        script_retry_count = 0
        max_script_retries = 2

        for message in task_result.messages:
            source = getattr(message, 'source', getattr(message, 'sender', None))
            logger.debug("Processing message from %s, type: %s, content: %s", source, type(message), message.to_text() if hasattr(message, 'to_text') else str(message))

            if isinstance(message, HandoffMessage):
                logger.info("Handoff from %s to %s, Context: %s", source, message.target, message.context)
                if source == "research_agent" and message.target == "slide_agent":
                    progress = 25
                    label = "Slides: generating..."
                    yield html_with_progress(label, progress)
                    await asyncio.sleep(0.1)
                elif source == "slide_agent" and message.target == "script_agent":
                    if slides is None:
                        logger.warning("Slide Agent handoff without slides JSON")
                        extracted_json = extract_json_from_message(message)
                        if extracted_json:
                            slides = extracted_json
                            logger.info("Extracted slides JSON from HandoffMessage context: %s", slides)
                        if slides is None:
                            label = "Slides: failed to generate..."
                            yield html_with_progress(label, progress)
                            await asyncio.sleep(0.1)
                    progress = 50
                    label = "Scripts: generating..."
                    yield html_with_progress(label, progress)
                    await asyncio.sleep(0.1)
                elif source == "script_agent" and message.target == "feynman_agent":
                    if scripts is None:
                        logger.warning("Script Agent handoff without scripts JSON")
                        extracted_json = extract_json_from_message(message)
                        if extracted_json:
                            scripts = extracted_json
                            logger.info("Extracted scripts JSON from HandoffMessage context: %s", scripts)
                    progress = 75
                    label = "Review: in progress..."
                    yield html_with_progress(label, progress)
                    await asyncio.sleep(0.1)

|
458 |
+
logger.info("Research Agent completed research")
|
459 |
+
progress = 25
|
460 |
+
label = "Slides: generating..."
|
461 |
+
yield html_with_progress(label, progress)
|
462 |
+
await asyncio.sleep(0.1)
|
463 |
+
|
464 |
+
elif source == "slide_agent" and isinstance(message, (TextMessage, StructuredMessage)):
|
465 |
+
logger.debug("Slide Agent message received: %s", message.to_text())
|
466 |
+
extracted_json = extract_json_from_message(message)
|
467 |
+
if extracted_json:
|
468 |
+
slides = extracted_json
|
469 |
+
logger.info("Slide Agent generated %d slides: %s", len(slides), slides)
|
470 |
+
# Save slide content to individual files
|
471 |
+
for i, slide in enumerate(slides):
|
472 |
+
content_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_content.txt")
|
473 |
+
try:
|
474 |
+
with open(content_file, "w", encoding="utf-8") as f:
|
475 |
+
f.write(slide["content"])
|
476 |
+
logger.info("Saved slide content to %s: %s", content_file, slide["content"])
|
477 |
+
except Exception as e:
|
478 |
+
logger.error("Error saving slide content to %s: %s", content_file, str(e))
|
479 |
+
progress = 50
|
480 |
+
label = "Scripts: generating..."
|
481 |
+
yield html_with_progress(label, progress)
|
482 |
+
await asyncio.sleep(0.1)
|
483 |
+
else:
|
484 |
+
logger.warning("No JSON extracted from slide_agent message: %s", message.to_text())
|
485 |
+
|
            elif source == "script_agent" and isinstance(message, (TextMessage, StructuredMessage)):
                logger.debug("Script Agent message received: %s", message.to_text())
                extracted_json = extract_json_from_message(message)
                if extracted_json:
                    scripts = extracted_json
                    logger.info("Script Agent generated scripts for %d slides: %s", len(scripts), scripts)
                    # Save raw scripts to individual files
                    for i, script in enumerate(scripts):
                        script_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_raw_script.txt")
                        try:
                            with open(script_file, "w", encoding="utf-8") as f:
                                f.write(script)
                            logger.info("Saved raw script to %s: %s", script_file, script)
                        except Exception as e:
                            logger.error("Error saving raw script to %s: %s", script_file, str(e))
                    progress = 75
                    label = "Scripts generated and saved. Reviewing..."
                    yield html_with_progress(label, progress)
                    await asyncio.sleep(0.1)
                else:
                    logger.warning("No JSON extracted from script_agent message: %s", message.to_text())
                    if script_retry_count < max_script_retries:
                        script_retry_count += 1
                        logger.info("Retrying script generation (attempt %d/%d)", script_retry_count, max_script_retries)
                        # Re-prompt script agent
                        retry_message = TextMessage(
                            content="Please generate scripts for the slides as per your instructions.",
                            source="user",
                            recipient="script_agent"
                        )
                        task_result.messages.append(retry_message)
                        continue

+
elif source == "feynman_agent" and isinstance(message, TextMessage) and "TERMINATE" in message.content:
|
520 |
+
logger.info("Feynman Agent completed lecture review: %s", message.content)
|
521 |
+
progress = 100
|
522 |
+
label = "Lecture materials ready. Generating audio..."
|
523 |
+
yield html_with_progress(label, progress)
|
524 |
+
await asyncio.sleep(0.1)
|
525 |
+
|
526 |
+
logger.info("Slides state: %s", "Generated" if slides else "None")
|
527 |
+
logger.info("Scripts state: %s", "Generated" if scripts else "None")
|
528 |
+
if not slides or not scripts:
|
529 |
+
error_message = f"Failed to generate {'slides and scripts' if not slides and not scripts else 'slides' if not slides else 'scripts'}"
|
530 |
+
error_message += f". Received {len(slides) if slides else 0} slides and {len(scripts) if scripts else 0} scripts."
|
531 |
+
logger.error("%s", error_message)
|
532 |
+
logger.debug("Dumping all messages for debugging:")
|
533 |
+
for msg in task_result.messages:
|
534 |
+
source = getattr(msg, 'source', getattr(msg, 'sender', None))
|
535 |
+
logger.debug("Message from %s, type: %s, content: %s", source, type(msg), msg.to_text() if hasattr(msg, 'to_text') else str(msg))
|
536 |
+
yield f"""
|
537 |
+
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
|
538 |
+
<h2 style="color: #d9534f;">{error_message}</h2>
|
539 |
+
<p style="margin-top: 20px;">Please try again with a different model (e.g., Anthropic-claude-3-sonnet-20240229) or simplify the topic/instructions.</p>
|
540 |
+
</div>
|
541 |
+
"""
|
542 |
+
return
|
543 |
+
|
        if not isinstance(scripts, list) or not all(isinstance(s, str) for s in scripts):
            logger.error("Scripts are not a list of strings: %s", scripts)
            yield f"""
            <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
                <h2 style="color: #d9534f;">Invalid script format</h2>
                <p style="margin-top: 20px;">Scripts must be a list of strings. Please try again.</p>
            </div>
            """
            return

        if len(slides) != len(scripts):
            logger.error("Mismatch between number of slides (%d) and scripts (%d)", len(slides), len(scripts))
            yield f"""
            <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
                <h2 style="color: #d9534f;">Mismatch in slides and scripts</h2>
                <p style="margin-top: 20px;">Generated {len(slides)} slides but {len(scripts)} scripts. Please try again.</p>
            </div>
            """
            return

        # Generate PDF from slides
        pdf_file = generate_slides_pdf(slides)
        pdf_path = f"/{pdf_file}"

        audio_files = []
        speaker_audio = speaker_audio if speaker_audio else "feynman.mp3"
        validated_speaker_wav = await validate_and_convert_speaker_audio(speaker_audio)
        if not validated_speaker_wav:
            logger.error("Invalid speaker audio after conversion, skipping TTS")
            yield f"""
            <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
                <h2 style="color: #d9534f;">Invalid speaker audio</h2>
                <p style="margin-top: 20px;">Please upload a valid MP3 or WAV audio file and try again.</p>
            </div>
            """
            return

        # Verify XTTS-v2 model files
        required_files = [
            os.path.join(XTTS_MODEL_DIR, "model_se.pth.tar"),
            os.path.join(XTTS_MODEL_DIR, "config_se.json")
        ]
        for f in required_files:
            if not os.path.exists(f):
                logger.error("Missing XTTS-v2 model file: %s", f)
                yield f"""
                <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
                    <h2 style="color: #d9534f;">Missing XTTS-v2 model files</h2>
                    <p style="margin-top: 20px;">Please ensure XTTS-v2 is downloaded to {XTTS_MODEL_DIR} and try again.</p>
                </div>
                """
                return

        # Process audio generation sequentially with retries
        for i, script in enumerate(scripts):
            cleaned_script = clean_script_text(script)
            audio_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}.wav")
            script_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_script.txt")

            # Save cleaned script
            try:
                with open(script_file, "w", encoding="utf-8") as f:
                    f.write(cleaned_script or "")
                logger.info("Saved cleaned script to %s: %s", script_file, cleaned_script)
            except Exception as e:
                logger.error("Error saving cleaned script to %s: %s", script_file, str(e))

            if not cleaned_script:
                logger.error("Skipping audio for slide %d due to empty or invalid script", i + 1)
                audio_files.append(None)
                continue

            max_retries = 2
            for attempt in range(max_retries + 1):
                try:
                    current_text = cleaned_script
                    if attempt > 0:
                        sentences = re.split(r"[.!?]+", cleaned_script)
                        sentences = [s.strip() for s in sentences if s.strip()][:2]
                        current_text = ". ".join(sentences) + "."
                        logger.info("Retry %d for slide %d with simplified text: %s", attempt, i + 1, current_text)

                    success = generate_xtts_audio(current_text, validated_speaker_wav, audio_file)
                    if not success:
                        raise RuntimeError("tts command failed")

                    logger.info("Generated audio for slide %d: %s", i + 1, audio_file)
                    audio_files.append(audio_file)
                    yield f"""
                    <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
                        <h2 style="font-style: italic; color: #555;">Generated audio for slide {i + 1}/{len(scripts)}...</h2>
                    </div>
                    """
                    await asyncio.sleep(0.1)
                    break
                except Exception as e:
                    logger.error("Error generating audio for slide %d (attempt %d): %s\n%s", i + 1, attempt, str(e), traceback.format_exc())
                    if attempt == max_retries:
                        logger.error("Max retries reached for slide %d, skipping", i + 1)
                        audio_files.append(None)
                        break

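        # Build the in-browser player: the generated PDF is embedded through Mozilla's
        # hosted pdf.js viewer, and each slide is paired with its audio clip; playback
        # auto-advances to the next slide when a clip ends.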
        audio_files = [f"/{f}" if f else None for f in audio_files]

        slides_info = json.dumps({"slides": [
            {"title": slide["title"], "content": slide["content"]}
            for slide in slides
        ], "audioFiles": audio_files})

        html_output = f"""
        <div id="lecture-container" style="height: 700px; border: 1px solid #ddd; border-radius: 8px; display: flex; flex-direction: column; justify-content: space-between;">
            <div id="slide-content" style="flex: 1; overflow: hidden;">
                <iframe id="pdf-viewer" src="https://mozilla.github.io/pdf.js/web/viewer.html?file={pdf_path}" style="width: 100%; height: 100%; border: none;"></iframe>
            </div>
            <div style="padding: 20px;">
                <div id="progress-bar" style="width: 100%; height: 5px; background-color: #ddd; border-radius: 2px; margin-bottom: 10px;">
                    <div id="progress-fill" style="width: {(1/len(slides)*100)}%; height: 100%; background-color: #4CAF50; border-radius: 2px;"></div>
                </div>
                <div style="display: flex; justify-content: center; margin-bottom: 10px;">
                    <button onclick="prevSlide()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏮</button>
                    <button onclick="togglePlay()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏯</button>
                    <button onclick="nextSlide()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏭</button>
                </div>
                <p id="slide-counter" style="text-align: center;">Slide 1 of {len(slides)}</p>
            </div>
        </div>
        <script>
            const lectureData = {slides_info};
            let currentSlide = 0;
            const totalSlides = lectureData.slides.length;
            const slideCounter = document.getElementById('slide-counter');
            const progressFill = document.getElementById('progress-fill');
            let audioElements = [];
            let currentAudio = null;
            const pdfViewer = document.getElementById('pdf-viewer');

            for (let i = 0; i < totalSlides; i++) {{
                if (lectureData.audioFiles && lectureData.audioFiles[i]) {{
                    const audio = new Audio(lectureData.audioFiles[i]);
                    audioElements.push(audio);
                }} else {{
                    audioElements.push(null);
                }}
            }}

            function updateSlide() {{
                pdfViewer.src = `https://mozilla.github.io/pdf.js/web/viewer.html?file={pdf_path}#page=${{currentSlide + 1}}`;
                slideCounter.textContent = `Slide ${{currentSlide + 1}} of ${{totalSlides}}`;
                progressFill.style.width = `${{(currentSlide + 1) / totalSlides * 100}}%`;

                if (currentAudio) {{
                    currentAudio.pause();
                    currentAudio.currentTime = 0;
                }}

                if (audioElements[currentSlide]) {{
                    currentAudio = audioElements[currentSlide];
                    currentAudio.play().catch(e => console.error('Audio play failed:', e));
                }} else {{
                    currentAudio = null;
                }}
            }}

            function prevSlide() {{
                if (currentSlide > 0) {{
                    currentSlide--;
                    updateSlide();
                }}
            }}

            function nextSlide() {{
                if (currentSlide < totalSlides - 1) {{
                    currentSlide++;
                    updateSlide();
                }}
            }}

            function togglePlay() {{
                if (!audioElements[currentSlide]) return;
                // Lazily bind the current slide's audio on first play (currentAudio starts as null).
                if (!currentAudio) {{
                    currentAudio = audioElements[currentSlide];
                }}
                if (currentAudio.paused) {{
                    currentAudio.play().catch(e => console.error('Audio play failed:', e));
                }} else {{
                    currentAudio.pause();
                }}
            }}

            audioElements.forEach((audio, index) => {{
                if (audio) {{
                    audio.addEventListener('ended', () => {{
                        if (index < totalSlides - 1) {{
                            nextSlide();
                        }}
                    }});
                }}
            }});
        </script>
        """
        logger.info("Lecture generation completed successfully")
        yield html_output

except Exception as e:
|
745 |
+
logger.error("Error during lecture generation: %s\n%s", str(e), traceback.format_exc())
|
746 |
+
yield f"""
|
747 |
+
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
|
748 |
+
<h2 style="color: #d9534f;">Error during lecture generation</h2>
|
749 |
+
<p style="margin-top: 10px; font-size: 16px;">{str(e)}</p>
|
750 |
+
<p style="margin-top: 20px;">Please try again or adjust your inputs.</p>
|
751 |
+
</div>
|
752 |
+
"""
|
753 |
+
return
|
754 |
+
|
755 |
+
# Gradio interface
|
756 |
+
with gr.Blocks(title="Agent Feynman") as demo:
|
757 |
+
gr.Markdown("# <center>Learn Anything With Professor AI Feynman</center>")
|
758 |
+
with gr.Row():
|
759 |
+
with gr.Column(scale=1):
|
760 |
+
with gr.Group():
|
761 |
+
title = gr.Textbox(label="Lecture Title", placeholder="e.g. Introduction to AI")
|
762 |
+
topic = gr.Textbox(label="Topic", placeholder="e.g. Artificial Intelligence")
|
763 |
+
instructions = gr.Textbox(label="Additional Instructions", placeholder="e.g. Focus on recent advancements")
|
764 |
+
lecture_type = gr.Dropdown(["Conference", "University", "High school"], label="Audience", value="University")
|
765 |
+
api_service = gr.Dropdown(
|
766 |
+
choices=[
|
767 |
+
"OpenAI-gpt-4o-2024-08-06",
|
768 |
+
"Anthropic-claude-3-sonnet-20240229",
|
769 |
+
"Google-gemini-1.5-flash",
|
770 |
+
"Ollama-llama3.2"
|
771 |
+
],
|
772 |
+
label="Model",
|
773 |
+
value="Google-gemini-1.5-flash"
|
774 |
+
)
|
775 |
+
api_key = gr.Textbox(label="API Key", type="password", placeholder="Not required for Ollama")
|
776 |
+
serpapi_key = gr.Textbox(label="SerpApi Key", type="password", placeholder="Enter your SerpApi key")
|
777 |
+
num_slides = gr.Slider(1, 20, step=1, label="Number of Content Slides", value=3)
|
778 |
+
speaker_audio = gr.Audio(label="Speaker sample audio (MP3 or WAV)", type="filepath", elem_id="speaker-audio")
|
779 |
+
audio_preview = gr.Audio(label="Audio Preview", interactive=False)
|
780 |
+
generate_btn = gr.Button("Generate Lecture")
|
781 |
+
with gr.Column(scale=2):
|
782 |
+
default_slide_html = """
|
783 |
+
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
|
784 |
+
<h2 style="font-style: italic; color: #555;">Waiting for lecture content...</h2>
|
785 |
+
<p style="margin-top: 10px; font-size: 16px;">Please Generate lecture content via the form on the left first before lecture begins</p>
|
786 |
+
</div>
|
787 |
+
"""
|
788 |
+
slide_display = gr.HTML(label="Lecture Slides", value=default_slide_html)
|
789 |
+
|
790 |
+
speaker_audio.change(
|
791 |
+
fn=update_audio_preview,
|
792 |
+
inputs=speaker_audio,
|
793 |
+
outputs=audio_preview
|
794 |
+
)
|
795 |
+
|
796 |
+
generate_btn.click(
|
797 |
+
fn=on_generate,
|
798 |
+
inputs=[api_service, api_key, serpapi_key, title, topic, instructions, lecture_type, speaker_audio, num_slides],
|
799 |
+
outputs=[slide_display]
|
800 |
+
)
|
801 |
+
|
802 |
+
if __name__ == "__main__":
|
803 |
+
demo.launch()
|
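# Local assets referenced above: the "outputs" working directory (created automatically),
# an XTTS-v2 folder containing model_se.pth.tar and config_se.json, and an optional
# feynman.mp3 that serves as the default speaker sample when none is uploaded.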