Jaward committed on
Commit c8b75a2 · verified · 1 Parent(s): 452750b

Update app.py

Files changed (1)
  1. app.py +117 -90
app.py CHANGED
@@ -16,7 +16,6 @@ from autogen_agentchat.messages import TextMessage, HandoffMessage, StructuredMe
16
  from autogen_ext.models.anthropic import AnthropicChatCompletionClient
17
  from autogen_ext.models.openai import OpenAIChatCompletionClient
18
  from autogen_ext.models.ollama import OllamaChatCompletionClient
19
- from markdown_pdf import MarkdownPdf, Section
20
  import traceback
21
  import soundfile as sf
22
  import tempfile
@@ -35,7 +34,6 @@ logging.basicConfig(
35
  logger = logging.getLogger(__name__)
36
 
37
  # Set up environment
38
- # For Huggingface Spaces, use /tmp for temporary storage
39
  if os.path.exists("/tmp"):
40
  OUTPUT_DIR = "/tmp/outputs" # Use /tmp for Huggingface Spaces
41
  else:
@@ -117,25 +115,21 @@ def clean_script_text(script):
117
  logger.error("Invalid script input: %s", script)
118
  return None
119
 
120
- # Minimal cleaning to preserve natural language
121
- script = re.sub(r"\*\*Slide \d+:.*?\*\*", "", script) # Remove slide headers
122
- script = re.sub(r"\[.*?\]", "", script) # Remove bracketed content
123
- script = re.sub(r"Title:.*?\n|Content:.*?\n", "", script) # Remove metadata
124
  script = script.replace("humanlike", "human-like").replace("problemsolving", "problem-solving")
125
- script = re.sub(r"\s+", " ", script).strip() # Normalize whitespace
126
 
127
- # Convert bullet points to spoken cues
128
  script = re.sub(r"^\s*-\s*", "So, ", script, flags=re.MULTILINE)
129
 
130
- # Add non-verbal words randomly (e.g., "um," "you know," "like")
131
  non_verbal = ["um, ", "you know, ", "like, "]
132
  words = script.split()
133
  for i in range(len(words) - 1, -1, -1):
134
- if random.random() < 0.1: # 10% chance per word
135
  words.insert(i, random.choice(non_verbal))
136
  script = " ".join(words)
137
 
138
- # Basic validation
139
  if len(script) < 10:
140
  logger.error("Cleaned script too short (%d characters): %s", len(script), script)
141
  return None
@@ -143,7 +137,7 @@ def clean_script_text(script):
143
  logger.info("Cleaned and naturalized script: %s", script)
144
  return script
145
 
146
- # Helper function to validate and convert speaker audio (MP3 or WAV)
147
  async def validate_and_convert_speaker_audio(speaker_audio):
148
  if not speaker_audio or not os.path.exists(speaker_audio):
149
  logger.warning("Speaker audio file does not exist: %s. Using default voice.", speaker_audio)
@@ -155,12 +149,10 @@ async def validate_and_convert_speaker_audio(speaker_audio):
155
  return None
156
 
157
  try:
158
- # Check file extension
159
  ext = os.path.splitext(speaker_audio)[1].lower()
160
  if ext == ".mp3":
161
  logger.info("Converting MP3 to WAV: %s", speaker_audio)
162
  audio = AudioSegment.from_mp3(speaker_audio)
163
- # Convert to mono, 22050 Hz
164
  audio = audio.set_channels(1).set_frame_rate(22050)
165
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False, dir=OUTPUT_DIR) as temp_file:
166
  audio.export(temp_file.name, format="wav")
@@ -171,7 +163,6 @@ async def validate_and_convert_speaker_audio(speaker_audio):
171
  logger.error("Unsupported audio format: %s", ext)
172
  return None
173
 
174
- # Validate WAV file
175
  data, samplerate = sf.read(speaker_wav)
176
  if samplerate < 16000 or samplerate > 48000:
177
  logger.error("Invalid sample rate for %s: %d Hz", speaker_wav, samplerate)
@@ -215,7 +206,6 @@ def extract_json_from_message(message):
215
  logger.warning("TextMessage content is not a string: %s", content)
216
  return None
217
 
218
- # Try standard JSON block with triple backticks
219
  pattern = r"```json\s*(.*?)\s*```"
220
  match = re.search(pattern, content, re.DOTALL)
221
  if match:
@@ -226,10 +216,9 @@ def extract_json_from_message(message):
226
  except json.JSONDecodeError as e:
227
  logger.error("Failed to parse JSON from code block: %s", e)
228
 
229
- # Try to find arrays or objects
230
  json_patterns = [
231
- r"\[\s*\{.*?\}\s*\]", # Array of objects
232
- r"\{\s*\".*?\"\s*:.*?\}", # Object
233
  ]
234
 
235
  for pattern in json_patterns:
@@ -242,7 +231,6 @@ def extract_json_from_message(message):
242
  except json.JSONDecodeError as e:
243
  logger.error("Failed to parse JSON with pattern %s: %s", pattern, e)
244
 
245
- # Try to find JSON anywhere in the content
246
  try:
247
  for i in range(len(content)):
248
  for j in range(len(content), i, -1):
@@ -290,8 +278,8 @@ def extract_json_from_message(message):
290
  logger.error("Failed to parse JSON from HandoffMessage: %s", e)
291
 
292
  json_patterns = [
293
- r"\[\s*\{.*?\}\s*\]", # Array of objects
294
- r"\{\s*\".*?\"\s*:.*?\}", # Object
295
  ]
296
 
297
  for pattern in json_patterns:
@@ -310,26 +298,36 @@ def extract_json_from_message(message):
310
  logger.warning("Unsupported message type for JSON extraction: %s", type(message))
311
  return None
312
 
313
- # Function to generate HTML slides
314
- def generate_html_slides(slides, title):
315
  try:
316
- slides_html = ""
317
-
318
  for i, slide in enumerate(slides):
319
- content_lines = slide['content'].replace('\n', '<br>')
320
- slide_html = f"""
321
- <div id="slide-{i+1}" class="slide" style="display: none; height: 100%; padding: 20px; text-align: center;">
322
- <h1 style="margin-bottom: 10px;">{slide['title']}</h1>
323
- <h3 style="margin-bottom: 20px; font-style: italic;">Prof. AI Feynman<br>Princeton University, April 26th, 2025</h3>
324
- <div style="font-size: 1.2em; line-height: 1.6;">{content_lines}</div>
325
- </div>
326
  """
327
- slides_html += slide_html
328
 
329
- logger.info(f"Generated HTML slides for: {title}")
330
- return slides_html
331
  except Exception as e:
332
- logger.error(f"Failed to generate HTML slides: {str(e)}")
333
  logger.error(traceback.format_exc())
334
  return None
335
 
@@ -351,7 +349,6 @@ async def on_generate(api_service, api_key, serpapi_key, title, topic, instructi
351
  """
352
  return
353
 
354
- # Initialize TTS model
355
  tts = None
356
  try:
357
  device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -535,7 +532,6 @@ Example: 'Received {total_slides} slides and {total_slides} scripts. Lecture is
535
  )
536
  task_result.messages.append(retry_message)
537
  continue
538
- # Save slide content to individual files
539
  for i, slide in enumerate(slides):
540
  content_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_content.txt")
541
  try:
@@ -567,7 +563,6 @@ Example: 'Received {total_slides} slides and {total_slides} scripts. Lecture is
567
  if extracted_json:
568
  scripts = extracted_json
569
  logger.info("Script Agent generated scripts for %d slides", len(scripts))
570
- # Save raw scripts to individual files
571
  for i, script in enumerate(scripts):
572
  script_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_raw_script.txt")
573
  try:
@@ -648,10 +643,9 @@ Example: 'Received {total_slides} slides and {total_slides} scripts. Lecture is
648
  """
649
  return
650
 
651
- # Generate HTML slides
652
- slides_html = generate_html_slides(slides, title)
653
- if not slides_html:
654
- logger.error("Failed to generate HTML slides")
655
  yield f"""
656
  <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
657
  <h2 style="color: #d9534f;">Failed to generate slides</h2>
@@ -672,13 +666,11 @@ Example: 'Received {total_slides} slides and {total_slides} scripts. Lecture is
672
  """
673
  return
674
 
675
- # Process audio generation sequentially with retries
676
  for i, script in enumerate(scripts):
677
  cleaned_script = clean_script_text(script)
678
- audio_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}.wav")
679
  script_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_script.txt")
680
 
681
- # Save cleaned script
682
  try:
683
  with open(script_file, "w", encoding="utf-8") as f:
684
  f.write(cleaned_script or "")
@@ -727,36 +719,51 @@ Example: 'Received {total_slides} slides and {total_slides} scripts. Lecture is
727
  await asyncio.sleep(0.1)
728
  break
729
 
730
- slides_info = json.dumps({"slides": [
731
- {"title": slide["title"], "content": slide["content"]}
732
- for slide in slides
733
- ], "audioFiles": audio_files})
734
 
735
  html_output = f"""
736
  <div id="lecture-container" style="height: 700px; border: 1px solid #ddd; border-radius: 8px; display: flex; flex-direction: column; justify-content: space-between;">
737
- <div id="slide-content" style="flex: 1; overflow: auto;">
738
- {slides_html}
739
  </div>
740
  <div style="padding: 20px;">
741
- <div id="progress-bar" style="width: 100%; height: 5px; background-color: #ddd; border-radius: 2px; margin-bottom: 10px;">
742
- <div id="progress-fill" style="width: {(1/len(slides)*100)}%; height: 100%; background-color: #4CAF50; border-radius: 2px;"></div>
743
  </div>
744
  <div style="display: flex; justify-content: center; margin-bottom: 10px;">
745
  <button onclick="prevSlide()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏮</button>
746
- <button onclick="togglePlay()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏯</button>
747
  <button onclick="nextSlide()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏭</button>
 
748
  </div>
749
- <p id="slide-counter" style="text-align: center;">Slide 1 of {len(slides)}</p>
 
  </div>
751
  </div>
752
  <script>
753
  const lectureData = {slides_info};
754
  let currentSlide = 0;
755
  const totalSlides = lectureData.slides.length;
756
- const slideCounter = document.getElementById('slide-counter');
757
- const progressFill = document.getElementById('progress-fill');
758
  let audioElements = [];
759
- let currentAudio = null;
760
 
761
  for (let i = 0; i < totalSlides; i++) {{
762
  if (lectureData.audioFiles && lectureData.audioFiles[i]) {{
@@ -767,24 +774,19 @@ Example: 'Received {total_slides} slides and {total_slides} scripts. Lecture is
767
  }}
768
  }}
769
 
770
- function updateSlide() {{
771
- for (let i = 1; i <= totalSlides; i++) {{
772
- document.getElementById(`slide-${{i}}`).style.display = (i - 1 === currentSlide) ? 'block' : 'none';
773
- }}
774
- slideCounter.textContent = `Slide ${{currentSlide + 1}} of ${{totalSlides}}`;
775
- progressFill.style.width = `${{(currentSlide + 1) / totalSlides * 100}}%`;
776
-
777
- if (currentAudio) {{
778
- currentAudio.pause();
779
- currentAudio.currentTime = 0;
780
- }}
781
 
782
- if (audioElements[currentSlide]) {{
783
- currentAudio = audioElements[currentSlide];
784
- currentAudio.play().catch(e => console.error('Audio play failed:', e));
785
- }} else {{
786
- currentAudio = null;
787
- }}
788
  }}
789
 
790
  function prevSlide() {{
@@ -801,27 +803,52 @@ Example: 'Received {total_slides} slides and {total_slides} scripts. Lecture is
801
  }}
802
  }}
803
 
804
- function togglePlay() {{
805
- if (!audioElements[currentSlide]) return;
806
- if (currentAudio.paused) {{
807
- currentAudio.play().catch(e => console.error('Audio play failed:', e));
808
- }} else {{
809
- currentAudio.pause();
 
810
  }}
811
  }}
812
 
813
- audioElements.forEach((audio, index) => {{
814
  if (audio) {{
815
- audio.addEventListener('ended', () => {{
816
- if (index < totalSlides - 1) {{
817
- nextSlide();
818
- }}
 
 
819
  }});
 
 
 
820
  }}
821
- }});
822
 
823
  // Initialize first slide
824
- updateSlide();
825
  </script>
826
  """
827
  logger.info("Lecture generation completed successfully")
 
16
  from autogen_ext.models.anthropic import AnthropicChatCompletionClient
17
  from autogen_ext.models.openai import OpenAIChatCompletionClient
18
  from autogen_ext.models.ollama import OllamaChatCompletionClient
 
19
  import traceback
20
  import soundfile as sf
21
  import tempfile
 
34
  logger = logging.getLogger(__name__)
35
 
36
  # Set up environment
 
37
  if os.path.exists("/tmp"):
38
  OUTPUT_DIR = "/tmp/outputs" # Use /tmp for Huggingface Spaces
39
  else:
 
115
  logger.error("Invalid script input: %s", script)
116
  return None
117
 
118
+ script = re.sub(r"\*\*Slide \d+:.*?\*\*", "", script)
119
+ script = re.sub(r"\[.*?\]", "", script)
120
+ script = re.sub(r"Title:.*?\n|Content:.*?\n", "", script)
 
121
  script = script.replace("humanlike", "human-like").replace("problemsolving", "problem-solving")
122
+ script = re.sub(r"\s+", " ", script).strip()
123
 
 
124
  script = re.sub(r"^\s*-\s*", "So, ", script, flags=re.MULTILINE)
125
 
 
126
  non_verbal = ["um, ", "you know, ", "like, "]
127
  words = script.split()
128
  for i in range(len(words) - 1, -1, -1):
129
+ if random.random() < 0.1:
130
  words.insert(i, random.choice(non_verbal))
131
  script = " ".join(words)
132
 
 
133
  if len(script) < 10:
134
  logger.error("Cleaned script too short (%d characters): %s", len(script), script)
135
  return None
 
137
  logger.info("Cleaned and naturalized script: %s", script)
138
  return script
139
 
140
+ # Helper function to validate and convert speaker audio
141
  async def validate_and_convert_speaker_audio(speaker_audio):
142
  if not speaker_audio or not os.path.exists(speaker_audio):
143
  logger.warning("Speaker audio file does not exist: %s. Using default voice.", speaker_audio)
 
149
  return None
150
 
151
  try:
 
152
  ext = os.path.splitext(speaker_audio)[1].lower()
153
  if ext == ".mp3":
154
  logger.info("Converting MP3 to WAV: %s", speaker_audio)
155
  audio = AudioSegment.from_mp3(speaker_audio)
 
156
  audio = audio.set_channels(1).set_frame_rate(22050)
157
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False, dir=OUTPUT_DIR) as temp_file:
158
  audio.export(temp_file.name, format="wav")
 
163
  logger.error("Unsupported audio format: %s", ext)
164
  return None
165
 
 
166
  data, samplerate = sf.read(speaker_wav)
167
  if samplerate < 16000 or samplerate > 48000:
168
  logger.error("Invalid sample rate for %s: %d Hz", speaker_wav, samplerate)
 
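For .mp3 input, the helper's conversion step boils down to the pydub calls below; a minimal, self-contained sketch (the input path is a hypothetical sample, not a file from this repo):

```python
# Illustrative sketch of the MP3 -> mono 22050 Hz WAV conversion the helper performs.
# "samples/voice.mp3" is a hypothetical example path.
import tempfile
from pydub import AudioSegment

audio = AudioSegment.from_mp3("samples/voice.mp3")
audio = audio.set_channels(1).set_frame_rate(22050)
with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
    audio.export(tmp.name, format="wav")
    print("Speaker WAV written to", tmp.name)
```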
206
  logger.warning("TextMessage content is not a string: %s", content)
207
  return None
208
 
 
209
  pattern = r"```json\s*(.*?)\s*```"
210
  match = re.search(pattern, content, re.DOTALL)
211
  if match:
 
216
  except json.JSONDecodeError as e:
217
  logger.error("Failed to parse JSON from code block: %s", e)
218
 
 
219
  json_patterns = [
220
+ r"\[\s*\{.*?\}\s*\]",
221
+ r"\{\s*\".*?\"\s*:.*?\}",
222
  ]
223
 
224
  for pattern in json_patterns:
 
231
  except json.JSONDecodeError as e:
232
  logger.error("Failed to parse JSON with pattern %s: %s", pattern, e)
233
 
 
234
  try:
235
  for i in range(len(content)):
236
  for j in range(len(content), i, -1):
 
278
  logger.error("Failed to parse JSON from HandoffMessage: %s", e)
279
 
280
  json_patterns = [
281
+ r"\[\s*\{.*?\}\s*\]",
282
+ r"\{\s*\".*?\"\s*:.*?\}",
283
  ]
284
 
285
  for pattern in json_patterns:
 
298
  logger.warning("Unsupported message type for JSON extraction: %s", type(message))
299
  return None
300
 
301
+ # Function to generate Markdown slides
302
+ def generate_markdown_slides(slides, title, speaker="Prof. AI Feynman", date="April 26th, 2025"):
303
  try:
304
+ markdown_slides = []
 
305
  for i, slide in enumerate(slides):
306
+ slide_number = i + 1
307
+ content = slide['content']
308
+
309
+ # First and last slides have no header/footer
310
+ if i == 0 or i == len(slides) - 1:
311
+ slide_md = f"""
312
+ # {slide['title']}
313
+ {content}
314
+
315
+ **{speaker}**
316
+ *{date}*
317
  """
318
+ else:
319
+ slide_md = f"""
320
+ ##### Slide {slide_number}, {slide['title']}
321
+ {content}
322
+
323
+ , {title} {speaker}, {date}
324
+ """
325
+ markdown_slides.append(slide_md)
326
 
327
+ logger.info(f"Generated Markdown slides for: {title}")
328
+ return markdown_slides
329
  except Exception as e:
330
+ logger.error(f"Failed to generate Markdown slides: {str(e)}")
331
  logger.error(traceback.format_exc())
332
  return None
333
 
 
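For context, this is roughly what the new generate_markdown_slides helper emits for a middle slide; the slide dict and lecture metadata below are hypothetical sample values, not content from the repo:

```python
# Illustrative only: mirrors the middle-slide template added in this commit;
# first and last slides use "# {title}" plus the speaker/date credit instead.
slide_number = 2
slide = {"title": "Qubits", "content": "- Superposition\n- Entanglement"}  # sample data
title, speaker, date = "Quantum Computing 101", "Prof. AI Feynman", "April 26th, 2025"

slide_md = f"""
##### Slide {slide_number}, {slide['title']}
{slide['content']}

, {title} {speaker}, {date}
"""
print(slide_md)
```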
349
  """
350
  return
351
 
 
352
  tts = None
353
  try:
354
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
532
  )
533
  task_result.messages.append(retry_message)
534
  continue
 
535
  for i, slide in enumerate(slides):
536
  content_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_content.txt")
537
  try:
 
563
  if extracted_json:
564
  scripts = extracted_json
565
  logger.info("Script Agent generated scripts for %d slides", len(scripts))
 
566
  for i, script in enumerate(scripts):
567
  script_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_raw_script.txt")
568
  try:
 
643
  """
644
  return
645
 
646
+ markdown_slides = generate_markdown_slides(slides, title)
647
+ if not markdown_slides:
648
+ logger.error("Failed to generate Markdown slides")
 
649
  yield f"""
650
  <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
651
  <h2 style="color: #d9534f;">Failed to generate slides</h2>
 
666
  """
667
  return
668
 
 
669
  for i, script in enumerate(scripts):
670
  cleaned_script = clean_script_text(script)
671
+ audio_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}.mp3")
672
  script_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_script.txt")
673
 
 
674
  try:
675
  with open(script_file, "w", encoding="utf-8") as f:
676
  f.write(cleaned_script or "")
 
719
  await asyncio.sleep(0.1)
720
  break
721
 
722
+ # Collect .txt files for download
723
+ txt_files = [f for f in os.listdir(OUTPUT_DIR) if f.endswith('.txt')]
724
+ txt_files.sort() # Sort for consistent display
725
+ txt_links = ""
726
+ for txt_file in txt_files:
727
+ file_path = os.path.join(OUTPUT_DIR, txt_file)
728
+ txt_links += f'<a href="file/{file_path}" download>{txt_file}</a>&nbsp;&nbsp;'
729
+
730
+ # Generate audio timeline
731
+ audio_timeline = ""
732
+ for i, audio_file in enumerate(audio_files):
733
+ if audio_file:
734
+ audio_timeline += f'<span id="audio-{i+1}">{os.path.basename(audio_file)}</span>&nbsp;&nbsp;'
735
+ else:
736
+ audio_timeline += f'<span id="audio-{i+1}">slide_{i+1}.mp3</span>&nbsp;&nbsp;'
737
+
738
+ slides_info = json.dumps({"slides": markdown_slides, "audioFiles": audio_files})
739
 
740
  html_output = f"""
741
  <div id="lecture-container" style="height: 700px; border: 1px solid #ddd; border-radius: 8px; display: flex; flex-direction: column; justify-content: space-between;">
742
+ <div id="slide-content" style="flex: 1; overflow: auto; padding: 20px; text-align: center;">
743
+ <!-- Slides will be rendered here -->
744
  </div>
745
  <div style="padding: 20px;">
746
+ <div style="text-align: center; margin-bottom: 10px;">
747
+ {audio_timeline}
748
  </div>
749
  <div style="display: flex; justify-content: center; margin-bottom: 10px;">
750
  <button onclick="prevSlide()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏮</button>
751
+ <button onclick="playAll()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏯</button>
752
  <button onclick="nextSlide()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏭</button>
753
+ <button style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">☐</button>
754
  </div>
755
+ </div>
756
+ <div style="padding: 10px; text-align: center;">
757
+ <h4>Download Generated Files:</h4>
758
+ {txt_links}
759
  </div>
760
  </div>
761
  <script>
762
  const lectureData = {slides_info};
763
  let currentSlide = 0;
764
  const totalSlides = lectureData.slides.length;
 
 
765
  let audioElements = [];
766
+ let isPlayingAll = false;
767
 
768
  for (let i = 0; i < totalSlides; i++) {{
769
  if (lectureData.audioFiles && lectureData.audioFiles[i]) {{
 
774
  }}
775
  }}
776
 
777
+ function renderSlide() {{
778
+ const slideContent = document.getElementById('slide-content');
779
+ slideContent.innerHTML = lectureData.slides[currentSlide];
780
+ }}
 
 
781
 
782
+ function updateSlide() {{
783
+ renderSlide();
784
+ audioElements.forEach(audio => {{
785
+ if (audio) {{
786
+ audio.pause();
787
+ audio.currentTime = 0;
788
+ }}
789
+ }});
790
  }}
791
 
792
  function prevSlide() {{
 
803
  }}
804
  }}
805
 
806
+ function playAll() {{
807
+ if (isPlayingAll) {{
808
+ audioElements.forEach(audio => {{
809
+ if (audio) audio.pause();
810
+ }});
811
+ isPlayingAll = false;
812
+ return;
813
  }}
814
+
815
+ isPlayingAll = true;
816
+ currentSlide = 0;
817
+ updateSlide();
818
+ playCurrentSlide();
819
  }}
820
 
821
+ function playCurrentSlide() {{
822
+ if (!isPlayingAll || currentSlide >= totalSlides) {{
823
+ isPlayingAll = false;
824
+ return;
825
+ }}
826
+
827
+ const audio = audioElements[currentSlide];
828
  if (audio) {{
829
+ audio.play().then(() => {{
830
+ audio.addEventListener('ended', () => {{
831
+ currentSlide++;
832
+ if (currentSlide < totalSlides) {{
833
+ updateSlide();
834
+ playCurrentSlide();
835
+ }} else {{
836
+ isPlayingAll = false;
837
+ }}
838
+ }}, {{ once: true }});
839
+ }}).catch(e => {{
840
+ console.error('Audio play failed:', e);
841
+ currentSlide++;
842
+ playCurrentSlide();
843
  }});
844
+ }} else {{
845
+ currentSlide++;
846
+ playCurrentSlide();
847
  }}
848
+ }}
849
 
850
  // Initialize first slide
851
+ renderSlide();
852
  </script>
853
  """
854
  logger.info("Lecture generation completed successfully")
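Taken together, the updated flow renders each slide as a Markdown string, names the narration files slide_N.mp3, and serializes both into the slides_info payload read by the inline player. A minimal sketch of that assembly, with hypothetical sample values:

```python
# Illustrative only: mirrors how the updated on_generate packages slides and audio
# for the inline player. The markdown strings and file names are sample values.
import json
import os

OUTPUT_DIR = "/tmp/outputs"  # the Spaces default used in app.py
markdown_slides = ["# Intro\nWelcome.", "##### Slide 2, Qubits\n..."]  # e.g. from generate_markdown_slides
audio_files = [os.path.join(OUTPUT_DIR, f"slide_{i + 1}.mp3") for i in range(len(markdown_slides))]

# Same shape the new html_output template expects: {"slides": [...], "audioFiles": [...]}
slides_info = json.dumps({"slides": markdown_slides, "audioFiles": audio_files})
print(slides_info)
```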