Prof-Hunt committed
Commit ac361c7 · verified · 1 Parent(s): 15086d5

Update app.py

Files changed (1):
  1. app.py  +10 −9
app.py CHANGED
@@ -66,7 +66,7 @@ pipe_sd.scheduler = DPMSolverMultistepScheduler.from_config(pipe_sd.scheduler.co
 pipe_sd.enable_attention_slicing()
 
 @torch.inference_mode()
-@spaces.GPU(duration=30)
+#@spaces.GPU(duration=30)
 def generate_image():
     """Generate a random landscape image."""
     clear_memory()
@@ -98,7 +98,7 @@ def generate_image():
     return None
 
 @torch.inference_mode()
-@spaces.GPU(duration=30)
+#@spaces.GPU(duration=30)
 def analyze_image(image):
     if image is None:
         return "Please generate an image first."
@@ -191,7 +191,7 @@ def format_story_paragraphs(story_text, max_paragraphs=9):
     # Join paragraphs with a double newline
     return "\n\n".join(paragraphs)
 
-@spaces.GPU(duration=60)
+#@spaces.GPU(duration=60)
 def generate_story(image_description):
     """Runs story generation safely on Hugging Face Spaces with Stateless GPU"""
     clear_memory()
@@ -240,7 +240,7 @@ def generate_story(image_description):
 
 
 @torch.inference_mode()
-@spaces.GPU(duration=30)
+#@spaces.GPU(duration=30)
 def generate_image_prompts(story_text):
     clear_memory()
 
@@ -280,7 +280,7 @@ def generate_image_prompts(story_text):
     return "Error generating prompts. Please try again."
 
 @torch.inference_mode()
-@spaces.GPU(duration=60)
+#@spaces.GPU(duration=60)
 def generate_story_image(prompt, seed=-1):
     clear_memory()
 
@@ -314,7 +314,7 @@ def generate_story_image(prompt, seed=-1):
     return None
 
 @torch.inference_mode()
-@spaces.GPU(duration=180)
+#@spaces.GPU(duration=180)
 def generate_all_scenes(prompts_text):
     clear_memory()
 
@@ -393,7 +393,7 @@ def generate_all_scenes(prompts_text):
     # Final yield
     yield generated_images, "\n\n".join(formatted_prompts), update_progress()
 
-@spaces.GPU(duration=60)
+#@spaces.GPU(duration=60)
 def add_text_to_scenes(gallery_images, prompts_text):
     """Add text overlays to all scenes"""
     print(f"Received gallery_images type: {type(gallery_images)}")
@@ -532,7 +532,7 @@ def overlay_text_on_image(image, text):
        print(f"Error in overlay_text_on_image: {e}")
        return None
 
-@spaces.GPU(duration=60)
+#@spaces.GPU(duration=60)
 def generate_combined_audio_from_story(story_text, voice='af_heart', speed=1):
     """Generate a single audio file for all paragraphs in the story."""
 
@@ -698,7 +698,8 @@ def create_interface():
 
     Let's create something magical! ✨
 
-    ## 📌 [Open in Google Colab](<{https://colab.research.google.com/drive/18-4frRwaMxxqJrRjiXh1Q069R77INMeA?usp=sharing}>) 🚀
+    ## 📌 [Open in Google Colab](https://colab.research.google.com/drive/18-4frRwaMxxqJrRjiXh1Q069R77INMeA?usp=sharing) 🚀
+
 
     """)
 
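For reference, a minimal sketch of the decorator pattern these hunks toggle, assuming the `spaces` package that Hugging Face provides on ZeroGPU ("Stateless GPU") Spaces; the function name, `pipe` argument, and prompt below are illustrative placeholders, not code from this app:

```python
# Sketch only: how spaces.GPU is typically stacked with torch.inference_mode()
# on a ZeroGPU Space. `pipe` stands in for a preloaded diffusers pipeline
# (e.g. the pipe_sd object configured earlier in app.py).
import torch
import spaces  # available inside Hugging Face Spaces runtimes


@torch.inference_mode()
@spaces.GPU(duration=30)  # request a GPU for up to ~30 s for this call
def run_pipeline(pipe, prompt: str):
    """Run the pipeline on the GPU attached for the duration of this call."""
    return pipe(prompt, num_inference_steps=25).images[0]
```

On ZeroGPU hardware, `spaces.GPU` attaches a device only while the decorated function runs and releases it on return; on a Space with a dedicated GPU, or for local runs, the decorator is not required, so the functions still work with it commented out as above.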