mrfakename committed (verified)
Commit: 24dd25f
Parent(s): 2467ec6

Update app.py

Files changed (1):
  app.py  +10 −11
app.py CHANGED
@@ -1,7 +1,7 @@
 """
 VibeVoice Gradio Demo - High-Quality Dialogue Generation Interface with Streaming Support
 """
-
+import spaces
 import argparse
 import json
 import os
@@ -43,7 +43,8 @@ class VibeVoiceDemo:
         self.load_model()
         self.setup_voice_presets()
         self.load_example_scripts()  # Load example scripts
-
+
+    @spaces.GPU
     def load_model(self):
         """Load the VibeVoice model and processor."""
         print(f"Loading processor & model from {self.model_path}")
@@ -126,7 +127,8 @@ class VibeVoiceDemo:
         except Exception as e:
             print(f"Error reading audio {audio_path}: {e}")
             return np.array([])
-
+
+    @spaces.GPU
     def generate_podcast_streaming(self,
                                    num_speakers: int,
                                    script: str,
@@ -418,7 +420,8 @@ class VibeVoiceDemo:
             import traceback
             traceback.print_exc()
             yield None, None, error_msg, gr.update(visible=False)
-
+
+    @spaces.GPU
     def _generate_with_streamer(self, inputs, cfg_scale, audio_streamer):
         """Helper method to run generation with streamer in a separate thread."""
         try:
@@ -1137,9 +1140,9 @@ def main():
 
     # Initialize demo instance
     demo_instance = VibeVoiceDemo(
-        model_path=args.model_path,
-        device=args.device,
-        inference_steps=args.inference_steps
+        model_path='microsoft/VibeVoice-1.5B',
+        device='cuda',
+        inference_steps=10
     )
 
     # Create interface
@@ -1155,11 +1158,7 @@ def main():
     try:
         interface.queue(
            max_size=20,  # Maximum queue size
-            default_concurrency_limit=1  # Process one request at a time
         ).launch(
-            share=args.share,
-            # server_port=args.port,
-            server_name="0.0.0.0" if args.share else "127.0.0.1",
            show_error=True,
            show_api=False  # Hide API docs for cleaner interface
        )
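The functional change in this commit is the ZeroGPU adaptation: app.py now imports the Hugging Face spaces package, marks the GPU-bound methods with @spaces.GPU, and hardcodes the Space's model path, device, and launch settings in place of the argparse-driven values. Below is a minimal, self-contained sketch of that decorator pattern, assuming the spaces package that ZeroGPU Spaces provide; the function name and body are illustrative placeholders, not code from app.py.

import spaces
import torch

@spaces.GPU  # on a ZeroGPU Space, a GPU is attached only while this call runs
def report_device() -> str:
    # Placeholder body: report whichever device the decorated call actually sees.
    return torch.cuda.get_device_name(0) if torch.cuda.is_available() else "cpu"

if __name__ == "__main__":
    print(report_device())

Because the decorator requests GPU time around each decorated call, the commit applies it individually to the model-loading and generation methods rather than holding a GPU for the whole process.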