rahul7star committed (verified)
Commit 5962f83 · Parent: ce1bcc3

Update app.py

Files changed (1): app.py (+20 -53)
app.py CHANGED

@@ -130,68 +130,29 @@ def get_duration(
     return steps * 15
 
 @spaces.GPU(duration=get_duration)
+
 def generate_video(
     prompt,
     negative_prompt=default_negative_prompt,
-    duration_seconds = MAX_DURATION,
-    guidance_scale = 1,
-    guidance_scale_2 = 3,
-    steps = 4,
-    seed = 42,
-    randomize_seed = False,
+    duration_seconds=MAX_DURATION,
+    guidance_scale=1,
+    guidance_scale_2=3,
+    steps=4,
+    seed=42,
+    randomize_seed=False,
     progress=gr.Progress(track_tqdm=True),
 ):
-    """
-    Generate a video from a text prompt using the Wan 2.2 14B T2V model with Lightning LoRA.
-
-    This function takes an input prompt and generates a video animation based on the provided
-    prompt and parameters. It uses an FP8 qunatized Wan 2.2 14B Text-to-Video model with Lightning LoRA
-    for fast generation in 4-8 steps.
-
-    Args:
-        prompt (str): Text prompt describing the desired animation or motion.
-        negative_prompt (str, optional): Negative prompt to avoid unwanted elements.
-            Defaults to default_negative_prompt (contains unwanted visual artifacts).
-        duration_seconds (float, optional): Duration of the generated video in seconds.
-            Defaults to 2. Clamped between MIN_FRAMES_MODEL/FIXED_FPS and MAX_FRAMES_MODEL/FIXED_FPS.
-        guidance_scale (float, optional): Controls adherence to the prompt. Higher values = more adherence.
-            Defaults to 1.0. Range: 0.0-20.0.
-        guidance_scale_2 (float, optional): Controls adherence to the prompt. Higher values = more adherence.
-            Defaults to 1.0. Range: 0.0-20.0.
-        steps (int, optional): Number of inference steps. More steps = higher quality but slower.
-            Defaults to 4. Range: 1-30.
-        seed (int, optional): Random seed for reproducible results. Defaults to 42.
-            Range: 0 to MAX_SEED (2147483647).
-        randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
-            Defaults to False.
-        progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).
-
-    Returns:
-        tuple: A tuple containing:
-            - video_path (str): Path to the generated video file (.mp4)
-            - current_seed (int): The seed used for generation (useful when randomize_seed=True)
-
-    Raises:
-        gr.Error: If input_image is None (no image uploaded).
-
-    Note:
-        - The function automatically resizes the input image to the target dimensions
-        - Frame count is calculated as duration_seconds * FIXED_FPS (24)
-        - Output dimensions are adjusted to be multiples of MOD_VALUE (32)
-        - The function uses GPU acceleration via the @spaces.GPU decorator
-        - Generation time varies based on steps and duration (see get_duration function)
-    """
-    print("promot is ")
-    print(prompt)
-
+    print("Prompt:", prompt)
+
     num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
 
+    # Generate frames
     output_frames_list = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
-        height=480,
-        width=832,
+        height=LANDSCAPE_HEIGHT,
+        width=LANDSCAPE_WIDTH,
         num_frames=num_frames,
         guidance_scale=float(guidance_scale),
         guidance_scale_2=float(guidance_scale_2),
@@ -199,14 +160,20 @@ def generate_video(
         generator=torch.Generator(device="cuda").manual_seed(current_seed),
     ).frames[0]
 
+
+    video_path_raw = os.path.join(tempfile.gettempdir(), f"video_raw_{uuid.uuid4().hex[:8]}.mp4")
+    export_to_video(output_frames_list, video_path_raw, fps=FIXED_FPS)
+
 
+    video_path = os.path.join(tempfile.gettempdir(), f"video_{uuid.uuid4().hex[:8]}.mp4")
+    improve_video_quality(video_path_raw, video_path, bitrate="8M")
+
 
-    video_path = os.path.join(tempfile.gettempdir(), f"video_{current_seed}.mp4")
-    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
     hf_folder = upload_to_hf(video_path, prompt)
 
     return video_path, current_seed
 
+
 with gr.Blocks() as demo:
     gr.Markdown("# Fast 4 steps Wan 2.2 T2V (14B) with Lightning LoRA")
     gr.Markdown("run Wan 2.2 in just 4-8 steps, with [Wan 2.2 Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), fp8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")