Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -7,6 +7,7 @@ import torch
|
|
7 |
from einops import rearrange
|
8 |
from PIL import Image
|
9 |
from transformers import pipeline
|
|
|
10 |
|
11 |
from flux.cli import SamplingOptions
|
12 |
from flux.sampling import denoise, get_noise, get_schedule, prepare, unpack
|
@@ -15,7 +16,7 @@ from pulid.pipeline_flux import PuLIDPipeline
|
|
15 |
from pulid.utils import resize_numpy_image_long
|
16 |
from prompt_template import prompt_dict
|
17 |
from style_template import styles
|
18 |
-
|
19 |
NSFW_THRESHOLD = 0.85
|
20 |
PROMPTS_NAMES = list(prompt_dict.keys())
|
21 |
DEFAULT_PROMPT_NAME = "Paris"
|
@@ -29,11 +30,33 @@ def get_prompt (prompt_name : str):
|
|
29 |
prompt = prompt_dict.get(prompt_name , prompt_dict[DEFAULT_PROMPT_NAME])
|
30 |
return prompt[0]
|
31 |
|
|
|
|
|
32 |
|
33 |
def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]:
    """Expand a named style template around the user's prompt.

    Looks up ``style_name`` in the project-level ``styles`` dict, falling
    back to ``DEFAULT_STYLE_NAME`` when the name is unknown.  The positive
    template contains a ``{prompt}`` placeholder that is substituted with
    *positive*; the style's negative text is prepended (space-joined) to
    *negative*.  Returns the (positive, negative) prompt pair.
    """
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return p.replace("{prompt}", positive), n + ' ' + negative
|
36 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
|
38 |
def get_models(name: str, device: torch.device, offload: bool):
|
39 |
t5 = load_t5(device, max_length=128)
|
@@ -200,7 +223,7 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
|
|
200 |
id_image = gr.Image(label="ID Image")
|
201 |
|
202 |
generate_btn = gr.Button("Generate")
|
203 |
-
|
204 |
id_weight = gr.Slider(0.0, 3.0, 1, step=0.05, label="id weight")
|
205 |
|
206 |
width = gr.Slider(256, 1536, 896, step=16, label="Width")
|
@@ -225,6 +248,7 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
|
|
225 |
output_image = gr.Image(label="Generated Image", format='png')
|
226 |
seed_output = gr.Textbox(label="Used Seed")
|
227 |
intermediate_output = gr.Gallery(label='Output', elem_id="gallery", visible=args.dev)
|
|
|
228 |
|
229 |
|
230 |
generate_btn.click(
|
@@ -233,7 +257,11 @@ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_ava
|
|
233 |
neg_prompt, timestep_to_start_cfg, max_sequence_length],
|
234 |
outputs=[output_image, seed_output, intermediate_output],
|
235 |
)
|
236 |
-
|
|
|
|
|
|
|
|
|
237 |
return demo
|
238 |
|
239 |
|
|
|
7 |
from einops import rearrange
|
8 |
from PIL import Image
|
9 |
from transformers import pipeline
|
10 |
+
from moviepy.editor import ImageClip, concatenate_videoclips, AudioFileClip
|
11 |
|
12 |
from flux.cli import SamplingOptions
|
13 |
from flux.sampling import denoise, get_noise, get_schedule, prepare, unpack
|
|
|
16 |
from pulid.utils import resize_numpy_image_long
|
17 |
from prompt_template import prompt_dict
|
18 |
from style_template import styles
|
19 |
+
from images import image_dict
|
20 |
NSFW_THRESHOLD = 0.85
|
21 |
PROMPTS_NAMES = list(prompt_dict.keys())
|
22 |
DEFAULT_PROMPT_NAME = "Paris"
|
|
|
30 |
prompt = prompt_dict.get(prompt_name , prompt_dict[DEFAULT_PROMPT_NAME])
|
31 |
return prompt[0]
|
32 |
|
33 |
+
def get_image(prompt_name):
    """Return the images registered for *prompt_name*.

    Presumably a sequence of image file paths (generate_video iterates it
    as paths) — confirm against images.py.

    Falls back to the entry for ``DEFAULT_PROMPT_NAME``, mirroring the
    lookup convention used by ``get_prompt`` (which uses
    ``prompt_dict.get(name, prompt_dict[DEFAULT_PROMPT_NAME])``), instead
    of raising ``KeyError`` on an unknown name.
    """
    return image_dict.get(prompt_name, image_dict[DEFAULT_PROMPT_NAME])
|
35 |
|
36 |
def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]:
    """Merge the named style template into a (positive, negative) prompt pair.

    Unknown style names fall back to ``DEFAULT_STYLE_NAME``.  The template's
    ``{prompt}`` placeholder is replaced with *positive*, and the template's
    negative text is prefixed onto *negative*.
    """
    template_pos, template_neg = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    styled_positive = template_pos.replace("{prompt}", positive)
    styled_negative = template_neg + ' ' + negative
    return styled_positive, styled_negative
|
39 |
|
40 |
+
def generate_video(prompt_name , output_image):
    """Assemble a slideshow MP4 for *prompt_name* and return the clip.

    Each stock image registered for the prompt is shown for 2 seconds at
    1024x1024 with a 0.5 s crossfade-out; the user's generated image leads
    the sequence with a 0.5 s crossfade-in when it can act as a clip.
    The result is written to ``"<prompt_name>.mp4"`` at 24 fps.

    Fixes over the original: the bare ``except:`` (which swallowed every
    error, including real failures in concatenation/encoding) is narrowed
    to the lookup failure on *output_image*, and the video-assembly code
    that was duplicated verbatim in the except branch is written once.
    """
    images = get_image(prompt_name)
    image_clips = [
        ImageClip(img_path).set_duration(2).resize((1024, 1024))
        for img_path in images
    ]

    video_clip = []
    try:
        # output_image comes from a gr.Image component and may not be a
        # moviepy clip; if crossfadein is unavailable, proceed without it.
        video_clip.append(output_image.crossfadein(0.5))
    except (AttributeError, TypeError):
        pass  # best-effort: build the video from the stock images only
    video_clip.extend(clip.crossfadeout(0.5) for clip in image_clips)

    final_video = concatenate_videoclips(video_clip, method="compose")
    final_video.write_videofile(filename=f"{prompt_name}.mp4", fps=24)
    return final_video
|
59 |
+
|
60 |
|
61 |
def get_models(name: str, device: torch.device, offload: bool):
|
62 |
t5 = load_t5(device, max_length=128)
|
|
|
223 |
id_image = gr.Image(label="ID Image")
|
224 |
|
225 |
generate_btn = gr.Button("Generate")
|
226 |
+
video_btn = gr.Button("Generate Video")
|
227 |
id_weight = gr.Slider(0.0, 3.0, 1, step=0.05, label="id weight")
|
228 |
|
229 |
width = gr.Slider(256, 1536, 896, step=16, label="Width")
|
|
|
248 |
output_image = gr.Image(label="Generated Image", format='png')
|
249 |
seed_output = gr.Textbox(label="Used Seed")
|
250 |
intermediate_output = gr.Gallery(label='Output', elem_id="gallery", visible=args.dev)
|
251 |
+
output_video = gr.Video(label= "Generated Video" , format='mp4')
|
252 |
|
253 |
|
254 |
generate_btn.click(
|
|
|
257 |
neg_prompt, timestep_to_start_cfg, max_sequence_length],
|
258 |
outputs=[output_image, seed_output, intermediate_output],
|
259 |
)
|
260 |
+
video_btn.click(
|
261 |
+
fn=generate_video,
|
262 |
+
inputs=[prompt_name,output_image],
|
263 |
+
outputs=[output_video]
|
264 |
+
)
|
265 |
return demo
|
266 |
|
267 |
|