Spaces: Running on Zero
Commit • f1c3cc1
Parent(s): 9778625
Update app.py
app.py CHANGED
@@ -9,7 +9,7 @@ import numpy as np
 import cv2
 from PIL import Image
 from diffusers.utils import load_image
-from diffusers.utils import
+from diffusers.utils import export_to_video
 import random
 
 # load pipelines
@@ -21,7 +21,7 @@ pipe = FluxPipeline.from_pretrained(base_model,
                                     torch_dtype=torch.bfloat16)
 
 pipe.transformer.to(memory_format=torch.channels_last)
-pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
+# pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
 # pipe.enable_model_cpu_offload()
 clip_slider = CLIPSliderFlux(pipe, device=torch.device("cuda"))
 
@@ -102,7 +102,7 @@ def generate(prompt,
     post_generation_slider_update = gr.update(label=comma_concepts_x, value=0, minimum=scale_min, maximum=scale_max, interactive=True)
     avg_diff_x = avg_diff.cpu()
 
-    return x_concept_1,x_concept_2, avg_diff_x,
+    return x_concept_1,x_concept_2, avg_diff_x, export_to_video(images, f"{uuid.uuid4()}.mp4", fps=5), canvas, images, images[scale_middle], post_generation_slider_update, seed
 
 def update_pre_generated_images(slider_value, total_images):
     number_images = len(total_images)
@@ -134,7 +134,7 @@ intro = """
 </p>
 """
 css='''
-#strip, #
+#strip, #video{max-height: 170px; min-height: 65px}
 #strip img{object-fit: cover}
 '''
 examples = [["a dog in the park", "winter", "summer", 1.5], ["a house", "USA suburb", "Europe", 2.5], ["a tomato", "rotten", "super fresh", 2.5]]
@@ -161,15 +161,16 @@ with gr.Blocks(css=css) as demo:
             submit = gr.Button("Generate directions")
 
         with gr.Column():
-
-
-
-
-
-
-
-
-
+            output_image = gr.Video(label="Looping video", elem_id="video", loop=True, autoplay=True)
+            #with gr.Row():
+
+            with gr.Column(scale=4, min_width=50):
+                image_seq = gr.Image(label="Strip", elem_id="strip", height=65)
+
+            with gr.Column(scale=2, min_width=50):
+                with gr.Group(elem_id="group"):
+                    post_generation_image = gr.Image(label="Generated Images", type="filepath")
+                    post_generation_slider = gr.Slider(minimum=-10, maximum=10, value=0, step=1, label="From 1st to 2nd direction")
 
     with gr.Accordion(label="Advanced options", open=False):
         interm_steps = gr.Slider(label = "Num of intermediate images", minimum=3, value=7, maximum=65, step=2)