hyper sdxl
app.py
CHANGED
@@ -5,17 +5,11 @@ import tempfile
 import numpy as np
 from pathlib import Path
 from PIL import Image
-from diffusers import (
-    ControlNetModel,
-    StableDiffusionXLControlNetPipeline,
-    UNet2DConditionModel,
-    EulerDiscreteScheduler,
-)
+from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, TCDScheduler
 import spaces
 import gradio as gr
 from huggingface_hub import hf_hub_download, snapshot_download
 from ip_adapter import IPAdapterXL
-from safetensors.torch import load_file

 snapshot_download(
     repo_id="h94/IP-Adapter", allow_patterns="sdxl_models/*", local_dir="."
@@ -36,7 +30,7 @@ controlnet = ControlNetModel.from_pretrained(
     controlnet_path, use_safetensors=False, torch_dtype=torch.float16
 ).to(device)

-# load
+# load Hyper SD

 pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
     base_model_path,
@@ -46,17 +40,11 @@ pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
     add_watermarker=False,
 ).to(device)
 pipe.set_progress_bar_config(disable=True)
-pipe.scheduler = EulerDiscreteScheduler.from_config(
-    pipe.scheduler.config, timestep_spacing="trailing"
-)
-pipe.unet.load_state_dict(
-    load_file(
-        hf_hub_download(
-            "ByteDance/SDXL-Lightning", "sdxl_lightning_2step_unet.safetensors"
-        ),
-        device="cuda",
-    )
-)
+pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
+pipe.load_lora_weights(
+    hf_hub_download("ByteDance/Hyper-SD", "Hyper-SDXL-1step-lora.safetensors")
 )
+eta = 1.0

 # load ip-adapter
 # target_blocks=["block"] for original IP-Adapter
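For context, the new loading path can be reproduced outside the Space with a plain SDXL pipeline. This is a minimal sketch rather than the demo's actual stack (the Space wraps its base model in StableDiffusionXLControlNetPipeline plus IPAdapterXL), and the checkpoint name and fp16 variant are assumptions, not read from the diff:

import torch
from diffusers import StableDiffusionXLPipeline, TCDScheduler
from huggingface_hub import hf_hub_download

# Plain SDXL base pipeline (assumed checkpoint; the Space uses its own
# base_model_path inside a ControlNet pipeline instead).
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# Same two steps as the diff: switch the scheduler to TCD and attach the
# Hyper-SDXL 1-step LoRA instead of swapping in the SDXL-Lightning UNet weights.
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(
    hf_hub_download("ByteDance/Hyper-SD", "Hyper-SDXL-1step-lora.safetensors")
)

If desired, the LoRA can also be baked into the UNet with pipe.fuse_lora(); the diff leaves it loaded un-fused.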
@@ -225,6 +213,7 @@ def create_image(
             controlnet_conditioning_scale=float(control_scale),
             neg_content_prompt=neg_content_prompt,
             neg_content_scale=neg_content_scale,
+            eta=1.0,
         )
     else:
         images = ip_model.generate(
@@ -238,6 +227,7 @@ def create_image(
             seed=seed,
             image=canny_map,
             controlnet_conditioning_scale=float(control_scale),
+            eta=1.0,
         )
     image = images[0]
     with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmpfile:
@@ -258,7 +248,7 @@ title = r"""

 description = r"""
 <b>Forked from <a href='https://github.com/InstantStyle/InstantStyle' target='_blank'>InstantStyle: Free Lunch towards Style-Preserving in Text-to-Image Generation</a>.<br>
-<b>Model by <a href='https://huggingface.co/ByteDance/
+<b>Model by <a href='https://huggingface.co/ByteDance/Hyper-SD' target='_blank'>Hyper-SD</a> and <a href='https://huggingface.co/h94/IP-Adapter' target='_blank'>IP-Adapter</a>.</b><br>
 """

 article = r"""
@@ -347,10 +337,10 @@ with block:
                 label="guidance scale",
             )
             num_inference_steps = gr.Slider(
-                minimum=
-                maximum=
+                minimum=1,
+                maximum=10.0,
                 step=1.0,
-                value=
+                value=1,
                 label="num inference steps",
             )
             seed = gr.Slider(
@@ -390,6 +380,10 @@ with block:
             scale.input,
             control_scale.input,
             seed.input,
+            num_inference_steps.input,
+            target.input,
+            neg_content_prompt.input,
+            neg_content_scale.input,
         ],
         fn=create_image,
         inputs=inputs,
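The call-time side of the change is the step count and the TCD eta. Below is a minimal sketch of the equivalent call using the plain pipeline from the sketch above; the Space instead forwards these arguments through ip_model.generate, and the prompt and guidance_scale here are illustrative assumptions:

# One diffusion step; eta is the TCD stochasticity parameter that the
# diff hard-codes to 1.0 in both generate() branches.
image = pipe(
    "a cat, watercolor style",
    num_inference_steps=1,   # the Gradio slider now ranges 1-10, default 1
    guidance_scale=0.0,      # assumption: CFG effectively disabled for 1-step sampling
    eta=1.0,
).images[0]
image.save("hyper_sdxl_1step.jpg")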