Commit 0711b9e · 1 Parent(s): d27799d
up
app.py CHANGED
@@ -5,12 +5,12 @@ from PIL import Image
 import time
 import psutil
 import random
-from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+# from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 
 
 start_time = time.time()
 current_steps = 25
-
+
 PIPE = DiffusionPipeline.from_pretrained("timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None)
 
 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
@@ -26,7 +26,6 @@ def error_str(error, title="Error"):
 
 
 def inference(
-    model_name,
     prompt,
     guidance,
     steps,
@@ -49,7 +48,6 @@ def inference(
     try:
         return (
             img_to_img(
-                model_name,
                 prompt,
                 n_images,
                 neg_prompt,
@@ -69,7 +67,6 @@ def inference(
 
 
 def img_to_img(
-    model_name,
     prompt,
     n_images,
     neg_prompt,
@@ -134,11 +131,6 @@ with gr.Blocks(css="style.css") as demo:
     with gr.Row():
         with gr.Column(scale=55):
             with gr.Group():
-                model_name = gr.Dropdown(
-                    label="Model",
-                    choices=[m.name for m in models],
-                    value=models[0].name,
-                )
                 with gr.Box(visible=False) as custom_model_group:
                     gr.HTML(
                         "<div><font size='2'>Custom models have to be downloaded first, so give it some time.</font></div>"
@@ -189,6 +181,14 @@ with gr.Blocks(css="style.css") as demo:
                         step=1,
                     )
 
+                    with gr.Row():
+                        width = gr.Slider(
+                            label="Width", value=512, minimum=64, maximum=1024, step=8
+                        )
+                        height = gr.Slider(
+                            label="Height", value=512, minimum=64, maximum=1024, step=8
+                        )
+
                     seed = gr.Slider(
                         0, 2147483647, label="Seed (0 = random)", value=0, step=1
                     )
@@ -206,7 +206,6 @@ with gr.Blocks(css="style.css") as demo:
             )
 
         inputs = [
-            model_name,
            prompt,
            guidance,
            steps,
@@ -224,7 +223,7 @@ with gr.Blocks(css="style.css") as demo:
 
        ex = gr.Examples(
            [],
-            inputs=[
+            inputs=[prompt, guidance, steps, neg_prompt],
            outputs=outputs,
            fn=inference,
            cache_examples=True,
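
For context, a minimal standalone sketch of the pipeline call this file is built around, assuming the diffusers InstructPix2Pix API and a CUDA device; the input file name, edit instruction, and 512x512 resize are illustrative only and not taken from the Space:

import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Same checkpoint and options as app.py; safety_checker=None matches this commit,
# which also comments out the StableDiffusionSafetyChecker import.
pipe = DiffusionPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix",
    torch_dtype=torch.float16,
    safety_checker=None,
).to("cuda")

# Hypothetical input image, resized to the default of the new Width/Height sliders.
image = Image.open("input.png").convert("RGB").resize((512, 512))

result = pipe(
    "turn it into a watercolor painting",  # illustrative instruction
    image=image,
    num_inference_steps=25,                # matches current_steps in app.py
).images[0]
result.save("output.png")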
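
The Width/Height controls added in this commit use the standard gr.Slider pattern; a self-contained sketch of that pattern outside the Space (the echo_size callback and button wiring are hypothetical, added only to make the snippet runnable; the Space presumably routes the slider values into its inference() call, which is outside this diff):

import gradio as gr

def echo_size(width, height):
    # Hypothetical callback standing in for the Space's own handler.
    return f"Requested size: {int(width)}x{int(height)}"

with gr.Blocks() as demo:
    with gr.Row():
        width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
        height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
    size_out = gr.Textbox(label="Size")
    gr.Button("Check").click(echo_size, inputs=[width, height], outputs=size_out)

demo.launch()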