prithivMLmods committed
Commit d2868aa · verified · 1 Parent(s): 7798d98

update app

Files changed (1)
  1. app.py +543 -0
app.py ADDED
@@ -0,0 +1,543 @@
import spaces
import os
import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL
import random
import uuid
from typing import Tuple, Union, List, Optional, Any, Dict
import numpy as np
import time
import zipfile
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast

# ---- CUDA Check ----
print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
print("torch.__version__ =", torch.__version__)
print("torch.version.cuda =", torch.version.cuda)
print("cuda available:", torch.cuda.is_available())
print("cuda device count:", torch.cuda.device_count())
if torch.cuda.is_available():
    print("current device:", torch.cuda.current_device())
    print("device name:", torch.cuda.get_device_name(torch.cuda.current_device()))

# Description for the app
DESCRIPTION = """## flux realism hpc/."""

# Helper functions
def save_image(img):
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
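# Note: MAX_IMAGE_SIZE is not referenced elsewhere in this file; the width/height
# sliders below cap the resolution at 2048 directly.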

# Load pipelines for both models
# Flux.1-dev-realism
base_model_dev = "black-forest-labs/FLUX.1-dev"
pipe_dev = DiffusionPipeline.from_pretrained(base_model_dev, torch_dtype=torch.bfloat16)
lora_repo = "strangerzonehf/Flux-Super-Realism-LoRA"
trigger_word = "Super Realism"
pipe_dev.load_lora_weights(lora_repo)
pipe_dev.to("cuda")

# Flux.1-krea
dtype = torch.bfloat16
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# --- Model Loading ---
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-Krea-dev", subfolder="vae", torch_dtype=dtype).to(device)
pipe_krea = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-Krea-dev", torch_dtype=dtype, vae=taef1).to(device)
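# pipe_krea is built with the tiny taef1 VAE so per-step previews decode cheaply;
# good_vae (the full FLUX.1-Krea-dev AutoencoderKL) is kept separately for the
# final, full-quality decode.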

# Define the flux_pipe_call_that_returns_an_iterable_of_images for flux.1-krea
@torch.inference_mode()
def flux_pipe_call_that_returns_an_iterable_of_images(
    self,
    prompt: Union[str, List[str]] = None,
    prompt_2: Optional[Union[str, List[str]]] = None,
    height: Optional[int] = None,
    width: Optional[int] = None,
    num_inference_steps: int = 28,
    timesteps: List[int] = None,
    guidance_scale: float = 3.5,
    num_images_per_prompt: Optional[int] = 1,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.FloatTensor] = None,
    prompt_embeds: Optional[torch.FloatTensor] = None,
    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    max_sequence_length: int = 512,
    good_vae: Optional[Any] = None,
):
    height = height or self.default_sample_size * self.vae_scale_factor
    width = width or self.default_sample_size * self.vae_scale_factor

    self.check_inputs(
        prompt,
        prompt_2,
        height,
        width,
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        max_sequence_length=max_sequence_length,
    )

    self._guidance_scale = guidance_scale
    self._joint_attention_kwargs = joint_attention_kwargs
    self._interrupt = False

    batch_size = 1 if isinstance(prompt, str) else len(prompt)
    device = self._execution_device

    lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
    prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
        prompt=prompt,
        prompt_2=prompt_2,
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        device=device,
        num_images_per_prompt=num_images_per_prompt,
        max_sequence_length=max_sequence_length,
        lora_scale=lora_scale,
    )

    num_channels_latents = self.transformer.config.in_channels // 4
    latents, latent_image_ids = self.prepare_latents(
        batch_size * num_images_per_prompt,
        num_channels_latents,
        height,
        width,
        prompt_embeds.dtype,
        device,
        generator,
        latents,
    )

    sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
    image_seq_len = latents.shape[1]
    mu = calculate_shift(
        image_seq_len,
        self.scheduler.config.base_image_seq_len,
        self.scheduler.config.max_image_seq_len,
        self.scheduler.config.base_shift,
        self.scheduler.config.max_shift,
    )
    timesteps, num_inference_steps = retrieve_timesteps(
        self.scheduler,
        num_inference_steps,
        device,
        timesteps,
        sigmas,
        mu=mu,
    )
    self._num_timesteps = len(timesteps)

    guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None

    for i, t in enumerate(timesteps):
        if self.interrupt:
            continue

        timestep = t.expand(latents.shape[0]).to(latents.dtype)

        noise_pred = self.transformer(
            hidden_states=latents,
            timestep=timestep / 1000,
            guidance=guidance,
            pooled_projections=pooled_prompt_embeds,
            encoder_hidden_states=prompt_embeds,
            txt_ids=text_ids,
            img_ids=latent_image_ids,
            joint_attention_kwargs=self.joint_attention_kwargs,
            return_dict=False,
        )[0]

        latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
        latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
        image = self.vae.decode(latents_for_image, return_dict=False)[0]
        yield self.image_processor.postprocess(image, output_type=output_type)[0]

        latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
        torch.cuda.empty_cache()

    latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
    latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
    image = good_vae.decode(latents, return_dict=False)[0]
    self.maybe_free_model_hooks()
    torch.cuda.empty_cache()
    yield self.image_processor.postprocess(image, output_type=output_type)[0]

pipe_krea.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe_krea)
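# `__get__(pipe_krea)` binds the generator above as an instance method, so calling
# pipe_krea.flux_pipe_call_that_returns_an_iterable_of_images(...) passes the
# pipeline as `self`. Intermediate yields are decoded with the pipeline's taef1 VAE
# for fast previews; the last yield re-decodes the final latents with good_vae.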

# Helper functions for flux.1-krea
def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.16,
):
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    mu = image_seq_len * m + b
    return mu
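# calculate_shift() linearly interpolates the flow-matching shift `mu` between
# base_shift (at base_seq_len tokens) and max_shift (at max_seq_len tokens) based
# on the latent sequence length, matching the helper used in the diffusers Flux
# pipelines.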

def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed.")
    if timesteps is not None:
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
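# retrieve_timesteps() mirrors the diffusers utility of the same name: at most one
# of custom `timesteps` or `sigmas` may override the schedule; otherwise the plain
# step count is handed to scheduler.set_timesteps().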

# Styles for flux.1-dev-realism
style_list = [
    {"name": "3840 x 2160", "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic", "negative_prompt": ""},
    {"name": "2560 x 1440", "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic", "negative_prompt": ""},
    {"name": "HD+", "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic", "negative_prompt": ""},
    {"name": "Style Zero", "prompt": "{prompt}", "negative_prompt": ""},
]

styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
DEFAULT_STYLE_NAME = "3840 x 2160"
STYLE_NAMES = list(styles.keys())

def apply_style(style_name: str, positive: str) -> Tuple[str, str]:
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return p.replace("{prompt}", positive), n
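# Example: apply_style("HD+", "a lighthouse at dusk") returns
# ("hyper-realistic 2K image of a lighthouse at dusk. ultra-detailed, lifelike,
# high-resolution, sharp, vibrant colors, photorealistic", "").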

# Generation function for flux.1-dev-realism
@spaces.GPU
def generate_dev(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    randomize_seed: bool = False,
    style_name: str = DEFAULT_STYLE_NAME,
    num_inference_steps: int = 30,
    num_images: int = 1,
    zip_images: bool = False,
    progress=gr.Progress(track_tqdm=True),
):
    positive_prompt, style_negative_prompt = apply_style(style_name, prompt)

    if use_negative_prompt:
        final_negative_prompt = style_negative_prompt + " " + negative_prompt
    else:
        final_negative_prompt = style_negative_prompt

    final_negative_prompt = final_negative_prompt.strip()

    if trigger_word:
        positive_prompt = f"{trigger_word} {positive_prompt}"

    seed = int(randomize_seed_fn(seed, randomize_seed))
    generator = torch.Generator(device="cuda").manual_seed(seed)

    start_time = time.time()

    images = pipe_dev(
        prompt=positive_prompt,
        negative_prompt=final_negative_prompt if final_negative_prompt else None,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=num_images,
        generator=generator,
        output_type="pil",
    ).images

    end_time = time.time()
    duration = end_time - start_time

    image_paths = [save_image(img) for img in images]

    zip_path = None
    if zip_images:
        zip_name = str(uuid.uuid4()) + ".zip"
        with zipfile.ZipFile(zip_name, 'w') as zipf:
            for i, img_path in enumerate(image_paths):
                zipf.write(img_path, arcname=f"Img_{i}.png")
        zip_path = zip_name

    return image_paths, seed, f"{duration:.2f}", zip_path
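# generate_dev() always prepends the "Super Realism" trigger word so the loaded
# LoRA activates, and returns the same 4-tuple as generate_krea: image paths for
# the gallery, the effective seed, the runtime in seconds, and an optional ZIP path.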

# Generation function for flux.1-krea
@spaces.GPU
def generate_krea(
    prompt: str,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 4.5,
    randomize_seed: bool = False,
    num_inference_steps: int = 28,
    num_images: int = 1,
    zip_images: bool = False,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)

    start_time = time.time()

    images = []
    for _ in range(num_images):
        final_img = list(pipe_krea.flux_pipe_call_that_returns_an_iterable_of_images(
            prompt=prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            generator=generator,
            output_type="pil",
            good_vae=good_vae,
        ))[-1]  # Take the final image only
        images.append(final_img)

    end_time = time.time()
    duration = end_time - start_time

    image_paths = [save_image(img) for img in images]

    zip_path = None
    if zip_images:
        zip_name = str(uuid.uuid4()) + ".zip"
        with zipfile.ZipFile(zip_name, 'w') as zipf:
            for i, img_path in enumerate(image_paths):
                zipf.write(img_path, arcname=f"Img_{i}.png")
        zip_path = zip_name

    return image_paths, seed, f"{duration:.2f}", zip_path
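# generate_krea() drains the streaming generator and keeps only its last yield,
# i.e. the final image decoded with the full VAE; intermediate taef1 previews are
# discarded. Note the seed drives a CPU torch.Generator here, unlike the CUDA
# generator used in generate_dev.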

# Main generation function to handle model choice
@spaces.GPU
def generate(
    model_choice: str,
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    randomize_seed: bool = False,
    style_name: str = DEFAULT_STYLE_NAME,
    num_inference_steps: int = 30,
    num_images: int = 1,
    zip_images: bool = False,
    progress=gr.Progress(track_tqdm=True),
):
    if model_choice == "flux.1-dev-realism":
        return generate_dev(
            prompt=prompt,
            negative_prompt=negative_prompt,
            use_negative_prompt=use_negative_prompt,
            seed=seed,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            randomize_seed=randomize_seed,
            style_name=style_name,
            num_inference_steps=num_inference_steps,
            num_images=num_images,
            zip_images=zip_images,
            progress=progress,
        )
    elif model_choice == "flux.1-krea-dev":
        return generate_krea(
            prompt=prompt,
            seed=seed,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            randomize_seed=randomize_seed,
            num_inference_steps=num_inference_steps,
            num_images=num_images,
            zip_images=zip_images,
            progress=progress,
        )
    else:
        raise ValueError("Invalid model choice")
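# The dispatcher forwards only the arguments each backend understands: negative
# prompts and the quality styles apply to flux.1-dev-realism, while flux.1-krea-dev
# receives just the shared seed/size/guidance/step settings.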

# Examples (tailored for flux.1-dev-realism)
examples = [
    "An attractive young woman with blue eyes lying face down on the bed, in the style of animated gifs, light white and light amber, jagged edges, the snapshot aesthetic, timeless beauty, goosepunk, sunrays shine upon it --no freckles --chaos 65 --ar 1:2 --profile yruxpc2 --stylize 750 --v 6.1",
    "Headshot of handsome young man, wearing dark gray sweater with buttons and big shawl collar, brown hair and short beard, serious look on his face, black background, soft studio lighting, portrait photography --ar 85:128 --v 6.0 --style",
    "Purple Dreamy, a medium-angle shot of a young woman with long brown hair, wearing a pair of eye-level glasses, stands in front of a backdrop of purple and white lights.",
    "High-resolution photograph, woman, UHD, photorealistic, shot on a Sony A7III --chaos 20 --ar 1:2 --style raw --stylize 250"
]
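# The Midjourney-style flags in these prompts (--ar, --chaos, --stylize, --v, ...)
# are not parsed by this app; they reach the Flux text encoders verbatim as part
# of the prompt string.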

css = '''
.gradio-container {
    max-width: 590px !important;
    margin: 0 auto !important;
}
h1 {
    text-align: center;
}
footer {
    visibility: hidden;
}
'''

# Gradio interface
with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row():
        prompt = gr.Text(
            label="Prompt",
            show_label=False,
            max_lines=1,
            placeholder="Enter your prompt",
            container=False,
        )
        run_button = gr.Button("Run", scale=0, variant="primary")
    result = gr.Gallery(label="Result", columns=1, show_label=False, preview=True)

    with gr.Row():
        # Model choice radio button above additional options
        model_choice = gr.Radio(
            choices=["flux.1-krea-dev", "flux.1-dev-realism"],
            label="Select Model",
            value="flux.1-krea-dev"
        )

    with gr.Accordion("Additional Options", open=False):
        style_selection = gr.Dropdown(
            label="Quality Style (for flux.1-dev-realism only)",
            choices=STYLE_NAMES,
            value=DEFAULT_STYLE_NAME,
            interactive=True,
        )
        use_negative_prompt = gr.Checkbox(label="Use negative prompt (for flux.1-dev-realism only)", value=False)
        negative_prompt = gr.Text(
            label="Negative prompt",
            max_lines=1,
            placeholder="Enter a negative prompt",
            visible=False,
        )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
        )
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Row():
            width = gr.Slider(
                label="Width",
                minimum=512,
                maximum=2048,
                step=64,
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=512,
                maximum=2048,
                step=64,
                value=1024,
            )
        guidance_scale = gr.Slider(
            label="Guidance Scale",
            minimum=0.1,
            maximum=20.0,
            step=0.1,
            value=4.5,
        )
        num_inference_steps = gr.Slider(
            label="Number of inference steps",
            minimum=1,
            maximum=40,
            step=1,
            value=28,
        )
        num_images = gr.Slider(
            label="Number of images",
            minimum=1,
            maximum=5,
            step=1,
            value=1,
        )
        zip_images = gr.Checkbox(label="Zip generated images", value=False)

    gr.Markdown("### Output Information")
    seed_display = gr.Textbox(label="Seed used", interactive=False)
    generation_time = gr.Textbox(label="Generation time (seconds)", interactive=False)
    zip_file = gr.File(label="Download ZIP")

    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[result, seed_display, generation_time, zip_file],
        fn=generate,
        cache_examples=False,
    )

    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
    )

    gr.on(
        triggers=[
            prompt.submit,
            run_button.click,
        ],
        fn=generate,
        inputs=[
            model_choice,
            prompt,
            negative_prompt,
            use_negative_prompt,
            seed,
            width,
            height,
            guidance_scale,
            randomize_seed,
            style_selection,
            num_inference_steps,
            num_images,
            zip_images,
        ],
        outputs=[result, seed_display, generation_time, zip_file],
        api_name="run",
    )

if __name__ == "__main__":
    demo.queue(max_size=30).launch(mcp_server=True, ssr_mode=False, show_error=True)
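# demo.queue(max_size=30) caps pending jobs; mcp_server=True assumes a Gradio
# release with MCP support installed (e.g. the gradio[mcp] extra), and
# ssr_mode=False disables server-side rendering of the UI.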