LPX55 committed on
Commit ff57491 · verified · 1 Parent(s): 457e12a

Update app.py

Files changed (1)
  1. app.py +69 -146
app.py CHANGED
@@ -18,57 +18,47 @@ MODELS = {
     "SatPony-Lightning": "John6666/satpony-lightning-v2-sdxl"
 }
 
-def init_pipeline(model_name):
-    config_file = hf_hub_download(
-        "xinsir/controlnet-union-sdxl-1.0",
-        filename="config_promax.json",
-    )
-    config = ControlNetModel_Union.load_config(config_file)
-    controlnet_model = ControlNetModel_Union.from_config(config)
-    model_file = hf_hub_download(
-        "xinsir/controlnet-union-sdxl-1.0",
-        filename="diffusion_pytorch_model_promax.safetensors",
-    )
-    state_dict = load_state_dict(model_file)
-    model, _,_, _,_ = ControlNetModel_Union._load_pretrained_model(
-        controlnet_model, state_dict, model_file, "xinsir/controlnet-union-sdxl-1.0"
-    )
-    model.to(device="cuda", dtype=torch.float16)
-    vae = AutoencoderKL.from_pretrained(
-        "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
-    ).to("cuda")
-    pipe = StableDiffusionXLFillPipeline.from_pretrained(
-        MODELS[model_name],
-        torch_dtype=torch.float16,
-        vae=vae,
-        controlnet=model,
-        variant="fp16",
-    )
-    pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
-    pipe.to("cuda")
-    return pipe
-
-# Initialize with the default model
-default_model_name = "RealVisXL V5.0 Lightning"
-pipe = init_pipeline(default_model_name)
-loaded_model_name = default_model_name # Track the loaded model
-
-def update_pipeline(model_selection):
-    global pipe, loaded_model_name
-    if model_selection != loaded_model_name:
-        print(f"Loading new model: {model_selection}")
-        pipe = init_pipeline(model_selection)
-        loaded_model_name = model_selection
-    return pipe
-
+config_file = hf_hub_download(
+    "xinsir/controlnet-union-sdxl-1.0",
+    filename="config_promax.json",
+)
+config = ControlNetModel_Union.load_config(config_file)
+controlnet_model = ControlNetModel_Union.from_config(config)
+model_file = hf_hub_download(
+    "xinsir/controlnet-union-sdxl-1.0",
+    filename="diffusion_pytorch_model_promax.safetensors",
+)
+state_dict = load_state_dict(model_file)
+model, _, _, _, _ = ControlNetModel_Union._load_pretrained_model(
+    controlnet_model, state_dict, model_file, "xinsir/controlnet-union-sdxl-1.0"
+)
+model.to(device="cuda", dtype=torch.float16)
+vae = AutoencoderKL.from_pretrained(
+    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
+).to("cuda")
+pipe = StableDiffusionXLFillPipeline.from_pretrained(
+    "SG161222/RealVisXL_V5.0_Lightning",
+    torch_dtype=torch.float16,
+    vae=vae,
+    controlnet=model,
+    variant="fp16",
+)
+pipe = StableDiffusionXLFillPipeline.from_pretrained(
+    "GraydientPlatformAPI/lustify-lightning",
+    torch_dtype=torch.float16,
+    vae=vae,
+    controlnet=model,
+)
+pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
+pipe.to("cuda")
+
 @spaces.GPU(duration=12)
 def fill_image(prompt, image, model_selection, paste_back):
-    global pipe
-    update_pipeline(model_selection)
     print(f"Received image: {image}")
     if image is None:
         yield None, None
         return
+
     (
         prompt_embeds,
         negative_prompt_embeds,
@@ -81,6 +71,7 @@ def fill_image(prompt, image, model_selection, paste_back):
     binary_mask = alpha_channel.point(lambda p: p > 0 and 255)
     cnet_image = source.copy()
     cnet_image.paste(0, (0, 0), binary_mask)
+
     for image in pipe(
         prompt_embeds=prompt_embeds,
         negative_prompt_embeds=negative_prompt_embeds,
@@ -89,6 +80,7 @@ def fill_image(prompt, image, model_selection, paste_back):
         image=cnet_image,
     ):
         yield image, cnet_image
+
     print(f"{model_selection=}")
     print(f"{paste_back=}")
     if paste_back:
@@ -197,9 +189,15 @@ def preview_image_and_mask(image, width, height, overlap_percentage, resize_opti
     return preview
 
 @spaces.GPU(duration=12)
-def inpaint(prompt, image, model_selection, paste_back):
+def inpaint(prompt, image, inpaint_model, paste_back):
     global pipe
-    update_pipeline(model_selection)
+    if pipe.config.model_name != MODELS[model_name]:
+        pipe = StableDiffusionXLFillPipeline.from_pretrained(
+            MODELS[model_name],
+            torch_dtype=torch.float16,
+            vae=vae,
+            controlnet=model,
+        ).to("cuda")
     mask = Image.fromarray(image["mask"]).convert("L")
     image = Image.fromarray(image["image"])
     inpaint_final_prompt = f"score_9, score_8_up, score_7_up, {prompt}"
@@ -207,7 +205,7 @@ def inpaint(prompt, image, model_selection, paste_back):
     if paste_back:
         result.paste(image, (0, 0), Image.fromarray(255 - np.array(mask)))
     return result
-
+
 @spaces.GPU(duration=12)
 def outpaint(image, width, height, overlap_percentage, num_inference_steps, resize_option, custom_resize_percentage, prompt_input, alignment, overlap_left, overlap_right, overlap_top, overlap_bottom):
     background, mask = prepare_image_and_mask(image, width, height, overlap_percentage, resize_option, custom_resize_percentage, alignment, overlap_left, overlap_right, overlap_top, overlap_bottom)
@@ -237,8 +235,6 @@ def outpaint(image, width, height, overlap_percentage, num_inference_steps, resi
 
 @spaces.GPU(duration=12)
 def infer(image, width, height, overlap_percentage, num_inference_steps, resize_option, custom_resize_percentage, prompt_input, alignment, overlap_left, overlap_right, overlap_top, overlap_bottom):
-    global pipe
-    update_pipeline(model_selection) # Ensure model_selection is defined
     background, mask = prepare_image_and_mask(image, width, height, overlap_percentage, resize_option, custom_resize_percentage, alignment, overlap_left, overlap_right, overlap_top, overlap_bottom)
     if not can_expand(background.width, background.height, width, height, alignment):
         alignment = "Middle"
@@ -263,7 +259,7 @@ def infer(image, width, height, overlap_percentage, num_inference_steps, resize_
     image = image.convert("RGBA")
     cnet_image.paste(image, (0, 0), mask)
     yield background, cnet_image
-
+
 def use_output_as_input(output_image):
     return gr.update(value=output_image[1])
 
@@ -311,16 +307,13 @@ def update_history(new_image, history):
     return history
 
 css = """
-.nukgradio-container {
+.nulgradio-container {
     width: 86vw !important;
 }
-.contain {
+.nulcontain {
    overflow-y: scroll !important;
    padding: 10px 40px !important;
 }
-div#component-8 {
-    min-height: fit-content !important;
-}
 div#component-17 {
     height: auto !important;
 }
@@ -338,41 +331,35 @@ title = """<h1 align="center">Diffusers Image Outpaint</h1>
 """
 
 with gr.Blocks(css=css, fill_height=True) as demo:
-    gr.Markdown("# SDXL Lightning Diffusers Inpaint and Outpaint")
+    gr.Markdown("# Diffusers Inpaint and Outpaint")
     with gr.Tabs():
         with gr.TabItem("Inpaint"):
             with gr.Column():
                 with gr.Row():
-                    with gr.Column():
-                        input_image = gr.ImageMask(
-                            type="pil", label="Input Image", layers=True
-                        )
-
                     with gr.Column():
                         prompt = gr.Textbox(
                             label="Prompt",
                             info="Describe what to inpaint the mask with",
                             lines=3,
                         )
+                    with gr.Column():
                         model_selection = gr.Dropdown(
                             choices=list(MODELS.keys()),
-                            value=default_model_name,
+                            value="RealVisXL V5.0 Lightning",
                             label="Model",
                         )
-                        paste_back = gr.Checkbox(True, label="Paste back original")
-                        loading_message = gr.Label(label="Status", value="", visible=False) # Added loading message label
-
-                        result = ImageSlider(
-                            interactive=False,
-                            label="Generated Image",
-                        )
-
-
-                        run_button = gr.Button("Generate")
-
-
+                with gr.Row():
+                    run_button = gr.Button("Generate")
+                    paste_back = gr.Checkbox(True, label="Paste back original")
+                with gr.Row(equal_height=False):
+                    input_image = gr.ImageMask(
+                        type="pil", label="Input Image", layers=True
+                    )
+                    result = ImageSlider(
+                        interactive=False,
+                        label="Generated Image",
+                    )
                 use_as_input_button = gr.Button("Use as Input Image", visible=False)
-
                 use_as_input_button.click(
                     fn=use_output_as_input, inputs=[result], outputs=[input_image]
                 )
@@ -384,24 +371,10 @@ with gr.Blocks(css=css, fill_height=True) as demo:
                     fn=lambda: gr.update(visible=False),
                     inputs=None,
                     outputs=use_as_input_button,
-                ).then(
-                    fn=lambda: gr.update(value="Loading Model...", visible=True), # Show loading message
-                    inputs=None,
-                    outputs=loading_message
                 ).then(
                     fn=fill_image,
                     inputs=[prompt, input_image, model_selection, paste_back],
                     outputs=[result],
-                ).then(
-                    fn=lambda: gr.update(value="Model Loaded", visible=True), # Show loaded message
-                    inputs=None,
-                    outputs=loading_message,
-                    queue=False
-                ).then(
-                    fn=lambda: gr.update(value="", visible=False), # Hide loading message
-                    inputs=None,
-                    outputs=loading_message,
-                    queue=False
                 ).then(
                     fn=lambda: gr.update(visible=True),
                     inputs=None,
@@ -415,24 +388,10 @@ with gr.Blocks(css=css, fill_height=True) as demo:
                     fn=lambda: gr.update(visible=False),
                     inputs=None,
                     outputs=use_as_input_button,
-                ).then(
-                    fn=lambda: gr.update(value="Loading Model...", visible=True), # Show loading message
-                    inputs=None,
-                    outputs=loading_message
                 ).then(
                     fn=fill_image,
                     inputs=[prompt, input_image, model_selection, paste_back],
                     outputs=[result],
-                ).then(
-                    fn=lambda: gr.update(value="Model Loaded", visible=True), # Show loaded message
-                    inputs=None,
-                    outputs=loading_message,
-                    queue=False
-                ).then(
-                    fn=lambda: gr.update(value="", visible=False), # Hide loading message
-                    inputs=None,
-                    outputs=loading_message,
-                    queue=False
                 ).then(
                     fn=lambda: gr.update(visible=True),
                     inputs=None,
@@ -485,7 +444,7 @@ with gr.Blocks(css=css, fill_height=True) as demo:
                     overlap_percentage = gr.Slider(
                         label="Mask overlap (%)",
                         minimum=1,
-                        maximum=100,
+                        maximum=50,
                         value=10,
                         step=1
                     )
@@ -525,13 +484,9 @@ with gr.Blocks(css=css, fill_height=True) as demo:
                     interactive=False,
                     label="Generated Image",
                 )
-                loading_message_outpaint = gr.Label(label="Status", value="", visible=False) # Added loading message label
                 use_as_input_button_outpaint = gr.Button("Use as Input Image", visible=False)
-                with gr.Accordion():
-                    history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
-                    preview_image = gr.Image(label="Preview")
-
-
+                history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
+                preview_image = gr.Image(label="Preview")
 
                 target_ratio.change(
                     fn=preload_presets,
@@ -566,76 +521,43 @@ with gr.Blocks(css=css, fill_height=True) as demo:
                     inputs=[result_outpaint],
                     outputs=[input_image_outpaint]
                 )
-
                 runout_button.click(
                     fn=clear_result,
                     inputs=None,
                     outputs=result_outpaint,
-                ).then(
-                    fn=lambda: gr.update(value="Loading Model...", visible=True), # Show loading message
-                    inputs=None,
-                    outputs=loading_message_outpaint
                 ).then(
                     fn=infer,
                     inputs=[input_image_outpaint, width_slider, height_slider, overlap_percentage, num_inference_steps,
                             resize_option, custom_resize_percentage, prompt_input, alignment_dropdown,
                             overlap_left, overlap_right, overlap_top, overlap_bottom],
                     outputs=[result_outpaint],
-                ).then(
-                    fn=lambda: gr.update(value="Model Loaded", visible=True), # Show loaded message
-                    inputs=None,
-                    outputs=loading_message_outpaint,
-                    queue=False
                 ).then(
                     fn=lambda x, history: update_history(x[1], history),
                    inputs=[result_outpaint, history_gallery],
                     outputs=history_gallery,
-                ).then(
-                    fn=lambda: gr.update(value="", visible=False), # Hide loading message
-                    inputs=None,
-                    outputs=loading_message_outpaint,
-                    queue=False
                 ).then(
                     fn=lambda: gr.update(visible=True),
                     inputs=None,
-                    outputs=use_as_input_button_outpaint,
+                    outputs=[use_as_input_button_outpaint],
                 )
                 prompt_input.submit(
                     fn=clear_result,
                     inputs=None,
                     outputs=result_outpaint,
-                ).then(
-                    fn=lambda: gr.update(visible=False),
-                    inputs=None,
-                    outputs=use_as_input_button_outpaint,
-                ).then(
-                    fn=lambda: gr.update(value="Loading Model...", visible=True), # Show loading message
-                    inputs=None,
-                    outputs=loading_message_outpaint
                 ).then(
                     fn=infer,
                     inputs=[input_image_outpaint, width_slider, height_slider, overlap_percentage, num_inference_steps,
                             resize_option, custom_resize_percentage, prompt_input, alignment_dropdown,
                             overlap_left, overlap_right, overlap_top, overlap_bottom],
                     outputs=[result_outpaint],
-                ).then(
-                    fn=lambda: gr.update(value="Model Loaded", visible=True), # Show loaded message
-                    inputs=None,
-                    outputs=loading_message_outpaint,
-                    queue=False
                 ).then(
                     fn=lambda x, history: update_history(x[1], history),
                     inputs=[result_outpaint, history_gallery],
                     outputs=history_gallery,
-                ).then(
-                    fn=lambda: gr.update(value="", visible=False), # Hide loading message
-                    inputs=None,
-                    outputs=loading_message_outpaint,
-                    queue=False
                 ).then(
                     fn=lambda: gr.update(visible=True),
                     inputs=None,
-                    outputs=use_as_input_button_outpaint,
+                    outputs=[use_as_input_button_outpaint],
                 )
                 preview_button.click(
                     fn=preview_image_and_mask,
@@ -644,4 +566,5 @@ with gr.Blocks(css=css, fill_height=True) as demo:
                     outputs=[preview_image],
                     queue=False
                 )
+
 demo.launch(show_error=True)