linoyts HF Staff commited on
Commit
9c5b32a
·
verified ·
1 Parent(s): 20bc7cb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -5
app.py CHANGED
@@ -5,7 +5,12 @@ import torch
5
  import random
6
  import os
7
 
8
- from diffusers import QwenImageEditInpaintPipeline
 
 
 
 
 
9
  from PIL import Image
10
 
11
  # Set environment variable for parallel loading
@@ -16,8 +21,13 @@ MAX_IMAGE_SIZE = 2048
16
 
17
  # Initialize Qwen Image Edit pipeline
18
  pipe = QwenImageEditInpaintPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16).to("cuda")
 
 
 
 
 
19
 
20
- @spaces.GPU
21
  def infer(edit_images, prompt, negative_prompt="", seed=42, randomize_seed=False, strength=1.0, num_inference_steps=35, true_cfg_scale=4.0, progress=gr.Progress(track_tqdm=True)):
22
  image = edit_images["background"]
23
  mask = edit_images["layers"][0]
@@ -55,9 +65,18 @@ css="""
55
  with gr.Blocks(css=css) as demo:
56
 
57
  with gr.Column(elem_id="col-container"):
58
- gr.Markdown(f"""# Qwen Image Edit Inpainting
59
- Advanced image inpainting using Qwen's Image Edit model
60
- [[model](https://huggingface.co/Qwen/Qwen-Image-Edit)] [[paper](https://arxiv.org/abs/2412.20710)]
 
 
 
 
 
 
 
 
 
61
  """)
62
  with gr.Row():
63
  with gr.Column():
 
5
  import random
6
  import os
7
 
8
+ # from diffusers import QwenImageEditInpaintPipeline
9
+ from optimization import optimize_pipeline_
10
+ from qwenimage.pipeline_qwen_image_edit import QwenImageEditInpaintPipeline
11
+ from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
12
+ from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
13
+
14
  from PIL import Image
15
 
16
  # Set environment variable for parallel loading
 
21
 
22
  # Initialize Qwen Image Edit pipeline
23
  pipe = QwenImageEditInpaintPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16).to("cuda")
24
+ pipe.transformer.__class__ = QwenImageTransformer2DModel
25
+ pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
26
+
27
+ # --- Ahead-of-time compilation ---
28
+ optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt="prompt")
29
 
30
+ @spaces.GPU(duration=120)
31
  def infer(edit_images, prompt, negative_prompt="", seed=42, randomize_seed=False, strength=1.0, num_inference_steps=35, true_cfg_scale=4.0, progress=gr.Progress(track_tqdm=True)):
32
  image = edit_images["background"]
33
  mask = edit_images["layers"][0]
 
65
  with gr.Blocks(css=css) as demo:
66
 
67
  with gr.Column(elem_id="col-container"):
68
+ gr.HTML("""
69
+ <div id="logo-title">
70
+ <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png" alt="Qwen-Image Edit Logo" width="400" style="display: block; margin: 0 auto;">
71
+ <h2 style="font-style: italic;color: #5b47d1;margin-top: -27px !important;margin-left: 133px;">Inpaint</h2>
72
+ </div>
73
+ """)
74
+ gr.Markdown("""
75
+
76
+ Inpaint images with Qwen Image Edit. [Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series.
77
+
78
+ This demo uses the [Qwen-Image-Lightning](https://huggingface.co/lightx2v/Qwen-Image-Lightning) LoRA with AoT compilation and FA3 for accelerated 8-step inference.
79
+ Try on [Qwen Chat](https://chat.qwen.ai/), or [download model](https://huggingface.co/Qwen/Qwen-Image-Edit) to run locally with ComfyUI or diffusers.
80
  """)
81
  with gr.Row():
82
  with gr.Column():