Files changed (6)
  1. .DS_Store +0 -0
  2. README.md +2 -2
  3. app.py +30 -168
  4. lora_models.json +2 -7
  5. readme.md +2 -2
  6. requirements.txt +1 -4
.DS_Store DELETED
Binary file (6.15 kB)
 
README.md CHANGED
@@ -4,9 +4,9 @@ emoji: 🏆
 colorFrom: blue
 colorTo: purple
 sdk: gradio
-sdk_version: 4.39.0
+sdk_version: 5.14.0
 app_file: app.py
 pinned: true
 ---

-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,106 +1,57 @@
-import spaces
-
 import gradio as gr
 import numpy as np
 import os
+import spaces
 import random
 import json
+# from image_gen_aux import DepthPreprocessor
 from PIL import Image
 import torch
 from torchvision import transforms
-import zipfile

 from diffusers import FluxFillPipeline, AutoencoderKL
 from PIL import Image
-# from samgeo.text_sam import LangSAM
+

 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048

-# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-# sam = LangSAM(model_type="sam2-hiera-large").to(device)
-
 pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16).to("cuda")
+# pipe.load_lora_weights("Himanshu806/testLora")
+# pipe.enable_lora()

-# with open("lora_models.json", "r") as f:
-#     lora_models = json.load(f)
+with open("lora_models.json", "r") as f:
+    lora_models = json.load(f)

-# def download_model(model_name, model_path):
-#     print(f"Downloading model: {model_name} from {model_path}")
-#     try:
-#         pipe.load_lora_weights(model_path)
-#         print(f"Successfully downloaded model: {model_name}")
-#     except Exception as e:
-#         print(f"Failed to download model: {model_name}. Error: {e}")
+def download_model(model_name, model_path):
+    print(f"Downloading model: {model_name} from {model_path}")
+    try:
+        pipe.load_lora_weights(model_path)
+        print(f"Successfully downloaded model: {model_name}")
+    except Exception as e:
+        print(f"Failed to download model: {model_name}. Error: {e}")

-# # Iterate through the models and download each one
-# for model_name, model_path in lora_models.items():
-#     download_model(model_name, model_path)
+# Iterate through the models and download each one
+for model_name, model_path in lora_models.items():
+    download_model(model_name, model_path)

-# lora_models["None"] = None
+lora_models["None"] = None

-# def calculate_optimal_dimensions(image: Image.Image):
-#     # Extract the original dimensions
-#     original_width, original_height = image.size
-
-#     # Set constants
-#     MIN_ASPECT_RATIO = 9 / 16
-#     MAX_ASPECT_RATIO = 16 / 9
-#     FIXED_DIMENSION = 1024
-
-#     # Calculate the aspect ratio of the original image
-#     original_aspect_ratio = original_width / original_height
-
-#     # Determine which dimension to fix
-#     if original_aspect_ratio > 1:  # Wider than tall
-#         width = FIXED_DIMENSION
-#         height = round(FIXED_DIMENSION / original_aspect_ratio)
-#     else:  # Taller than wide
-#         height = FIXED_DIMENSION
-#         width = round(FIXED_DIMENSION * original_aspect_ratio)
-
-#     # Ensure dimensions are multiples of 8
-#     width = (width // 8) * 8
-#     height = (height // 8) * 8
-
-#     # Enforce aspect ratio limits
-#     calculated_aspect_ratio = width / height
-#     if calculated_aspect_ratio > MAX_ASPECT_RATIO:
-#         width = (height * MAX_ASPECT_RATIO // 8) * 8
-#     elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
-#         height = (width / MIN_ASPECT_RATIO // 8) * 8
-
-#     # Ensure width and height remain above the minimum dimensions
-#     width = max(width, 576) if width == FIXED_DIMENSION else width
-#     height = max(height, 576) if height == FIXED_DIMENSION else height
-
-#     return width, height

 @spaces.GPU(durations=300)
-# def infer(edit_images, prompt, width, height, lora_model, strength, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
-def infer(edit_images, prompt, width, height, strength, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+def infer(edit_images, prompt, width, height, lora_model, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
     # pipe.enable_xformers_memory_efficient_attention()
-    gr.Info("Infering")
-
-    # if lora_model != "None":
-    #     pipe.load_lora_weights(lora_models[lora_model])
-    #     pipe.enable_lora()

-    gr.Info("starting checks")
+    if lora_model != "None":
+        pipe.load_lora_weights(lora_models[lora_model])
+        pipe.enable_lora()

     image = edit_images["background"]
-    mask = edit_images["layers"][0]
-
-    if not image:
-        gr.Info("Please upload an image.")
-        return None, None
-
     # width, height = calculate_optimal_dimensions(image)
+    mask = edit_images["layers"][0]
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)

     # controlImage = processor(image)
-    gr.Info("generating image")
     image = pipe(
         # mask_image_latent=vae.encode(controlImage),
         prompt=prompt,
@@ -110,10 +61,8 @@ def infer(edit_images, prompt, width, height, strength, seed=42, randomize_seed=
         height=height,
         width=width,
         guidance_scale=guidance_scale,
-        # strength=strength,
         num_inference_steps=num_inference_steps,
         generator=torch.Generator(device='cuda').manual_seed(seed),
-        # generator=torch.Generator().manual_seed(seed),
         # lora_scale=0.75 // not supported in this version
     ).images[0]

@@ -123,55 +72,6 @@ def infer(edit_images, prompt, width, height, strength, seed=42, randomize_seed=
     return output_image_jpg, seed
     # return image, seed

-def download_image(image):
-    if isinstance(image, np.ndarray):
-        image = Image.fromarray(image)
-    image.save("output.png", "PNG")
-    return "output.png"
-
-def save_details(result, edit_image, prompt, strength, seed, guidance_scale, num_inference_steps):
-    image = edit_image["background"]
-    mask = edit_image["layers"][0]
-
-    if isinstance(result, np.ndarray):
-        result = Image.fromarray(result)
-    if isinstance(image, np.ndarray):
-        image = Image.fromarray(image)
-    if isinstance(mask, np.ndarray):
-        mask = Image.fromarray(mask)
-
-    result.save("saved_result.png", "PNG")
-    image.save("saved_image.png", "PNG")
-    mask.save("saved_mask.png", "PNG")
-
-    details = {
-        "prompt": prompt,
-        "strength": strength,
-        "seed": seed,
-        "guidance_scale": guidance_scale,
-        "num_inference_steps": num_inference_steps
-    }
-
-    with open("details.json", "w") as f:
-        json.dump(details, f)
-
-    # Create a ZIP file
-    with zipfile.ZipFile("output.zip", "w") as zipf:
-        zipf.write("saved_result.png")
-        zipf.write("saved_image.png")
-        zipf.write("saved_mask.png")
-        zipf.write("details.json")
-
-    return "output.zip"
-
-def set_image_as_inpaint(image):
-    return image
-
-# def generate_mask(image, click_x, click_y):
-#     text_prompt = "face"
-#     mask = sam.predict(image, text_prompt, box_threshold=0.24, text_threshold=0.24)
-#     return mask
-
 examples = [
     "photography of a young woman, accent lighting, (front view:1.4), "
     # "a tiny astronaut hatching from an egg on the moon",
@@ -210,11 +110,11 @@ with gr.Blocks(css=css) as demo:
                 container=False,
             )

-            # lora_model = gr.Dropdown(
-            #     label="Select LoRA Model",
-            #     choices=list(lora_models.keys()),
-            #     value="None",
-            # )
+            lora_model = gr.Dropdown(
+                label="Select LoRA Model",
+                choices=list(lora_models.keys()),
+                value="None",
+            )

             run_button = gr.Button("Run")

@@ -250,16 +150,6 @@ with gr.Blocks(css=css) as demo:
                 value=28,
             )

-            with gr.Row():
-
-                strength = gr.Slider(
-                    label="Strength",
-                    minimum=0,
-                    maximum=1,
-                    step=0.01,
-                    value=0.85,
-                )
-
             with gr.Row():

                 width = gr.Slider(
@@ -281,38 +171,10 @@ with gr.Blocks(css=css) as demo:
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn = infer,
-        inputs = [edit_image, prompt, width, height, strength, seed, randomize_seed, guidance_scale, num_inference_steps],
+        inputs = [edit_image, prompt, width, height, lora_model, seed, randomize_seed, guidance_scale, num_inference_steps],
         outputs = [result, seed]
     )

-    download_button = gr.Button("Download Image as PNG")
-    set_inpaint_button = gr.Button("Set Image as Inpaint")
-    save_button = gr.Button("Save Details")
-
-    download_button.click(
-        fn=download_image,
-        inputs=[result],
-        outputs=gr.File(label="Download Image")
-    )
-
-    set_inpaint_button.click(
-        fn=set_image_as_inpaint,
-        inputs=[result],
-        outputs=[edit_image]
-    )
-
-    save_button.click(
-        fn=save_details,
-        inputs=[result, edit_image, prompt, strength, seed, guidance_scale, num_inference_steps],
-        outputs=gr.File(label="Download/Save Status")
-    )
-
-    # edit_image.select(
-    #     fn=generate_mask,
-    #     inputs=[edit_image, gr.Number(), gr.Number()],
-    #     outputs=[edit_image]
-    # )
-
 # demo.launch()
 PASSWORD = os.getenv("GRADIO_PASSWORD")
 USERNAME = os.getenv("GRADIO_USERNAME")
@@ -325,4 +187,4 @@ def authenticate(username, password):
         return False
 # Launch the app with authentication

-demo.launch(debug=True, auth=authenticate)
+demo.launch(auth=authenticate)
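Note on the final hunk: the new launch call drops `debug=True` but keeps the username/password gate, and the body of `authenticate` sits outside the shown hunks (only its trailing `return False` is visible). A minimal sketch of how such an env-var gate is commonly written; the comparison logic here is an assumption, not something the diff confirms:

```python
import os
import gradio as gr

# Credentials come from the Space's environment/secrets, as in the diff.
USERNAME = os.getenv("GRADIO_USERNAME")
PASSWORD = os.getenv("GRADIO_PASSWORD")

def authenticate(username, password):
    # Assumed check; the diff only shows the trailing `return False`.
    if username == USERNAME and password == PASSWORD:
        return True
    else:
        return False

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")  # stands in for the real inpainting UI

# Gradio calls authenticate(username, password) on each login attempt.
demo.launch(auth=authenticate)
```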
lora_models.json CHANGED
@@ -1,9 +1,4 @@
 {
-    "RahulFineTuned (qwertyui)": "Himanshu806/testLora",
-    "femaleIndian (indmodelf)": "Himanshu806/ind-f-model",
-    "KodaRealistic (flmft style)": "alvdansen/flux-koda",
-    "superRealism (Super Realism)": "strangerzonehf/Flux-Super-Realism-LoRA",
-    "ThirdMarch blue (blueThirdMarchDress)": "Himanshu806/bluethirdmarchdress",
-    "manosouthf (manoSouthf)": "Himanshu806/manosouthf",
-    "greenDress (onlyGreenDress)": "Himanshu806/onlygreendress"
+    "RahulFineTuned": "Himanshu806/testLora",
+    "KodaRealistic": "alvdansen/flux-koda"
 }
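The surviving entries map a dropdown display name to a Hugging Face Hub repo id. For reference, a self-contained sketch of how the new app.py code consumes this file at startup, mirroring the additions in the app.py hunks above (the print stands in for `pipe.load_lora_weights`, which needs the GPU pipeline):

```python
import json

# Load the display-name -> Hub-repo mapping committed in this change.
with open("lora_models.json", "r") as f:
    lora_models = json.load(f)

# app.py appends a "None" sentinel so the UI can run without any LoRA.
lora_models["None"] = None

for name, repo in lora_models.items():
    if repo is None:
        continue  # sentinel entry, nothing to fetch
    # In app.py this is pipe.load_lora_weights(repo), wrapped in try/except.
    print(f"Would pre-fetch LoRA weights for {name!r} from {repo!r}")
```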
readme.md CHANGED
@@ -1,10 +1,10 @@
 ---
-title: Inpainting Test UI
+title: Inpainting
 emoji: 🏆
 colorFrom: blue
 colorTo: purple
 sdk: gradio
-sdk_version: 5.8.0
+sdk_version: 4.39.0
 app_file: app.py
 pinned: true
 ---
requirements.txt CHANGED
@@ -1,5 +1,5 @@
 git+https://github.com/asomoza/image_gen_aux.git
-git+https://github.com/huggingface/diffusers
+git+https://github.com/huggingface/diffusers.git
 transformers
 accelerate
 safetensors
@@ -8,6 +8,3 @@ peft
 xformers
 torchvision
 torch
-opencv-python
-segment-geospatial
-groundingdino-py
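The edited line only appends the canonical `.git` suffix to the GitHub URL; either form installs diffusers from source. Installing from source matters here because app.py imports `FluxFillPipeline`, which only exists in recent diffusers versions. A quick sanity check for an environment built from this file:

```python
# Fails with ImportError on diffusers releases that predate the
# FLUX.1-Fill support that app.py builds on.
from diffusers import FluxFillPipeline

print("FluxFillPipeline import OK")
```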