Spaces:
Running
on
Zero
Running
on
Zero
Upload 2 files
Browse files
- app.py +2 -2
- requirements.txt +5 -5
app.py
CHANGED
|
@@ -382,7 +382,7 @@ def remove_custom_lora(selected_indices, current_loras, gallery):
|
|
| 382 |
*lora_image
|
| 383 |
)
|
| 384 |
|
| 385 |
-
@spaces.GPU(duration=
|
| 386 |
@torch.inference_mode()
|
| 387 |
def generate_image(prompt_mash: str, steps: int, seed: int, cfg_scale: float, width: int, height: int, sigmas_factor: float, cn_on: bool, progress=gr.Progress(track_tqdm=True)):
|
| 388 |
global pipe, taef1, good_vae, controlnet, controlnet_union
|
|
@@ -438,7 +438,7 @@ def generate_image(prompt_mash: str, steps: int, seed: int, cfg_scale: float, wi
|
|
| 438 |
print(e)
|
| 439 |
raise gr.Error(f"Inference Error: {e}") from e
|
| 440 |
|
| 441 |
-
@spaces.GPU(duration=
|
| 442 |
@torch.inference_mode()
|
| 443 |
def generate_image_to_image(prompt_mash: str, image_input_path_dict: dict, image_strength: float, task_type: str, blur_mask: bool, blur_factor: float,
|
| 444 |
steps: int, cfg_scale: float, width: int, height: int, sigmas_factor: float, seed: int, cn_on: bool, progress=gr.Progress(track_tqdm=True)):
|
|
|
|
| 382 |
*lora_image
|
| 383 |
)
|
| 384 |
|
| 385 |
+
@spaces.GPU(duration=59)
|
| 386 |
@torch.inference_mode()
|
| 387 |
def generate_image(prompt_mash: str, steps: int, seed: int, cfg_scale: float, width: int, height: int, sigmas_factor: float, cn_on: bool, progress=gr.Progress(track_tqdm=True)):
|
| 388 |
global pipe, taef1, good_vae, controlnet, controlnet_union
|
|
|
|
| 438 |
print(e)
|
| 439 |
raise gr.Error(f"Inference Error: {e}") from e
|
| 440 |
|
| 441 |
+
@spaces.GPU(duration=59)
|
| 442 |
@torch.inference_mode()
|
| 443 |
def generate_image_to_image(prompt_mash: str, image_input_path_dict: dict, image_strength: float, task_type: str, blur_mask: bool, blur_factor: float,
|
| 444 |
steps: int, cfg_scale: float, width: int, height: int, sigmas_factor: float, seed: int, cn_on: bool, progress=gr.Progress(track_tqdm=True)):
|
requirements.txt
CHANGED
|
@@ -1,8 +1,8 @@
|
|
| 1 |
torch==2.4.0
|
| 2 |
-
torchao>=0.9.0
|
| 3 |
#git+https://github.com/huggingface/diffusers.git
|
| 4 |
-
diffusers
|
| 5 |
-
transformers
|
| 6 |
#git+https://github.com/huggingface/peft.git
|
| 7 |
peft
|
| 8 |
#git+https://github.com/huggingface/accelerate.git
|
|
@@ -25,6 +25,6 @@ translatepy
|
|
| 25 |
unidecode
|
| 26 |
bitsandbytes
|
| 27 |
gguf
|
| 28 |
-
triton
|
| 29 |
-
para-attn
|
| 30 |
pydantic==2.10.6
|
|
|
|
| 1 |
torch==2.4.0
|
| 2 |
+
#torchao>=0.9.0
|
| 3 |
#git+https://github.com/huggingface/diffusers.git
|
| 4 |
+
diffusers<=0.32.0
|
| 5 |
+
transformers<=4.48.3
|
| 6 |
#git+https://github.com/huggingface/peft.git
|
| 7 |
peft
|
| 8 |
#git+https://github.com/huggingface/accelerate.git
|
|
|
|
| 25 |
unidecode
|
| 26 |
bitsandbytes
|
| 27 |
gguf
|
| 28 |
+
#triton
|
| 29 |
+
#para-attn
|
| 30 |
pydantic==2.10.6
|