Upload 3 files

- app.py +2 -2
- env.py +6 -1
- requirements.txt +1 -1
app.py CHANGED

@@ -18,7 +18,7 @@ import pandas as pd
 import numpy as np
 from pathlib import Path
 
-from env import models, models_dev, models_schnell, models_fill, models_canny, models_depth, models_edit, num_loras, num_cns, MAX_LORA, HF_TOKEN, single_file_base_models
+from env import models, models_dev, models_schnell, models_fill, models_krea, models_canny, models_depth, models_edit, num_loras, num_cns, MAX_LORA, HF_TOKEN, single_file_base_models
 from mod import (clear_cache, get_repo_safetensors, is_repo_name, is_repo_exists, get_model_trigger,
                  description_ui, compose_lora_json, is_valid_lora, fuse_loras, turbo_loras, save_image, preprocess_i2i_image,
                  get_trigger_word, enhance_prompt, set_control_union_image, get_canny_image, get_depth_image,
@@ -95,7 +95,7 @@ def load_pipeline(pipe, pipe_i2i, repo_id: str, cn_on: bool, model_type: str, ta
     controlnet_model_union_repo = 'InstantX/FLUX.1-dev-Controlnet-Union'
     if task == "Flux Fill" or repo_id in models_fill:
         model_type = "fill"
-        if repo_id in set(models_dev + models_schnell): repo_id = models_fill[0]
+        if repo_id in set(models_dev + models_schnell + models_krea): repo_id = models_fill[0]
     if dtype_str == "BF16": dtype = torch.bfloat16
     else: dtype = torch.bfloat16
     single_file_base_model = single_file_base_models.get(model_type, models[0])
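The functional change in load_pipeline is the fallback branch: Krea checkpoints are now treated like dev and schnell ones, so selecting a Krea repo together with the "Flux Fill" task swaps the repo for the default fill model. A minimal sketch of that branch, using the repo ids named in this commit; resolve_fill_repo is a made-up helper name, app.py does this inline:

# Illustrative stand-ins for the lists imported from env.py
# (repo ids taken from this commit's diff, lists abbreviated).
models_dev = ["camenduru/FLUX.1-dev-diffusers"]
models_schnell = ["black-forest-labs/FLUX.1-schnell"]
models_krea = ["NikolaSigmoid/FLUX.1-Krea-dev"]
models_fill = ["fuliucansheng/FLUX.1-Fill-dev-diffusers"]

def resolve_fill_repo(repo_id: str, task: str) -> str:
    # Hypothetical helper mirroring the patched branch in load_pipeline.
    if task == "Flux Fill" or repo_id in models_fill:
        # A plain dev/schnell/krea checkpoint cannot serve the Fill task,
        # so it is replaced by the default Fill-dev repo.
        if repo_id in set(models_dev + models_schnell + models_krea):
            repo_id = models_fill[0]
    return repo_id

print(resolve_fill_repo("NikolaSigmoid/FLUX.1-Krea-dev", "Flux Fill"))
# -> fuliucansheng/FLUX.1-Fill-dev-diffusers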
env.py CHANGED

@@ -65,6 +65,8 @@ models_dev = [
     "John6666/fluxescore-dev-v10fp16-fp8-flux",
     "John6666/2758-flux-asian-utopian-v30fp8noclip-fp8-flux",
     "trongg/FLUX_dev2pro_nsfw_context_lora",
+    'NikolaSigmoid/FLUX.1-Krea-dev',
+    'prithivMLmods/Flux.1-Krea-Merged-Dev',
     "https://huggingface.co/StableDiffusionVN/SDVN11-Ghibli-Flux/blob/main/SDVN11-Ghibli-Flux_fp8-hyper.safetensors",
     "https://huggingface.co/datasets/John6666/flux1-backup-202410/blob/main/iniverseMixXLSFWNSFW_f1dFP16V10.safetensors",
     "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf",
@@ -79,13 +81,15 @@ models_fill = ["fuliucansheng/FLUX.1-Fill-dev-diffusers"]
 
 models_dedistill = []
 
+models_krea = ["NikolaSigmoid/FLUX.1-Krea-dev"]
+
 models_canny = ["fuliucansheng/FLUX.1-Canny-dev-diffusers", "sayakpaul/FLUX.1-Canny-dev-nf4"]
 
 models_depth = ["fuliucansheng/FLUX.1-Depth-dev-diffusers", "sayakpaul/FLUX.1-Depth-dev-nf4"]
 
 models_edit = ["sayakpaul/edit-control-lr_1e-4-wd_1e-4-gs_15.0-cd_0.1"]
 
-models = models_dev + models_schnell + models_fill
+models = models_dev + models_schnell + models_fill + models_krea
 
 model_trigger = {
     "Raelina/Raemu-Flux": "anime",
@@ -97,6 +101,7 @@ single_file_base_models = {
     "dev": "camenduru/FLUX.1-dev-diffusers",
     "schnell": "black-forest-labs/FLUX.1-schnell",
     "fill": "fuliucansheng/FLUX.1-Fill-dev-diffusers",
+    "krea": "NikolaSigmoid/FLUX.1-Krea-dev",
 }
 
 # List all Models for specified user
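The env.py change adds a new "krea" bucket alongside the existing ones: two Krea checkpoints in models_dev, a dedicated models_krea list folded into the global models list, and a "krea" entry in single_file_base_models. A minimal sketch of how the lookup used in app.py resolves against the updated table; only the dictionary values are copied from the diff, the "unknown" key and the abbreviated models list are illustrative:

# Mirrors the updated single_file_base_models table from the diff above.
single_file_base_models = {
    "dev": "camenduru/FLUX.1-dev-diffusers",
    "schnell": "black-forest-labs/FLUX.1-schnell",
    "fill": "fuliucansheng/FLUX.1-Fill-dev-diffusers",
    "krea": "NikolaSigmoid/FLUX.1-Krea-dev",
}

# app.py picks the base model for single-file checkpoints with a dict lookup,
# falling back to models[0] when the model_type has no entry.
models = ["camenduru/FLUX.1-dev-diffusers"]  # abbreviated stand-in for the full list
for model_type in ("krea", "fill", "unknown"):
    print(model_type, "->", single_file_base_models.get(model_type, models[0]))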
requirements.txt CHANGED

@@ -1,7 +1,7 @@
 torch==2.4.0
 #torchao>=0.9.0
 #git+https://github.com/huggingface/diffusers.git
-diffusers<=0.
+diffusers<=0.34.0
 transformers<=4.48.3
 #git+https://github.com/huggingface/peft.git
 peft
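The only dependency change is a concrete upper bound on diffusers (0.34.0). A small sketch for checking a local environment against the two upper-bounded pins; it uses the standard importlib.metadata module and the packaging library, which is assumed to be installed and is not part of this Space's requirements:

# Check the installed versions against the <= pins in requirements.txt.
from importlib.metadata import version
from packaging.version import Version

pins = {"diffusers": "0.34.0", "transformers": "4.48.3"}  # torch is pinned exactly to 2.4.0
for pkg, ceiling in pins.items():
    installed = Version(version(pkg))
    print(f"{pkg}: installed {installed}, satisfies <= {ceiling}: {installed <= Version(ceiling)}")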