Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -28,16 +28,19 @@ torch.backends.cuda.matmul.allow_tf32 = True
 
 pipe = FluxPipeline.from_pretrained("AlekseyCalvin/HistoricColorSoonr_v2_FluxSchnell_Diffusers", ignore_mismatched_sizes=True, torch_dtype=torch.bfloat16)
 pipe.to(device="cuda", dtype=torch.bfloat16)
-clipmodel = '
-selectedprompt = 'long' # 'tiny' (51 tokens), 'short' (75), 'med' (116), 'long' (203)
+clipmodel = 'norm' # 'norm', 'long' (my fine-tunes) - 'oai', 'orgL' (OpenAI / BeichenZhang original)
 
 if clipmodel == "long":
     model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
     config = CLIPConfig.from_pretrained(model_id)
-    maxtokens =
+    maxtokens = 77
+if clipmodel == "norm":
+    model_id = "zer0int/CLIP-GmP-ViT-L-14"
+    config = CLIPConfig.from_pretrained(model_id)
+    maxtokens = 77
 clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to("cuda")
 clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
-config.text_config.max_position_embeddings =
+config.text_config.max_position_embeddings = 77
 
 pipe.tokenizer = clip_processor.tokenizer
 pipe.text_encoder = clip_model.text_model
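For context, the hunk above swaps the FLUX pipeline's CLIP text encoder for one of zer0int's fine-tunes. A minimal standalone sketch of the same swap, following the 'norm' branch of the diff (the model IDs and the 77-token limit come from the added lines; the loading order and everything else here is an illustrative assumption, not the Space's full app.py):

import torch
from diffusers import FluxPipeline
from transformers import CLIPConfig, CLIPModel, CLIPProcessor

# Load the Schnell-based pipeline named in the diff.
pipe = FluxPipeline.from_pretrained(
    "AlekseyCalvin/HistoricColorSoonr_v2_FluxSchnell_Diffusers",
    torch_dtype=torch.bfloat16,
)
pipe.to("cuda")

# "norm" branch: GmP fine-tune of CLIP ViT-L/14 with the standard 77-token context.
model_id = "zer0int/CLIP-GmP-ViT-L-14"
config = CLIPConfig.from_pretrained(model_id)
config.text_config.max_position_embeddings = 77
maxtokens = 77

clip_model = CLIPModel.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True
).to("cuda")
clip_processor = CLIPProcessor.from_pretrained(
    model_id, padding="max_length", max_length=maxtokens, truncation=True
)

# Swap only the CLIP branch of FLUX; the second (T5) encoder and its tokenizer stay as shipped.
pipe.tokenizer = clip_processor.tokenizer
pipe.text_encoder = clip_model.text_model

If clipmodel is set to 'long', the same wiring applies with the LongCLIP checkpoint; the diff pins its maxtokens to 77 as well.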
@@ -189,17 +192,17 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
     with gr.Accordion("Advanced Settings", open=True):
         with gr.Column():
             with gr.Row():
-                cfg_scale = gr.Slider(label="CFG Scale", minimum=
+                cfg_scale = gr.Slider(label="CFG Scale", minimum=0, maximum=20, step=0.5, value=0.5)
                 steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=6)
 
             with gr.Row():
-                width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=
+                width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
                 height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
 
             with gr.Row():
                 randomize_seed = gr.Checkbox(True, label="Randomize seed")
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
-                lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=2.0, step=0.01, value=0.
+                lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=2.0, step=0.01, value=0.9)
 
     gallery.select(
         update_selection,
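The sliders above only declare the UI controls; the callback that consumes them is outside this diff. A hedged sketch of how such values are typically fed into a FLUX + LoRA pipeline call, reusing the pipe built in the first hunk (the function name generate_image, the seed handling, and the wiring are assumptions, not code from this Space; MAX_SEED's value is likewise assumed):

import random
import torch

MAX_SEED = 2**32 - 1  # assumed upper bound matching the seed slider's maximum

def generate_image(prompt, cfg_scale, steps, width, height, seed, randomize_seed, lora_scale):
    # Draw a fresh seed when the "Randomize seed" checkbox is ticked.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(int(seed))
    image = pipe(
        prompt=prompt,
        guidance_scale=cfg_scale,        # "CFG Scale" slider
        num_inference_steps=steps,       # "Steps" slider
        width=width,                     # "Width"/"Height" sliders (steps of 64)
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},  # applies the "LoRA Scale" slider to loaded LoRA weights
    ).images[0]
    return image, seed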