Update app.py
app.py
CHANGED
@@ -10,7 +10,7 @@ from huggingface_hub import InferenceClient
 
 # Project by Nymbo
 
-def query_with_auto_routing(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024
+def query_with_auto_routing(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
     """
     Generate images using HF's automatic provider routing
     """
@@ -18,7 +18,6 @@ def query_with_auto_routing(prompt, model, custom_lora, is_negative=False, steps
     print(f"Prompt: {prompt}")
     print(f"Model: {model}")
     print(f"Custom LoRA: {custom_lora}")
-    print(f"Input Image: {input_image is not None}")
     print(f"Parameters - Steps: {steps}, CFG Scale: {cfg_scale}, Seed: {seed}, Strength: {strength}, Width: {width}, Height: {height}")
 
     # Check if the prompt is empty or None
@@ -40,6 +39,9 @@ def query_with_auto_routing(prompt, model, custom_lora, is_negative=False, steps
     if not api_key:
         raise gr.Error("HF_READ_TOKEN not found. Please set your Hugging Face API token.")
 
+    # Initialize client with automatic provider selection (default is "auto")
+    client = InferenceClient(api_key=api_key)
+
     # Determine the model to use
     if custom_lora.strip() != "":
        model_id = custom_lora.strip()
@@ -51,34 +53,16 @@ def query_with_auto_routing(prompt, model, custom_lora, is_negative=False, steps
     # Apply model-specific prompt enhancements
     enhanced_prompt = apply_model_prompt_enhancements(model, enhanced_prompt)
 
-    # Generate image using
-
-
-
-
-
-
-
-
-
-            width=width,
-            height=height,
-            num_inference_steps=steps,
-            guidance_scale=cfg_scale,
-            # Note: strength parameter is handled differently in image_to_image
-        )
-    else:
-        # Text-to-image generation - use automatic provider selection
-        client = InferenceClient(api_key=api_key)
-        image = client.text_to_image(
-            prompt=enhanced_prompt,
-            model=model_id,
-            width=width,
-            height=height,
-            num_inference_steps=steps,
-            guidance_scale=cfg_scale,
-            seed=seed if seed != -1 else None,
-        )
+    # Generate image using automatic provider routing
+    image = client.text_to_image(
+        prompt=enhanced_prompt,
+        model=model_id,
+        width=width,
+        height=height,
+        num_inference_steps=steps,
+        guidance_scale=cfg_scale,
+        seed=seed if seed != -1 else None,
+    )
 
     print(f'Generation {key} completed with automatic routing!')
     return image
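For reference, the routed call introduced above can be exercised outside the Gradio app. This is a minimal sketch, assuming a recent `huggingface_hub` release (where `InferenceClient` defaults to automatic provider routing, as the new comment notes), an `HF_READ_TOKEN` environment variable, and an example model id; none of these specifics come from this diff beyond the parameters themselves.

```python
import os
from huggingface_hub import InferenceClient

# Sketch of the same call pattern used in query_with_auto_routing above.
client = InferenceClient(api_key=os.environ["HF_READ_TOKEN"])  # provider routing defaults to "auto"

image = client.text_to_image(
    prompt="a vintage travel poster of a lighthouse at sunset",  # example prompt
    model="black-forest-labs/FLUX.1-dev",                        # example model id, not from this diff
    width=1024,
    height=1024,
    num_inference_steps=35,
    guidance_scale=7,
    seed=None,  # None mirrors the seed == -1 case above: let the provider pick a seed
)
image.save("flux_output.png")  # text_to_image returns a PIL.Image
```

If the routing behaviour should be explicit rather than implicit, recent releases also accept `InferenceClient(provider="auto", api_key=...)`.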
@@ -347,11 +331,11 @@ def apply_model_prompt_enhancements(model_name, prompt):
 
     return prompt
 
-def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024
+def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
     """
     Main query function - now uses automatic provider routing
     """
-    return query_with_auto_routing(prompt, model, custom_lora, is_negative, steps, cfg_scale, sampler, seed, strength, width, height
+    return query_with_auto_routing(prompt, model, custom_lora, is_negative, steps, cfg_scale, sampler, seed, strength, width, height)
 
 # Custom CSS to hide the footer in the interface
 css = """
@@ -370,24 +354,12 @@ with gr.Blocks(theme='Nymbo/Alyx_Theme') as dalle:
     with gr.Row():
         # Textbox for user to input the prompt
         text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=3, elem_id="prompt-text-input")
-    with gr.Row():
-        # Checkbox for enabling image editing
-        enable_image_editing = gr.Checkbox(label="Enable Image Editing", value=False, elem_id="enable-image-editing")
     with gr.Row():
         # Textbox for custom LoRA input
         custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path (optional)", placeholder="multimodalart/vintage-ads-flux")
-    with gr.Row():
-        # Image upload component (hidden by default)
-        image_upload = gr.Image(
-            label="Upload Image for Editing",
-            type="pil",
-            visible=False,
-            elem_id="image-upload"
-        )
     with gr.Row():
         # Accordion for selecting the model
-
-        with featured_models_accordion:
+        with gr.Accordion("Featured Models", open=False):
            # Textbox for searching models
            model_search = gr.Textbox(label="Filter Models", placeholder="Search for a featured model...", lines=1, elem_id="model-search-input")
            models_list = (
@@ -509,28 +481,6 @@ with gr.Blocks(theme='Nymbo/Alyx_Theme') as dalle:
     # Update model list when search box is used
     model_search.change(filter_models, inputs=model_search, outputs=model)
 
-    # Function to toggle image editing mode
-    def toggle_image_editing(enable_editing):
-        if enable_editing:
-            return (
-                gr.update(visible=True),  # Show image upload
-                gr.update(visible=False),  # Hide featured models accordion
-                gr.update(value="black-forest-labs/FLUX.1-Kontext-dev")  # Set custom LoRA
-            )
-        else:
-            return (
-                gr.update(visible=False),  # Hide image upload
-                gr.update(visible=True),  # Show featured models accordion
-                gr.update(value="")  # Clear custom LoRA
-            )
-
-    # Set up the toggle functionality
-    enable_image_editing.change(
-        toggle_image_editing,
-        inputs=[enable_image_editing],
-        outputs=[image_upload, featured_models_accordion, custom_lora]
-    )
-
     # Tab for advanced settings
     with gr.Tab("Advanced Settings"):
        with gr.Row():
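The `filter_models` callback wired to the search box above lives elsewhere in app.py and is not shown in this diff. As a purely hypothetical sketch of what such a filter usually looks like (the model names below are stand-ins, and `model` is assumed to be the selector component inside the accordion), using the same `gr.update` pattern the removed toggle relied on:

```python
import gradio as gr

# Hypothetical stand-in for the featured-model names defined in app.py
models_list = ["FLUX.1 [dev]", "FLUX.1 [schnell]", "Stable Diffusion XL"]

def filter_models(search_term):
    # Keep only the featured models whose name contains the search text (case-insensitive)
    filtered = [m for m in models_list if search_term.lower() in m.lower()]
    # Return an update so the selector's choices narrow as the user types
    return gr.update(choices=filtered)
```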
@@ -618,9 +568,6 @@ with gr.Blocks(theme='Nymbo/Alyx_Theme') as dalle:
     with gr.Accordion("Advanced Settings Overview", open=False):
         gr.Markdown(
             """
-            ## Enable Image Editing
-            ###### When enabled, this feature allows you to upload an image and use the FLUX.1-Kontext-dev model to edit it. This model specializes in image-to-image transformations, allowing you to modify existing images based on your text prompts. The system automatically uses the Replicate provider for image-to-image generation as it supports this functionality.
-
             ## Negative Prompt
             ###### This box is for telling the AI what you don't want in your images. Think of it as a way to avoid certain elements. For instance, if you don't want blurry images or extra limbs showing up, this is where you'd mention it.
 
@@ -654,7 +601,7 @@ with gr.Blocks(theme='Nymbo/Alyx_Theme') as dalle:
     image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
 
     # Set up button click event to call the main query function
-    text_button.click(query, inputs=[text_prompt, model, custom_lora, negative_prompt, steps, cfg, method, seed, strength, width, height
+    text_button.click(query, inputs=[text_prompt, model, custom_lora, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)
 
 print("Launching Gradio interface...")  # Debug log
 # Launch the Gradio interface without showing the API or sharing externally