Spestly committed on
Commit
e592ca3
Β·
verified Β·
1 Parent(s): 8c48e8f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -35
app.py CHANGED
@@ -3,30 +3,31 @@ from diffusers import DiffusionPipeline
3
  import torch
4
  import spaces
5
 
6
- # Load the model
7
- # The model will be downloaded and cached the first time the app runs.
8
- pipe = DiffusionPipeline.from_pretrained(
9
- "Spestly/OdysseyXL-V2.5",
10
- torch_dtype=torch.float16,
11
- use_safetensors=True
12
- )
13
- # Move the pipeline to the GPU
14
- pipe.to("cuda")
15
 
16
- @spaces.GPU
17
- def generate_image(prompt, negative_prompt, guidance_scale, num_inference_steps):
18
- """
19
- Generates an image from a text prompt using the OdysseyXL V2.5 model.
20
 
21
- Args:
22
- prompt (str): The text prompt to generate the image from.
23
- negative_prompt (str): The negative text prompt.
24
- guidance_scale (float): The guidance scale for the generation.
25
- num_inference_steps (int): The number of inference steps.
 
 
 
 
 
 
26
 
27
- Returns:
28
- PIL.Image.Image: The generated image.
29
- """
30
  image = pipe(
31
  prompt=prompt,
32
  negative_prompt=negative_prompt,
@@ -35,14 +36,18 @@ def generate_image(prompt, negative_prompt, guidance_scale, num_inference_steps)
35
  ).images[0]
36
  return image
37
 
38
- # --- Gradio Interface ---
39
-
40
  with gr.Blocks(css="style.css") as demo:
41
- gr.Markdown("# 🎨 OdysseyXL V2.5 Image Generation")
42
- gr.Markdown("A Gradio UI for the [Spestly/OdysseyXL V2.5](https://huggingface.co/Spestly/OdysseyXL%20V2.5) SDXL model, optimized for ZeroGPU.")
43
-
44
  with gr.Row():
45
  with gr.Column(scale=2):
 
 
 
 
 
46
  prompt = gr.Textbox(
47
  label="Prompt",
48
  show_label=False,
@@ -70,27 +75,28 @@ with gr.Blocks(css="style.css") as demo:
70
  value=30
71
  )
72
  run_button = gr.Button("Generate Image", variant="primary")
 
73
  with gr.Column(scale=1):
74
  image_output = gr.Image(label="Generated Image", show_label=False)
75
 
76
  gr.Examples(
77
  examples=[
78
- ["A futuristic cityscape, vibrant neon colors, ultra-realistic, 8K", "blurry, low quality", 7.5, 30],
79
- ["A majestic lion with a crown of stars, cosmic background, fantasy art", "cartoon, sketch", 8.0, 40],
80
- ["An enchanted forest at night, glowing mushrooms, fireflies, mystical atmosphere", "daytime, bright", 7.0, 35],
81
- ["A delicious-looking gourmet burger on a wooden table, hyperrealistic food photography", "messy, unappetizing", 7.5, 25]
82
  ],
83
- inputs=[prompt, negative_prompt, guidance_scale, num_inference_steps],
84
  outputs=image_output,
85
  fn=generate_image,
86
- cache_examples=True,
87
  )
88
 
89
  run_button.click(
90
- fn=generate_image,
91
- inputs=[prompt, negative_prompt, guidance_scale, num_inference_steps],
92
  outputs=image_output
93
  )
94
 
95
  if __name__ == "__main__":
96
- demo.launch()
 
3
  import torch
4
  import spaces
5
 
6
# Display label -> Hugging Face model id for every published OdysseyXL
# checkpoint (insertion order drives the dropdown ordering below).
model_options = {
    f"OdysseyXL {tag}": f"Spestly/OdysseyXL-{tag}"
    for tag in ("V2.5", "V2", "V1", "Origin")
}

# Pipelines already initialized in this process, keyed by display label,
# so switching models does not re-download / re-build the weights.
loaded_pipelines = {}
 
 
15
 
16
def load_model(model_name):
    """Return the DiffusionPipeline for *model_name*, loading it on first use.

    Args:
        model_name (str): A key of ``model_options`` (e.g. "OdysseyXL V2.5").
            An unknown name raises ``KeyError`` from the dict lookup.

    Returns:
        The cached pipeline, already moved to CUDA.
    """
    # Fast path: already loaded in this process.
    if model_name in loaded_pipelines:
        return loaded_pipelines[model_name]

    pipe = DiffusionPipeline.from_pretrained(
        model_options[model_name],
        torch_dtype=torch.float16,
        use_safetensors=True
    )
    # NOTE(review): every pipeline stays resident on the GPU once loaded;
    # with four SDXL checkpoints this may exhaust VRAM — confirm capacity
    # or evict the previous pipeline before loading another.
    pipe.to("cuda")
    loaded_pipelines[model_name] = pipe
    return pipe
27
 
28
@spaces.GPU
def generate_image(model_name, prompt, negative_prompt, guidance_scale, num_inference_steps):
    """Generate an image from a text prompt with the selected OdysseyXL model.

    Args:
        model_name (str): Key into ``model_options`` selecting the checkpoint.
        prompt (str): The text prompt to generate the image from.
        negative_prompt (str): The negative text prompt.
        guidance_scale (float): Classifier-free guidance scale.
        num_inference_steps (int): Number of denoising steps.

    Returns:
        PIL.Image.Image: The generated image.
    """
    pipe = load_model(model_name)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    ).images[0]
    return image
38
 
39
+ # --- Gradio UI ---
 
40
  with gr.Blocks(css="style.css") as demo:
41
+ gr.Markdown("# 🎨 OdysseyXL Image Generation")
42
+ gr.Markdown("Choose from multiple OdysseyXL model versions running on ZeroGPU (H200).")
43
+
44
  with gr.Row():
45
  with gr.Column(scale=2):
46
+ model_name = gr.Dropdown(
47
+ label="Model Version",
48
+ choices=list(model_options.keys()),
49
+ value="OdysseyXL V2.5"
50
+ )
51
  prompt = gr.Textbox(
52
  label="Prompt",
53
  show_label=False,
 
75
  value=30
76
  )
77
  run_button = gr.Button("Generate Image", variant="primary")
78
+
79
  with gr.Column(scale=1):
80
  image_output = gr.Image(label="Generated Image", show_label=False)
81
 
82
  gr.Examples(
83
  examples=[
84
+ ["OdysseyXL V2.5", "A futuristic cityscape, vibrant neon colors, ultra-realistic, 8K", "blurry, low quality", 7.5, 30],
85
+ ["OdysseyXL V2.1", "A majestic lion with a crown of stars, cosmic background, fantasy art", "cartoon, sketch", 8.0, 40],
86
+ ["OdysseyXL V1.9", "An enchanted forest at night, glowing mushrooms, fireflies, mystical atmosphere", "daytime, bright", 7.0, 35],
87
+ ["OdysseyXL V1.5", "A delicious-looking gourmet burger on a wooden table, hyperrealistic food photography", "messy, unappetizing", 7.5, 25]
88
  ],
89
+ inputs=[model_name, prompt, negative_prompt, guidance_scale, num_inference_steps],
90
  outputs=image_output,
91
  fn=generate_image,
92
+ cache_examples=True
93
  )
94
 
95
  run_button.click(
96
+ fn=generate_image,
97
+ inputs=[model_name, prompt, negative_prompt, guidance_scale, num_inference_steps],
98
  outputs=image_output
99
  )
100
 
101
if __name__ == "__main__":
    # Start the Gradio server (blocking call).
    demo.launch()