Upload 2 files
- app.py +84 -0
- requirements.txt +7 -0
app.py
ADDED
@@ -0,0 +1,84 @@
import gradio as gr
import torch
from diffusers import AutoPipelineForText2Image, DDIMScheduler
from transformers import CLIPVisionModelWithProjection
from diffusers.utils import load_image
import os
from PIL import Image

# Reference images defining each visual style for the style IP-Adapter.
STYLE_MAP = {
    "pixar": [
        "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img0.png",
        "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img1.png",
        "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img2.png",
        "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img3.png",
        "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img4.png"
    ]
}

device = "cuda" if torch.cuda.is_available() else "cpu"
# fp16 weights are only practical on GPU; fall back to fp32 on CPU.
torch_dtype = torch.float16 if device == "cuda" else torch.float32

# ViT-H image encoder shared by both IP-Adapter checkpoints loaded below.
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "h94/IP-Adapter",
    subfolder="models/image_encoder",
    torch_dtype=torch_dtype,
)

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch_dtype,
    image_encoder=image_encoder,
    variant="fp16" if device == "cuda" else None,
)

pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
# Two adapters loaded at once: one for style transfer, one for face identity.
pipeline.load_ip_adapter(
    "h94/IP-Adapter",
    subfolder="sdxl_models",
    weight_name=[
        "ip-adapter-plus_sdxl_vit-h.safetensors",
        "ip-adapter-plus-face_sdxl_vit-h.safetensors"
    ]
)
# Per-adapter influence: 0.7 for the style adapter, 0.3 for the face adapter.
pipeline.set_ip_adapter_scale([0.7, 0.3])

if device == "cuda":
    # Offloading manages GPU placement itself; calling .to("cuda") first would
    # load every model onto the GPU at once and defeat the purpose.
    pipeline.enable_model_cpu_offload()
else:
    pipeline = pipeline.to(device)

os.makedirs("outputs", exist_ok=True)

def generate_storybook(data):
    character_image_url = data["character_image_url"]
    style = data["style"]
    scenes = data["scenes"]

    face_image = load_image(character_image_url)
    style_images = [load_image(url) for url in STYLE_MAP[style]]

    result_paths = []
    for i, prompt in enumerate(scenes):
        image = pipeline(
            prompt=prompt,
            # One entry per loaded adapter: the list of style images, then the face.
            ip_adapter_image=[style_images, face_image],
            negative_prompt="blurry, bad anatomy",
            width=768,
            height=1024,
            guidance_scale=7.5,
            num_inference_steps=30,
            # Deterministic per-scene seed so reruns reproduce the same pages.
            generator=torch.Generator(device).manual_seed(i + 42)
        ).images[0]

        path = f"outputs/scene_{i+1}.png"
        image.save(path)
        result_paths.append(path)

    return result_paths

iface = gr.Interface(
    fn=generate_storybook,
    inputs=gr.JSON(),
    outputs=gr.JSON(),
    title="AI Storybook Generator"
)

iface.launch()
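For reference, the app takes a single JSON object as input. A minimal sketch of a valid payload, inferred from the keys generate_storybook reads (the character URL below is a placeholder, and "pixar" is the only style defined in STYLE_MAP):

payload = {
    "character_image_url": "https://example.com/character.png",  # placeholder URL
    "style": "pixar",  # must be a key of STYLE_MAP
    "scenes": [  # one prompt per storybook page
        "a young explorer waking up in a treehouse at sunrise",
        "the explorer sailing a paper boat down a rainy creek"
    ]
}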
requirements.txt
ADDED
@@ -0,0 +1,7 @@
torch
diffusers
transformers
accelerate
safetensors
opencv-python
gradio
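Assuming the Space is running (locally, python app.py serves Gradio on its default port 7860), it can be called programmatically with gradio_client, which is installed as a dependency of gradio. This is a usage sketch, not part of the committed files:

from gradio_client import Client

client = Client("http://127.0.0.1:7860/")  # default local Gradio URL
result = client.predict(
    {
        "character_image_url": "https://example.com/character.png",  # placeholder
        "style": "pixar",
        "scenes": ["a young explorer waking up in a treehouse at sunrise"],
    },
    api_name="/predict",  # default endpoint name for a single gr.Interface
)
print(result)  # list of saved scene paths, e.g. ["outputs/scene_1.png"]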