Code Improvement.
- app.py +1 -3
- webui/__init__.py +1 -1
- webui/images/1.jpg +0 -0
- webui/images/14.jpg +0 -0
- webui/images/16.jpg +0 -0
- webui/images/17.jpg +0 -0
- webui/images/18.jpg +0 -0
- webui/images/3.jpg +0 -0
- webui/images/31.jpg +0 -0
- webui/images/35.jpg +0 -0
- webui/images/4.jpg +0 -0
- webui/images/45.jpg +0 -0
- webui/images/48.jpg +0 -0
- webui/images/5.png +0 -0
- webui/images/7.png +0 -0
- webui/images/8.jpg +0 -0
- webui/images/9.jpg +0 -0
- webui/images/deer1.jpg +0 -0
- webui/images/sketch.png +0 -0
- webui/runner.py +3 -1
- webui/tab_style_transfer.py +18 -6
app.py
CHANGED
@@ -3,10 +3,8 @@ from webui import (
     create_interface_texture_synthesis,
     create_interface_style_t2i,
     create_interface_style_transfer,
-    Runner
 )
-
-
+from webui.runner import Runner
 
 
 def main():
webui/__init__.py
CHANGED
@@ -2,4 +2,4 @@
 from .tab_style_t2i import create_interface_style_t2i
 from .tab_style_transfer import create_interface_style_transfer
 from .tab_texture_synthesis import create_interface_texture_synthesis
-from .runner import Runner
+# from .runner import Runner
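Taken together, the app.py and webui/__init__.py hunks move the Runner import out of the package `__init__` and into a direct `from webui.runner import Runner` in app.py. The commit message does not state a motivation, but a plausible one is that importing `webui` for the UI builders no longer pulls in runner.py (and with it torch, diffusers, and the two ADPipeline variants). A minimal sketch of the top of app.py after this change, reconstructed from the hunk above (the body of `main()` is unchanged and omitted):

```python
# app.py after this commit (reconstructed from the hunk above)
from webui import (
    create_interface_texture_synthesis,
    create_interface_style_t2i,
    create_interface_style_transfer,
)
from webui.runner import Runner  # Runner now comes from its own module


def main():
    ...
```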
webui/images/1.jpg, 14.jpg, 16.jpg, 17.jpg, 18.jpg, 3.jpg, 31.jpg, 35.jpg, 4.jpg, 45.jpg, 48.jpg, 5.png, 7.png, 8.jpg, 9.jpg, deer1.jpg, sketch.png
ADDED (binary example images; previews not shown)
webui/runner.py
CHANGED
@@ -2,7 +2,7 @@ import torch
 from PIL import Image
 from diffusers import DDIMScheduler
 from accelerate.utils import set_seed
-from torchvision.transforms.functional import to_pil_image, to_tensor
+from torchvision.transforms.functional import to_pil_image, to_tensor, resize
 
 from pipeline_sd import ADPipeline
 from pipeline_sdxl import ADPipeline as ADXLPipeline
@@ -30,6 +30,8 @@ class Runner:
         self.sd15.classifier = self.sd15.unet
 
     def preprocecss(self, image: Image.Image, height=None, width=None):
+        image = resize(image, size=512)
+
         if width is None or height is None:
             width, height = image.size
         new_width = (width // 64) * 64
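The new `resize(image, size=512)` call uses torchvision's functional `resize` with a single integer, which scales the image's shorter edge to 512 pixels while preserving aspect ratio; the existing code then rounds the dimensions down to multiples of 64, as SD-style pipelines expect. A minimal sketch of the overall preprocessing flow follows, assuming the method continues by resizing to the rounded size and converting to a tensor (only the first lines are visible in the hunk; the helper name and the tail are hypothetical):

```python
from PIL import Image
from torchvision.transforms.functional import resize, to_tensor


def preprocess_sketch(image: Image.Image, height=None, width=None):
    """Hypothetical stand-in for Runner.preprocecss (the repo's spelling)."""
    # Scale the shorter edge to 512 px; torchvision keeps the aspect ratio
    # when `size` is a single int.
    image = resize(image, size=512)

    if width is None or height is None:
        width, height = image.size

    # Round both dimensions down to multiples of 64 so they are valid
    # Stable Diffusion input sizes (the hunk shows this line for the width).
    new_width = (width // 64) * 64
    new_height = (height // 64) * 64
    image = image.resize((new_width, new_height))

    # Convert to a 1xCxHxW float tensor in [0, 1] (assumed tail of the method).
    return to_tensor(image).unsqueeze(0)
```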
webui/tab_style_transfer.py
CHANGED
@@ -13,10 +13,8 @@ def create_interface_style_transfer(runner):
     with gr.Row():
         with gr.Column():
             with gr.Row():
-                content_image = gr.Image(label='Input Content Image', type='pil', interactive=True,
-
-                style_image = gr.Image(label='Input Style Image', type='pil', interactive=True,
-                                       value=Image.open('examples/s1.jpg').convert('RGB') if os.path.exists('examples/s1.jpg') else None)
+                content_image = gr.Image(label='Input Content Image', type='pil', interactive=True, value=None)
+                style_image = gr.Image(label='Input Style Image', type='pil', interactive=True, value=None)
 
             run_button = gr.Button(value='Run')
 
@@ -33,9 +31,23 @@ def create_interface_style_transfer(runner):
         with gr.Column():
            gr.Markdown('#### Output Image:\n')
            result_gallery = gr.Gallery(label='Output', elem_id='gallery', columns=2, height='auto', preview=True)
-
+           gr.Markdown(
+               'Notes:\n'
+               '* If you find the style effect insufficient, you can try increasing the `Number of Steps` or decreasing the `Content Weight`\n'
+               '* For face stylization, we generally recommend using a `Content Weight` of `0.26` for most faces.'
+           )
     gr.Examples(
-        [
+        [
+            [Image.open('./webui/images/deer1.jpg').convert('RGB'), Image.open('./webui/images/35.jpg').convert('RGB'), 200, 0.25],
+            [Image.open('./webui/images/4.jpg').convert('RGB'), Image.open('./webui/images/17.jpg').convert('RGB'), 200, 0.25],
+            [Image.open('./webui/images/18.jpg').convert('RGB'), Image.open('./webui/images/3.jpg').convert('RGB'), 200, 0.25],
+            [Image.open('./webui/images/8.jpg').convert('RGB'), Image.open('./webui/images/sketch.png').convert('RGB'), 300, 0.2],
+            [Image.open('./webui/images/5.png').convert('RGB'), Image.open('./webui/images/45.jpg').convert('RGB'), 200, 0.25],
+            [Image.open('./webui/images/9.jpg').convert('RGB'), Image.open('./webui/images/16.jpg').convert('RGB'), 200, 0.25],
+            [Image.open('./webui/images/7.png').convert('RGB'), Image.open('./webui/images/31.jpg').convert('RGB'), 200, 0.25],
+            [Image.open('./webui/images/14.jpg').convert('RGB'), Image.open('./webui/images/48.jpg').convert('RGB'), 200, 0.25],
+            [Image.open('./webui/images/lecun.png').convert('RGB'), Image.open('./webui/images/40.jpg').convert('RGB'), 300, 0.23],
+        ],
         [content_image, style_image, num_steps, content_weight]
     )
 
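The rebuilt examples pass preloaded PIL images (rather than file paths) as cell values, with one value per input component in the order given by the `inputs` list. Below is a stripped-down, self-contained sketch of that `gr.Examples` pattern; the slider components and their ranges are illustrative assumptions (the real tab defines `num_steps` and `content_weight` elsewhere in the file), and it presumes the example images added in this commit exist on disk:

```python
import gradio as gr
from PIL import Image

with gr.Blocks() as demo:
    # Simplified stand-ins for the components built in tab_style_transfer.py.
    content_image = gr.Image(label='Input Content Image', type='pil', interactive=True)
    style_image = gr.Image(label='Input Style Image', type='pil', interactive=True)
    num_steps = gr.Slider(minimum=10, maximum=500, value=200, step=10, label='Number of Steps')
    content_weight = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.01, label='Content Weight')

    # Each example row supplies one value per component listed in `inputs`,
    # here using preloaded PIL images just as the commit does.
    gr.Examples(
        examples=[
            [Image.open('./webui/images/deer1.jpg').convert('RGB'),
             Image.open('./webui/images/35.jpg').convert('RGB'), 200, 0.25],
            [Image.open('./webui/images/8.jpg').convert('RGB'),
             Image.open('./webui/images/sketch.png').convert('RGB'), 300, 0.2],
        ],
        inputs=[content_image, style_image, num_steps, content_weight],
    )

if __name__ == '__main__':
    demo.launch()
```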