attempt to adapt to a newer version of gradio
Files changed:
- .gitignore (+2, -0)
- README.md (+2, -2)
- app.py (+185, -185)
- cusomized_gradio_blocks.py (+1, -3)
- lib/cfg_helper.py (+0, -1)
- lib/model_zoo/common/utils.py (+0, -2)
- requirements.txt (+11, -11)
.gitignore
CHANGED
@@ -9,3 +9,5 @@ pretrained/
 pretrained
 gradio_cached_examples/
 gradio_cached_examples
+.gradio/
+.gradio
README.md
CHANGED
@@ -4,11 +4,11 @@ emoji:
 colorFrom: blue
 colorTo: purple
 sdk: gradio
-sdk_version:
+sdk_version: 5.15.0
 app_file: app.py
 pinned: false
 license: mit
-python_version: 3.
+python_version: 3.10.16
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
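The front matter now pins sdk_version: 5.15.0 and python_version: 3.10.16, which must stay in sync with the gradio pin in requirements.txt below. A minimal runtime check that a running Space actually matches the new pins (a sketch, not part of the commit):

# Sketch: print the interpreter and Gradio versions the Space is actually running on.
import sys
import gradio

print("python:", sys.version.split()[0])  # expected to start with 3.10
print("gradio:", gradio.__version__)      # expected 5.15.0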
app.py
CHANGED
@@ -593,7 +593,7 @@ def t2i_interface(with_example=False):
 seed = gr.Number(20, label="Seed", precision=0)
 button = gr.Button("Run")
 with gr.Column():
-img_output = gr.Gallery(label="Image Result", elem_id='customized_imbox')
+img_output = gr.Gallery(label="Image Result", elem_id='customized_imbox')
 
 button.click(
 vd_inference.inference_t2i,
@@ -625,7 +625,7 @@ def i2i_interface(with_example=False):
 seed = gr.Number(20, label="Seed", precision=0)
 button = gr.Button("Run")
 with gr.Column():
-img_output = gr.Gallery(label="Image Result", elem_id='customized_imbox')
+img_output = gr.Gallery(label="Image Result", elem_id='customized_imbox')
 
 sim_flag.change(
 fn=lambda x: {
@@ -757,183 +757,183 @@ class example_visible_only_hack(object):
 if bi.value != vi:
 fi.which_update = 'visible_oneoff'
 
-def dcg_interface(with_example=False):
-    ...
-def tcg_interface(with_example=False):
-    ...
-def mcg_interface(with_example=False):
-    ...
+# def dcg_interface(with_example=False):
+# gr.HTML('<p id=myinst>  Description: ' + get_instruction("Dual-Context") + '</p>')
+# with gr.Row():
+# input_session = []
+# with gr.Column():
+# img = gr.Image(label='Image Input', type='pil', elem_id='customized_imbox')
+# fcs = gr.Slider(label="Focus (Semantic -- Style)", minimum=0, maximum=1, value=0.5, step=0.02)
+# gr.HTML('<p id=myinst>  Focus: Focus on what aspect of the image? (0-semantic, 0.5-balanced (default), 1-style).</p>')
+
+# text = gr.Textbox(lines=2, placeholder="Input prompt...", label='Text Input')
+# tstrength = gr.Slider(label="Text Domination (NoEffect -- TextOnly)", minimum=0, maximum=1, value=0, step=0.02)
+
+# seed = gr.Number(20, label="Seed", precision=0)
+# button = gr.Button("Run")
+
+# with gr.Column():
+# output_gallary = gr.Gallery(label="Image Result", elem_id='customized_imbox')
+
+# input_list = []
+# for i in input_session:
+# input_list += i
+# button.click(
+# vd_inference.inference_dcg,
+# inputs=[img, fcs, text, tstrength, seed],
+# outputs=[output_gallary])
+
+# if with_example:
+# gr.Examples(
+# label='Examples',
+# examples=get_example('Dual-Context'),
+# fn=vd_inference.inference_dcg,
+# inputs=[img, fcs, text, tstrength, seed],
+# outputs=[output_gallary],
+# cache_examples=cache_examples)
+
+# def tcg_interface(with_example=False):
+# gr.HTML('<p id=myinst>  Description: ' + get_instruction("Triple-Context") + '</p>')
+# with gr.Row():
+# input_session = []
+# with gr.Column(min_width=940):
+# with gr.Row():
+# with gr.Column():
+# img0 = gr.Image(label='Image Input', type='pil', elem_id='customized_imbox')
+# img0.as_example = types.MethodType(customized_as_example, img0)
+# imgm0 = gr.Image(label='Image Input with Mask', type='pil', elem_id='customized_imbox', tool='sketch', source="upload", visible=False)
+# imgm0.postprocess = types.MethodType(customized_postprocess, imgm0)
+# imgm0.as_example = types.MethodType(customized_as_example, imgm0)
+# istrength0 = gr.Slider(label="Weight", minimum=0, maximum=1, value=1, step=0.02)
+# fcs0 = gr.Slider(label="Focus (Semantic -- Style)", minimum=0, maximum=1, value=0.5, step=0.02)
+# msk0 = gr.Checkbox(label='Use mask?')
+# swapf0 = image_mimage_swap(img0, imgm0)
+
+# msk0.change(
+# fn=swapf0,
+# inputs=[img0, imgm0, msk0],
+# outputs=[img0, imgm0],)
+# input_session.append([img0, imgm0, istrength0, fcs0, msk0])
+
+# with gr.Column():
+# img1 = gr.Image(label='Image Input', type='pil', elem_id='customized_imbox')
+# img1.as_example = types.MethodType(customized_as_example, img1)
+# imgm1 = gr.Image(label='Image Input with Mask', type='pil', elem_id='customized_imbox', tool='sketch', source="upload", visible=False)
+# imgm1.postprocess = types.MethodType(customized_postprocess, imgm1)
+# imgm1.as_example = types.MethodType(customized_as_example, imgm1)
+# istrength1 = gr.Slider(label="Weight", minimum=0, maximum=1, value=1, step=0.02)
+# fcs1 = gr.Slider(label="Focus (Semantic -- Style)", minimum=0, maximum=1, value=0.5, step=0.02)
+# msk1 = gr.Checkbox(label='Use mask?')
+# swapf1 = image_mimage_swap(img1, imgm1)
+
+# msk1.change(
+# fn=swapf1,
+# inputs=[img1, imgm1, msk1],
+# outputs=[img1, imgm1],)
+# input_session.append([img1, imgm1, istrength1, fcs1, msk1])
+
+# gr.HTML('<p id=myinst>  Weight: The strength of the reference image. This weight is subject to <u>Text Domination</u>).</p>'+
+# '<p id=myinst>  Focus: Focus on what aspect of the image? (0-semantic, 0.5-balanced (default), 1-style).</p>'+
+# '<p id=myinst>  Mask: Remove regions on reference image so they will not influence the output.</p>',)
+
+# text = gr.Textbox(lines=2, placeholder="Input prompt...", label='Text Input')
+# tstrength = gr.Slider(label="Text Domination (NoEffect -- TextOnly)", minimum=0, maximum=1, value=0, step=0.02)
+
+# seed = gr.Number(20, label="Seed", precision=0)
+# button = gr.Button("Run")
+
+# with gr.Column(min_width=470):
+# input_gallary = gr.Gallery(label="Input Display", elem_id="customized_imbox")
+# output_gallary = gr.Gallery(label="Image Result", elem_id="customized_imbox")
+
+# input_list = []
+# for i in input_session:
+# input_list += i
+# input_list += [text, tstrength, seed]
+# button.click(
+# vd_inference.inference_tcg,
+# inputs=input_list,
+# outputs=[input_gallary, output_gallary])
+
+# if with_example:
+# create_myexamples(
+# label='Examples',
+# examples=get_example('Triple-Context'),
+# fn=vd_inference.inference_tcg,
+# inputs=input_list,
+# outputs=[input_gallary, output_gallary, ],
+# cache_examples=cache_examples, )
+
+# gr.HTML('<br><p id=myinst>  How to add mask: Please see the following instructions.</p><br>'+
+# '<div id="maskinst">'+
+# '<img src="file/assets/demo/misc/mask_inst1.gif">'+
+# '<img src="file/assets/demo/misc/mask_inst2.gif">'+
+# '<img src="file/assets/demo/misc/mask_inst3.gif">'+
+# '</div>')
+
+# def mcg_interface(with_example=False):
+# num_img_input = 4
+# gr.HTML('<p id=myinst>  Description: ' + get_instruction("Multi-Context") + '</p>')
+# with gr.Row():
+# input_session = []
+# with gr.Column():
+# for idx in range(num_img_input):
+# with gr.Tab('Image{}'.format(idx+1)):
+# img = gr.Image(label='Image Input', type='pil', elem_id='customized_imbox')
+# img.as_example = types.MethodType(customized_as_example, img)
+# imgm = gr.Image(label='Image Input with Mask', type='pil', elem_id='customized_imbox', tool='sketch', source="upload", visible=False)
+# imgm.postprocess = types.MethodType(customized_postprocess, imgm)
+# imgm.as_example = types.MethodType(customized_as_example, imgm)
+
+# with gr.Row():
+# istrength = gr.Slider(label="Weight", minimum=0, maximum=1, value=1, step=0.02)
+# fcs = gr.Slider(label="Focus (Semantic -- Style)", minimum=0, maximum=1, value=0.5, step=0.02)
+# msk = gr.Checkbox(label='Use mask?')
+# gr.HTML('<p id=myinst>  Weight: The strength of the reference image. This weight is subject to <u>Text Domination</u>).</p>'+
+# '<p id=myinst>  Focus: Focus on what aspect of the image? (0-semantic, 0.5-balanced (default), 1-style).</p>'+
+# '<p id=myinst>  Mask: Remove regions on reference image so they will not influence the output.</p>',)
+
+# msk.change(
+# fn=image_mimage_swap(img, imgm),
+# inputs=[img, imgm, msk],
+# outputs=[img, imgm],)
+# input_session.append([img, imgm, istrength, fcs, msk])
+
+# text = gr.Textbox(lines=2, placeholder="Input prompt...", label='Text Input')
+# tstrength = gr.Slider(label="Text Domination (NoEffect -- TextOnly)", minimum=0, maximum=1, value=0, step=0.02)
+
+# seed = gr.Number(20, label="Seed", precision=0)
+# button = gr.Button("Run")
+
+
+# with gr.Column():
+# input_gallary = gr.Gallery(label="Input Display", elem_id='customized_imbox')
+# output_gallary = gr.Gallery(label="Image Result", elem_id='customized_imbox')
+
+# input_list = []
+# for i in input_session:
+# input_list += i
+# input_list += [text, tstrength, seed]
+# button.click(
+# vd_inference.inference_mcg,
+# inputs=input_list,
+# outputs=[input_gallary, output_gallary], )
+
+# if with_example:
+# create_myexamples(
+# label='Examples',
+# examples=get_example('Multi-Context'),
+# fn=vd_inference.inference_mcg,
+# inputs=input_list,
+# outputs=[input_gallary, output_gallary],
+# cache_examples=cache_examples, )
+
+# gr.HTML('<br><p id=myinst>  How to add mask: Please see the following instructions.</p><br>'+
+# '<div id="maskinst">'+
+# '<img src="file/assets/demo/misc/mask_inst1.gif">'+
+# '<img src="file/assets/demo/misc/mask_inst2.gif">'+
+# '<img src="file/assets/demo/misc/mask_inst3.gif">'+
+# '</div>')
 
 ###########
 # Example #
@@ -1080,12 +1080,12 @@ if True:
 i2t_interface(with_example=True)
 with gr.Tab('Text-Variation'):
 t2t_interface(with_example=True)
-with gr.Tab('Dual-Context Image-Generation'):
-dcg_interface(with_example=True)
-with gr.Tab('Triple-Context Image-Blender'):
-tcg_interface(with_example=True)
-with gr.Tab('Multi-Context Image-Blender'):
-mcg_interface(with_example=True)
+# with gr.Tab('Dual-Context Image-Generation'):
+# dcg_interface(with_example=True)
+# with gr.Tab('Triple-Context Image-Blender'):
+# tcg_interface(with_example=True)
+# with gr.Tab('Multi-Context Image-Blender'):
+# mcg_interface(with_example=True)
 
 gr.HTML(
 """
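The Dual-Context, Triple-Context, and Multi-Context interfaces above are commented out rather than ported, presumably because they rely on gr.Image(tool='sketch', source="upload"), arguments that newer Gradio releases no longer accept; the remaining tabs keep the plain Textbox/Number/Button/Gallery wiring. Below is a minimal, self-contained sketch of that surviving wiring against Gradio 5.x; fake_inference is a placeholder for vd_inference.inference_t2i, which this sketch does not load:

# Sketch of the kept tab/gallery wiring, assuming gradio==5.15.0.
import gradio as gr
from PIL import Image

def fake_inference(prompt, seed):
    # Placeholder: return one blank image instead of running the diffusion model.
    return [Image.new("RGB", (256, 256), (200, 200, 200))]

with gr.Blocks() as demo:
    with gr.Tab("Text-to-Image"):
        with gr.Row():
            with gr.Column():
                text = gr.Textbox(lines=2, placeholder="Input prompt...", label="Text Input")
                seed = gr.Number(20, label="Seed", precision=0)
                button = gr.Button("Run")
            with gr.Column():
                img_output = gr.Gallery(label="Image Result", elem_id="customized_imbox")
        button.click(fake_inference, inputs=[text, seed], outputs=[img_output])

if __name__ == "__main__":
    demo.launch()

Bringing the masked-image tabs back would likely mean replacing the removed sketch tool with a current masking component such as gr.ImageEditor, which is a larger port than this commit attempts.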
cusomized_gradio_blocks.py
CHANGED
@@ -11,8 +11,6 @@ import warnings
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Tuple
 
-import matplotlib
-import matplotlib.pyplot as plt
 import numpy as np
 import PIL
 import PIL.Image
@@ -20,7 +18,7 @@ import PIL.Image
 import gradio
 from gradio import components, processing_utils, routes, utils
 from gradio.context import Context
-from gradio.documentation import document, set_documentation_group
+# from gradio.documentation import document, set_documentation_group
 from gradio.flagging import CSVLogger
 
 if TYPE_CHECKING: # Only import for type checking (to avoid circular imports).
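The from gradio.documentation import line is commented out because that module's helpers are not available in recent Gradio releases. If document or set_documentation_group were still referenced elsewhere in the file, a guarded import would keep it working on both old and new versions; this is a sketch under that assumption, not what the commit does:

# Sketch: tolerate Gradio versions with or without gradio.documentation.
try:
    from gradio.documentation import document, set_documentation_group
except ImportError:  # newer Gradio releases dropped these helpers
    def document(*args, **kwargs):
        # No-op fallback: return the decorated object unchanged.
        def wrap(obj):
            return obj
        return wrap

    def set_documentation_group(_name):
        # No-op fallback: documentation grouping is purely cosmetic here.
        pass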
lib/cfg_helper.py
CHANGED
@@ -6,7 +6,6 @@ import time
 import pprint
 import numpy as np
 import torch
-import matplotlib
 import argparse
 import json
 import yaml
lib/model_zoo/common/utils.py
CHANGED
@@ -6,8 +6,6 @@ import copy
 import functools
 import itertools
 
-import matplotlib.pyplot as plt
-
 ########
 # unit #
 ########
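Both lib/cfg_helper.py and lib/model_zoo/common/utils.py drop their matplotlib imports, so matplotlib no longer has to be installed in the Space at all. If a plotting code path were ever needed again, a lazy import keeps the dependency optional; _get_pyplot is a hypothetical helper, not part of the repository:

# Hypothetical helper: import matplotlib only when a plot is actually requested,
# so it can stay out of requirements.txt.
def _get_pyplot():
    try:
        import matplotlib
        matplotlib.use("Agg")  # headless backend, suitable for a server environment
        import matplotlib.pyplot as plt
        return plt
    except ImportError as exc:
        raise RuntimeError(
            "matplotlib is not installed; add it to requirements.txt to enable plotting"
        ) from exc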
requirements.txt
CHANGED
@@ -1,16 +1,16 @@
---extra-index-url https://download.pytorch.org/whl/
-tqdm==4.60.0
-transformers==4.24.0
-torchmetrics==0.7.3
+--extra-index-url https://download.pytorch.org/whl/cu117
+torch==2.0.0+cu117
+torchvision==0.15
+numpy==1.26.4
+gradio==5.15.0
+transformers==4.48.2
+torchmetrics==1.6.1
 
 einops==0.3.0
 omegaconf==2.1.1
+pyyaml
+easydict
+tqdm