Abdullah committed
Commit 8cbea87 · 1 Parent(s): aac6f76

added .gitignore file

Changed files:
- .gitattributes +1 -1
- .gitignore +1 -0
- README.md +6 -5
- app.py +101 -104

.gitattributes
CHANGED
@@ -32,4 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text

.gitignore
ADDED
@@ -0,0 +1 @@
+venv

README.md
CHANGED
@@ -1,12 +1,13 @@
 ---
-title: Upscaler
-emoji:
+title: Image Upscaler
+emoji: 🔥
 colorFrom: indigo
-colorTo:
+colorTo: green
 sdk: gradio
-sdk_version:
+sdk_version: 4.44.1
 app_file: app.py
 pinned: false
+license: apache-2.0
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at <https://huggingface.co/docs/hub/spaces-config-reference>

app.py
CHANGED
@@ -3,7 +3,6 @@ import spaces
 from gradio_imageslider import ImageSlider
 from image_gen_aux import UpscaleWithModel
 from image_gen_aux.utils import load_image
-import torch
 
 # This uses https://github.com/asomoza/image_gen_aux/blob/main/src/image_gen_aux/upscalers/README.md
 # Also this space has been duplicated from their official huggingface space, https://huggingface.co/spaces/OzzyGT/basic_upscaler
@@ -12,106 +11,106 @@ import torch
 
 # My models, alphabetically sorted
 MODELS = {
-    [previous 100 model entries not captured in this view]
+    "1xDeH264_realplksr": "Phips/1xDeH264_realplksr",
+    "1xDeJPG_HAT": "Phips/1xDeJPG_HAT",
+    "1xDeJPG_OmniSR": "Phips/1xDeJPG_OmniSR",
+    "1xDeJPG_realplksr_otf": "Phips/1xDeJPG_realplksr_otf",
+    "1xDeJPG_SRFormer_light": "Phips/1xDeJPG_SRFormer_light",
+    "1xDeNoise_realplksr_otf": "Phips/1xDeNoise_realplksr_otf",
+    "1xExposureCorrection_compact": "Phips/1xExposureCorrection_compact",
+    "1xOverExposureCorrection_compact": "Phips/1xOverExposureCorrection_compact",
+    "1xUnderExposureCorrection_compact": "Phips/1xUnderExposureCorrection_compact",
+    "2xAoMR_mosr": "Phips/2xAoMR_mosr",
+    "2xEvangelion_compact": "Phips/2xEvangelion_compact",
+    "2xEvangelion_dat2": "Phips/2xEvangelion_dat2",
+    "2xEvangelion_omnisr": "Phips/2xEvangelion_omnisr",
+    "2xHFA2k_compact_multijpg": "Phips/2xHFA2k_compact_multijpg",
+    "2xHFA2k_LUDVAE_compact": "Phips/2xHFA2k_LUDVAE_compact",
+    "2xHFA2k_LUDVAE_SPAN": "Phips/2xHFA2k_LUDVAE_SPAN",
+    "2xHFA2kAVCCompact": "Phips/2xHFA2kAVCCompact",
+    "2xHFA2kAVCOmniSR": "Phips/2xHFA2kAVCOmniSR",
+    "2xHFA2kAVCSRFormer_light": "Phips/2xHFA2kAVCSRFormer_light",
+    "2xHFA2kCompact": "Phips/2xHFA2kCompact",
+    "2xHFA2kOmniSR": "Phips/2xHFA2kOmniSR",
+    "2xHFA2kReal-CUGAN": "Phips/2xHFA2kReal-CUGAN",
+    "2xHFA2kShallowESRGAN": "Phips/2xHFA2kShallowESRGAN",
+    "2xHFA2kSPAN": "Phips/2xHFA2kSPAN",
+    "2xHFA2kSwinIR-S": "Phips/2xHFA2kSwinIR-S",
+    "2xLexicaRRDBNet": "Phips/2xLexicaRRDBNet",
+    "2xLexicaRRDBNet_Sharp": "Phips/2xLexicaRRDBNet_Sharp",
+    "2xNomosUni_compact_multijpg": "Phips/2xNomosUni_compact_multijpg",
+    "2xNomosUni_compact_multijpg_ldl": "Phips/2xNomosUni_compact_multijpg_ldl",
+    "2xNomosUni_compact_otf_medium": "Phips/2xNomosUni_compact_otf_medium",
+    "2xNomosUni_esrgan_multijpg": "Phips/2xNomosUni_esrgan_multijpg",
+    "2xNomosUni_span_multijpg": "Phips/2xNomosUni_span_multijpg",
+    "2xNomosUni_span_multijpg_ldl": "Phips/2xNomosUni_span_multijpg_ldl",
+    "2xParimgCompact": "Phips/2xParimgCompact",
+    "4x4xTextures_GTAV_rgt-s": "Phips/4xTextures_GTAV_rgt-s",
+    "4xArtFaces_realplksr_dysample": "Phips/4xArtFaces_realplksr_dysample",
+    "4xBHI_dat2_multiblur": "Phips/4xBHI_dat2_multiblur",
+    "4xBHI_dat2_multiblurjpg": "Phips/4xBHI_dat2_multiblurjpg",
+    "4xBHI_dat2_otf": "Phips/4xBHI_dat2_otf",
+    "4xBHI_dat2_real": "Phips/4xBHI_dat2_real",
+    "4xBHI_realplksr_dysample_multi": "Phips/4xBHI_realplksr_dysample_multi",
+    "4xBHI_realplksr_dysample_multiblur": "Phips/4xBHI_realplksr_dysample_multiblur",
+    "4xBHI_realplksr_dysample_otf": "Phips/4xBHI_realplksr_dysample_otf",
+    "4xBHI_realplksr_dysample_otf_nn": "Phips/4xBHI_realplksr_dysample_otf_nn",
+    "4xBHI_realplksr_dysample_real": "Phips/4xBHI_realplksr_dysample_real",
+    "4xFaceUpDAT": "Phips/4xFaceUpDAT",
+    "4xFaceUpLDAT": "Phips/4xFaceUpLDAT",
+    "4xFaceUpSharpDAT": "Phips/4xFaceUpSharpDAT",
+    "4xFaceUpSharpLDAT": "Phips/4xFaceUpSharpLDAT",
+    "4xFFHQDAT": "Phips/4xFFHQDAT",
+    "4xFFHQLDAT": "Phips/4xFFHQLDAT",
+    "4xHFA2k": "Phips/4xHFA2k",
+    "4xHFA2k_ludvae_realplksr_dysample": "Phips/4xHFA2k_ludvae_realplksr_dysample",
+    "4xHFA2kLUDVAEGRL_small": "Phips/4xHFA2kLUDVAEGRL_small",
+    "4xHFA2kLUDVAESRFormer_light": "Phips/4xHFA2kLUDVAESRFormer_light",
+    "4xHFA2kLUDVAESwinIR_light": "Phips/4xHFA2kLUDVAESwinIR_light",
+    "4xLexicaDAT2_otf": "Phips/4xLexicaDAT2_otf",
+    "4xLSDIRCompact2": "Phips/4xLSDIRCompact2",
+    "4xLSDIRCompact": "Phips/4xLSDIRCompact",
+    "4xLSDIRCompactC3": "Phips/4xLSDIRCompactC3",
+    "4xLSDIRCompactC": "Phips/4xLSDIRCompactC",
+    "4xLSDIRCompactCR3": "Phips/4xLSDIRCompactCR3",
+    "4xLSDIRCompactN3": "Phips/4xLSDIRCompactN3",
+    "4xLSDIRCompactR3": "Phips/4xLSDIRCompactR3",
+    "4xLSDIRCompactR": "Phips/4xLSDIRCompactR",
+    "4xLSDIRDAT": "Phips/4xLSDIRDAT",
+    "4xNature_realplksr_dysample": "Phips/4xNature_realplksr_dysample",
+    "4xNomos2_hq_atd": "Phips/4xNomos2_hq_atd",
+    "4xNomos2_hq_dat2": "Phips/4xNomos2_hq_dat2",
+    "4xNomos2_hq_drct-l": "Phips/4xNomos2_hq_drct-l",
+    "4xNomos2_hq_mosr": "Phips/4xNomos2_hq_mosr",
+    "4xNomos2_otf_esrgan": "Phips/4xNomos2_otf_esrgan",
+    "4xNomos2_realplksr_dysample": "Phips/4xNomos2_realplksr_dysample",
+    "4xNomos8k_atd_jpg": "Phips/4xNomos8k_atd_jpg",
+    "4xNomos8kDAT": "Phips/4xNomos8kDAT",
+    "4xNomos8kHAT-L_bokeh_jpg": "Phips/4xNomos8kHAT-L_bokeh_jpg",
+    "4xNomos8kHAT-L_otf": "Phips/4xNomos8kHAT-L_otf",
+    "4xNomos8kSC": "Phips/4xNomos8kSC",
+    "4xNomos8kSCHAT-L": "Phips/4xNomos8kSCHAT-L",
+    "4xNomos8kSCHAT-S": "Phips/4xNomos8kSCHAT-S",
+    "4xNomos8kSCSRFormer": "Phips/4xNomos8kSCSRFormer",
+    "4xNomosUni_rgt_multijpg": "Phips/4xNomosUni_rgt_multijpg",
+    "4xNomosUni_rgt_s_multijpg": "Phips/4xNomosUni_rgt_s_multijpg",
+    "4xNomosUni_span_multijpg": "Phips/4xNomosUni_span_multijpg",
+    "4xNomosUniDAT2_box": "Phips/4xNomosUniDAT2_box",
+    "4xNomosUniDAT2_multijpg_ldl": "Phips/4xNomosUniDAT2_multijpg_ldl",
+    "4xNomosUniDAT2_multijpg_ldl_sharp": "Phips/4xNomosUniDAT2_multijpg_ldl_sharp",
+    "4xNomosUniDAT_bokeh_jpg": "Phips/4xNomosUniDAT_bokeh_jpg",
+    "4xNomosUniDAT_otf": "Phips/4xNomosUniDAT_otf",
+    "4xNomosWebPhoto_atd": "Phips/4xNomosWebPhoto_atd",
+    "4xNomosWebPhoto_esrgan": "Phips/4xNomosWebPhoto_esrgan",
+    "4xNomosWebPhoto_RealPLKSR": "Phips/4xNomosWebPhoto_RealPLKSR",
+    "4xReal_SSDIR_DAT_GAN": "Phips/4xReal_SSDIR_DAT_GAN",
+    "4xRealWebPhoto_v3_atd": "Phips/4xRealWebPhoto_v3_atd",
+    "4xRealWebPhoto_v4_dat2": "Phips/4xRealWebPhoto_v4_dat2",
+    "4xRealWebPhoto_v4_drct-l": "Phips/4xRealWebPhoto_v4_drct-l",
+    "4xSSDIRDAT": "Phips/4xSSDIRDAT",
+    "4xTextureDAT2_otf": "Phips/4xTextureDAT2_otf",
+    "4xTextures_GTAV_rgt-s": "Phips/4xTextures_GTAV_rgt-s",
+    "4xTextures_GTAV_rgt-s_dither": "Phips/4xTextures_GTAV_rgt-s_dither",
 }
 
 
@@ -119,9 +118,7 @@ MODELS = {
 def upscale_image(image, model_selection):
     original = load_image(image)
 
-    [removed line not captured in this view]
-    upscaler = UpscaleWithModel.from_pretrained(MODELS[model_selection]).to(device)
-    [removed line not captured in this view]
+    upscaler = UpscaleWithModel.from_pretrained(MODELS[model_selection]).to("cuda")
     image = upscaler(original, tiling=True, tile_width=1024, tile_height=1024)
 
     return original, image
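
Taken together, the new app.py boils down to a small image_gen_aux pipeline: load an image, instantiate one of the models listed in MODELS, and run it with tiling. Dropping "import torch" and switching from .to(device) to .to("cuda") indicate the code now simply assumes a GPU is present; the "import spaces" visible in the hunk context suggests a ZeroGPU Space, where the function would typically also carry a @spaces.GPU decorator (that line is outside this diff). A minimal standalone sketch of the same flow, using a placeholder input path and an arbitrarily chosen repository from the dictionary above:

```python
# Minimal sketch (not part of this commit) of the upscaling path used by app.py.
# Assumptions: a CUDA device is available, image_gen_aux is installed, and
# "input.jpg" is a placeholder path.
from image_gen_aux import UpscaleWithModel
from image_gen_aux.utils import load_image

original = load_image("input.jpg")  # placeholder; load_image also accepts URLs

# Any repository listed in the MODELS dict above works here.
upscaler = UpscaleWithModel.from_pretrained("Phips/4xNomosWebPhoto_RealPLKSR").to("cuda")

# Tiling keeps peak memory bounded on large inputs, as in upscale_image().
upscaled = upscaler(original, tiling=True, tile_width=1024, tile_height=1024)

# The Space feeds this result straight into a Gradio ImageSlider; if it is a
# PIL image (as that usage suggests), it can be saved directly.
upscaled.save("upscaled.png")
```

The tiling parameters mirror the ones in upscale_image(); smaller tiles lower peak memory at the cost of more upscaler calls.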
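This diff does not touch the Gradio UI in app.py, so the wiring below is a hypothetical sketch only: it shows one way a function with upscale_image's signature, returning (original, upscaled), could be connected to the ImageSlider component that app.py imports. It assumes it sits in the same file as MODELS and upscale_image; the component choices, labels, and layout are illustrative assumptions, not the Space's actual code.

```python
# Hypothetical wiring (not from this diff): upscale_image() returns
# (original, upscaled), which is exactly the pair ImageSlider displays.
import gradio as gr
from gradio_imageslider import ImageSlider

with gr.Blocks() as demo:
    input_image = gr.Image(type="filepath", label="Input image")
    model_selection = gr.Dropdown(choices=sorted(MODELS), label="Model")
    result = ImageSlider(label="Original / Upscaled")
    gr.Button("Upscale").click(
        upscale_image,  # the function defined in app.py
        inputs=[input_image, model_selection],
        outputs=result,
    )

demo.launch()
```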