Hugging Face Spaces — Running on Zero

Commit: "Update app.py" (Browse files)

File changed: app.py
@@ -60,8 +60,9 @@ def infer_img2img(prompt, audio_path, desired_strength, progress=gr.Progress(track_tqdm=True)):
     norm_spec = pad_spec(norm_spec, 1024)
     norm_spec = normalize(norm_spec) # normalize to [-1, 1], because pipeline do not normalize for torch.Tensor input

-    raw_image = image_add_color(torch_to_pil(norm_spec[:,:,:width]))
-
+    # raw_image = image_add_color(torch_to_pil(norm_spec[:,:,:width]))
+    raw_image = image_add_color(torch_to_pil(norm_spec))
+
     # Generation for different strength
     image_list = []
     audio_list = []
@@ -179,9 +180,10 @@ with gr.Blocks(css=css) as demo:
         submit_btn_img2img = gr.Button("Submit")
         audio_out_img2img = gr.Audio(label="Audio Ressult")

-        with gr.
-
-
+        with gr.Accordion("Compare Spectrograms", open=False):
+            with gr.Column():
+                input_spectrogram = gr.Image(label="Input Spectrogram")
+                output_spectrogram = gr.Image(label="Output Spectrogram")

         gr.Examples(
             examples = [