Update app.py
app.py CHANGED

@@ -227,6 +227,8 @@ def load_input_spectrogram(audio_path):
     input_spec_image_path = "input_spectrogram.png"
     raw_image.save(input_spec_image_path)
 
+    return input_spec_image_path
+
 def preview_masked_area(audio_path, mask_start_point, mask_end_point):
     # Loading
     audio, sampling_rate = load_wav(audio_path)
@@ -236,6 +238,7 @@ def preview_masked_area(audio_path, mask_start_point, mask_end_point):
     norm_spec = normalize(norm_spec) # normalize to [-1, 1], because pipeline do not normalize for torch.Tensor input
 
     # Add Mask
+    width_start, width = mask_start_point, mask_end_point-mask_start_point
     mask = torch.zeros_like(norm_spec)[:1,...]
     mask[:, :, width_start:width_start+width] = 1
     mask_image = torch_to_pil(mask)
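For context: the first hunk makes load_input_spectrogram return the path of the saved spectrogram image, and the second hunk derives width_start and width from the two mask endpoints before the mask tensor is built. The standalone sketch below (an illustration, not part of the commit) reproduces that mask construction on a dummy spectrogram tensor; the tensor shape and endpoint values are assumptions.

import torch

# Dummy stand-in for the normalized spectrogram; the (channels, height, width)
# shape is an assumption for illustration.
norm_spec = torch.zeros(3, 256, 1024)
mask_start_point, mask_end_point = 100, 300  # example endpoints along the time axis

# The line added in the second hunk: turn the two endpoints into a start column and a width.
width_start, width = mask_start_point, mask_end_point - mask_start_point

# Single-channel mask that is 1 over the selected columns and 0 elsewhere.
mask = torch.zeros_like(norm_spec)[:1, ...]
mask[:, :, width_start:width_start + width] = 1

print(mask.shape)         # torch.Size([1, 256, 1024])
print(mask.sum().item())  # 256 * 200 = 51200.0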