fffiloni committed on
Commit 7d372b2 · verified · 1 Parent(s): abf12f8

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -9,7 +9,7 @@ from auffusion_pipeline import AuffusionPipeline
 
 # ——
 
-from diffusers import StableDiffusionImg2ImgPipeline
+from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline
 from converter import load_wav, mel_spectrogram, normalize_spectrogram, denormalize_spectrogram, Generator, get_mel_spectrogram_from_audio
 from utils import pad_spec, image_add_color, torch_to_pil, normalize, denormalize, prepare_mask_and_masked_image
 
@@ -139,7 +139,7 @@ def infer_inp(prompt, audio_path, progress=gr.Progress(track_tqdm=True)):
     vocoder = Generator.from_pretrained(pretrained_model_name_or_path, subfolder="vocoder")
     vocoder = vocoder.to(device=device, dtype=dtype)
 
-    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=dtype)
+    pipe = StableDiffusionInpaintPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=dtype)
     pipe = pipe.to(device)
 
     width_start, width = 256, 512
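
In short, infer_inp now builds a StableDiffusionInpaintPipeline instead of a StableDiffusionImg2ImgPipeline, so only a masked region of the spectrogram image is regenerated from the prompt rather than the whole image being re-noised. A minimal sketch of an inpainting call with this pipeline, assuming placeholder spectrogram and mask images (the actual mask preparation in app.py via prepare_mask_and_masked_image is not part of this diff):

import torch
from diffusers import StableDiffusionInpaintPipeline
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Same variable name app.py uses; set it to the checkpoint the Space loads
# (hypothetical placeholder here).
pretrained_model_name_or_path = "path/or/repo-id"

pipe = StableDiffusionInpaintPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=dtype)
pipe = pipe.to(device)

# Hypothetical inputs: a spectrogram rendered as an RGB image and a mask
# whose white pixels mark the region to regenerate.
init_image = Image.open("spectrogram.png").convert("RGB")
mask_image = Image.open("mask.png").convert("L")

result = pipe(prompt="a dog barking", image=init_image, mask_image=mask_image).images[0]
result.save("inpainted_spectrogram.png")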