PhoenixStormJr committed on
Commit
ddf573a
·
verified ·
1 Parent(s): f0a7163

Upload infer_batch_rvc.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. infer_batch_rvc.py +217 -0
infer_batch_rvc.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ v1
3
+ runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "E:\codes\py39\RVC-beta\output" "E:\codes\py39\test-20230416b\weights\mi-test.pth" 0.66 cuda:0 True 3 0 1 0.33
4
+ v2
5
+ runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\test-20230416b\logs\mi-test-v2\aadded_IVF677_Flat_nprobe_1_v2.index" harvest "E:\codes\py39\RVC-beta\output_v2" "E:\codes\py39\test-20230416b\weights\mi-test-v2.pth" 0.66 cuda:0 True 3 0 1 0.33
6
+ """
7
+ import os, sys, pdb, torch
8
+
9
+ now_dir = os.getcwd()
10
+ sys.path.append(now_dir)
11
+ import argparse
12
+ import glob
13
+ import sys
14
+ import torch
15
+ import tqdm as tq
16
+ from multiprocessing import cpu_count
17
+
18
+
class Config:
    """Runtime/device configuration for batch RVC inference.

    Decides the compute device, whether half precision is usable, and the
    audio chunking parameters (x_pad, x_query, x_center, x_max) consumed by
    the inference pipeline, based on the detected GPU and its memory.
    """

    def __init__(self, device, is_half):
        # device: torch device string such as "cuda:0"; may be downgraded
        # to "mps" or "cpu" by device_config().
        self.device = device
        # is_half: request fp16 inference; may be forced off for weak GPUs.
        self.is_half = is_half
        self.n_cpu = 0  # 0 means "autodetect" (filled in by device_config)
        self.gpu_name = None
        self.gpu_mem = None  # GPU memory in GiB (rounded); stays None off-CUDA
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    def device_config(self) -> tuple:
        """Probe the hardware and return (x_pad, x_query, x_center, x_max).

        Side effects: for 16-series/10-series/P40 GPUs or <=4 GB cards this
        rewrites configs/*.json and trainset_preprocess_pipeline_print.py on
        disk; with no supported NVIDIA GPU it falls back self.device to
        "mps" or "cpu".
        """
        if torch.cuda.is_available():
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                # Message: "16-series/10-series cards and P40 force single precision"
                print("16系/10系显卡和P40强制单精度")
                self.is_half = False
                # Patch the on-disk configs so fp16 stays disabled everywhere.
                for config_file in ["32k.json", "40k.json", "48k.json"]:
                    with open(f"configs/{config_file}", "r") as f:
                        strr = f.read().replace("true", "false")
                    with open(f"configs/{config_file}", "w") as f:
                        f.write(strr)
                with open("trainset_preprocess_pipeline_print.py", "r") as f:
                    strr = f.read().replace("3.7", "3.0")
                with open("trainset_preprocess_pipeline_print.py", "w") as f:
                    f.write(strr)
            else:
                self.gpu_name = None
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
            if self.gpu_mem <= 4:
                # Low-VRAM card: shrink the preprocessing chunk length on disk.
                with open("trainset_preprocess_pipeline_print.py", "r") as f:
                    strr = f.read().replace("3.7", "3.0")
                with open("trainset_preprocess_pipeline_print.py", "w") as f:
                    f.write(strr)
        elif torch.backends.mps.is_available():
            # Message: "No supported NVIDIA GPU found, using MPS for inference"
            print("没有发现支持的N卡, 使用MPS进行推理")
            self.device = "mps"
        else:
            # Message: "No supported NVIDIA GPU found, using CPU for inference"
            print("没有发现支持的N卡, 使用CPU进行推理")
            self.device = "cpu"
            # NOTE(review): forcing is_half=True on CPU looks suspicious —
            # fp16 inference is generally not supported on CPU; this only
            # selects the larger chunking profile below, but confirm upstream.
            self.is_half = True

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        if self.is_half:
            # 6 GB VRAM configuration
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # 5 GB VRAM configuration
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        if self.gpu_mem != None and self.gpu_mem <= 4:
            # Very low VRAM (<=4 GB): smallest chunking parameters.
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        return x_pad, x_query, x_center, x_max
+
96
+
# ---------------------------------------------------------------------------
# Command-line arguments (positional; see the module docstring for examples):
#   1 f0up_key   2 input_path   3 index_path   4 f0method   5 opt_path
#   6 model_path 7 index_rate   8 device       9 is_half    10 filter_radius
#   11 resample_sr  12 rms_mix_rate  13 protect
# ---------------------------------------------------------------------------
f0up_key = sys.argv[1]
input_path = sys.argv[2]
index_path = sys.argv[3]
f0method = sys.argv[4]  # harvest or pm
opt_path = sys.argv[5]
model_path = sys.argv[6]
index_rate = float(sys.argv[7])
device = sys.argv[8]
# BUG FIX: the original `bool(sys.argv[9])` was True for ANY non-empty
# string, including "False" — parse the text explicitly instead.
is_half = sys.argv[9].lower() in ("true", "1", "yes")
filter_radius = int(sys.argv[10])
resample_sr = int(sys.argv[11])
rms_mix_rate = float(sys.argv[12])
protect = float(sys.argv[13])
print(sys.argv)
config = Config(device, is_half)
now_dir = os.getcwd()
sys.path.append(now_dir)
# Project imports are deferred until after sys.path is extended with the CWD.
from vc_infer_pipeline import VC
from infer_pack.models import (
    SynthesizerTrnMs256NSFsid,
    SynthesizerTrnMs256NSFsid_nono,
    SynthesizerTrnMs768NSFsid,
    SynthesizerTrnMs768NSFsid_nono,
)
from my_utils import load_audio
from fairseq import checkpoint_utils
from scipy.io import wavfile

# Lazily-loaded HuBERT feature extractor (populated by load_hubert()).
hubert_model = None
+
127
+
def load_hubert():
    """Load the HuBERT base model into the module-global ``hubert_model``.

    Reads ``hubert_base.pt`` from the working directory via fairseq, moves
    the model to the globally configured device, casts it to fp16 when the
    global ``is_half`` flag is set (fp32 otherwise), and switches it to
    eval mode.
    """
    global hubert_model
    ensemble, _saved_cfg, _task = checkpoint_utils.load_model_ensemble_and_task(
        ["hubert_base.pt"],
        suffix="",
    )
    model = ensemble[0].to(device)
    model = model.half() if is_half else model.float()
    model.eval()
    hubert_model = model
+
142
+
def vc_single(sid, input_audio, f0_up_key, f0_file, f0_method, file_index, index_rate):
    """Run voice conversion on a single audio file.

    Args:
        sid: speaker id passed to the synthesizer.
        input_audio: path to the source audio file; must not be None.
        f0_up_key: pitch transpose in semitones (coerced to int).
        f0_file: optional externally supplied f0 curve file, or None.
        f0_method: pitch extraction method ("harvest" or "pm").
        file_index: path to the faiss feature index.
        index_rate: blend ratio for index feature retrieval.

    Returns:
        The converted audio produced by ``vc.pipeline``, or the tuple
        ("You need to upload an audio", None) when input_audio is None.
    """
    global tgt_sr, net_g, vc, hubert_model, version
    if input_audio is None:
        return "You need to upload an audio", None
    f0_up_key = int(f0_up_key)
    audio = load_audio(input_audio, 16000)  # decode/resample to 16 kHz
    times = [0, 0, 0]  # per-stage timings, filled in by vc.pipeline
    if hubert_model == None:
        load_hubert()  # lazy one-time HuBERT load
    if_f0 = cpt.get("f0", 1)  # whether the loaded checkpoint models pitch
    # audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file)
    # NOTE: argument order below is positional and must match vc_infer_pipeline.VC.pipeline exactly.
    audio_opt = vc.pipeline(
        hubert_model,
        net_g,
        sid,
        audio,
        input_audio,
        times,
        f0_up_key,
        f0_method,
        file_index,
        index_rate,
        if_f0,
        filter_radius,
        tgt_sr,
        resample_sr,
        rms_mix_rate,
        version,
        protect,
        f0_file=f0_file,
    )
    print(times)
    return audio_opt
+
177
+
def get_vc(model_path):
    """Load an RVC checkpoint and initialize the global inference state.

    Populates the module globals ``cpt``, ``tgt_sr``, ``net_g``, ``vc``,
    ``n_spk`` and ``version`` from the .pth checkpoint at *model_path*.
    """
    global n_spk, tgt_sr, net_g, vc, cpt, device, is_half, version
    print("loading pth %s" % model_path)
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    cpt = torch.load(model_path, map_location="cpu")
    tgt_sr = cpt["config"][-1]  # target sample rate is the last config entry
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
    if_f0 = cpt.get("f0", 1)  # 1 when the model was trained with pitch (f0)
    version = cpt.get("version", "v1")
    # Pick the synthesizer class matching checkpoint version and f0 support.
    if version == "v1":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
    elif version == "v2":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half)
        else:
            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
    del net_g.enc_q  # the posterior encoder is only needed for training
    # Original note (translated): "without this line stale state isn't
    # cleaned up properly, oddly enough" — keep the load_state_dict print.
    print(net_g.load_state_dict(cpt["weight"], strict=False))
    net_g.eval().to(device)
    if is_half:
        net_g = net_g.half()
    else:
        net_g = net_g.float()
    vc = VC(tgt_sr, config)
    n_spk = cpt["config"][-3]
    # return {"visible": True,"maximum": n_spk, "__type__": "update"}
+
207
+
# --- Batch entry point: convert every .wav in input_path into opt_path ---
get_vc(model_path)
# Robustness: create the output directory up front instead of failing on the
# first wavfile.write when it does not exist yet.
os.makedirs(opt_path, exist_ok=True)
audios = os.listdir(input_path)
for file in tq.tqdm(audios):
    if file.endswith(".wav"):
        # os.path.join instead of manual "/" concatenation (portable paths).
        file_path = os.path.join(input_path, file)
        wav_opt = vc_single(
            0, file_path, f0up_key, None, f0method, index_path, index_rate
        )
        out_path = os.path.join(opt_path, file)
        wavfile.write(out_path, tgt_sr, wav_opt)