kevinwang676 committed (verified)
Commit d076f68 · 1 Parent(s): 3a8af5e

Create vc_webui2.py

Files changed (1)
  1. GPT_SoVITS/vc_webui2.py +1132 -0
GPT_SoVITS/vc_webui2.py ADDED
@@ -0,0 +1,1132 @@
+ '''
+ Recognize as mixed Chinese and English
+ Recognize as mixed Japanese and English
+ Multilingual: split the text and detect the language of each segment
+ Recognize everything as Chinese
+ Recognize everything as English
+ Recognize everything as Japanese
+ '''
+ import logging
+ import traceback,torchaudio,warnings
+ logging.getLogger("markdown_it").setLevel(logging.ERROR)
+ logging.getLogger("urllib3").setLevel(logging.ERROR)
+ logging.getLogger("httpcore").setLevel(logging.ERROR)
+ logging.getLogger("httpx").setLevel(logging.ERROR)
+ logging.getLogger("asyncio").setLevel(logging.ERROR)
+ logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
+ logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
+ logging.getLogger("multipart.multipart").setLevel(logging.ERROR)
+ warnings.simplefilter(action='ignore', category=FutureWarning)
+
+ import os, re, sys, json
+ import pdb
+ import torch
+ import torch.nn.functional as F  # used by vc_main's interpolate call below
+ from text.LangSegmenter import LangSegmenter
+
+ try:
+     import gradio.analytics as analytics
+     analytics.version_check = lambda:None
+ except:...
+ version=model_version="v3"
+ pretrained_sovits_name=["GPT_SoVITS/pretrained_models/s2Gv3.pth"]
+ pretrained_gpt_name=["GPT_SoVITS/pretrained_models/s1v3.ckpt"]
+
+
+ _ =[[],[]]
+ for i in range(1):
+     if os.path.exists(pretrained_gpt_name[i]):_[0].append(pretrained_gpt_name[i])
+     if os.path.exists(pretrained_sovits_name[i]):_[-1].append(pretrained_sovits_name[i])
+ pretrained_gpt_name,pretrained_sovits_name = _
+
+
+ if os.path.exists(f"./weight.json"):
+     pass
+ else:
+     with open(f"./weight.json", 'w', encoding="utf-8") as file:json.dump({'GPT':{},'SoVITS':{}},file)
+
+ with open(f"./weight.json", 'r', encoding="utf-8") as file:
+     weight_data = file.read()
+     weight_data=json.loads(weight_data)
+     gpt_path = os.environ.get(
+         "gpt_path", weight_data.get('GPT',{}).get(version,pretrained_gpt_name))
+     sovits_path = os.environ.get(
+         "sovits_path", weight_data.get('SoVITS',{}).get(version,pretrained_sovits_name))
+     if isinstance(gpt_path,list):
+         gpt_path = gpt_path[0]
+     if isinstance(sovits_path,list):
+         sovits_path = sovits_path[0]
+ # gpt_path = os.environ.get(
+ #     "gpt_path", pretrained_gpt_name
+ # )
+ # sovits_path = os.environ.get("sovits_path", pretrained_sovits_name)
+ cnhubert_base_path = os.environ.get(
+     "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base"
+ )
+ bert_path = os.environ.get(
+     "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
+ )
+ infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
+ infer_ttswebui = int(infer_ttswebui)
+ is_share = os.environ.get("is_share", "False")
+ is_share = eval(is_share)
+ if "_CUDA_VISIBLE_DEVICES" in os.environ:
+     os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
+ is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
+ punctuation = set(['!', '?', '…', ',', '.', '-'," "])
+ import gradio as gr
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
+ import numpy as np
+ import librosa
+ from feature_extractor import cnhubert
+
+ cnhubert.cnhubert_base_path = cnhubert_base_path
+
+ from GPT_SoVITS.module.models import SynthesizerTrn,SynthesizerTrnV3
+ from AR.models.t2s_lightning_module import Text2SemanticLightningModule
+ from text import cleaned_text_to_sequence
+ from text.cleaner import clean_text
+ from time import time as ttime
+ from module.mel_processing import spectrogram_torch
+ from tools.my_utils import load_audio
+ from tools.i18n.i18n import I18nAuto, scan_language_list
+
+ language=os.environ.get("language","Auto")
+ language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
+ i18n = I18nAuto(language=language)
+
+ # os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # make sure this is also set when the inference UI is launched directly
+
+ if torch.cuda.is_available():
+     device = "cuda"
+ else:
+     device = "cpu"
+
+ dict_language_v1 = {
+     i18n("中文"): "all_zh",  # recognize everything as Chinese
+     i18n("英文"): "en",  # recognize everything as English  ####### unchanged
+     i18n("日文"): "all_ja",  # recognize everything as Japanese
+     i18n("中英混合"): "zh",  # mixed Chinese-English recognition  #### unchanged
+     i18n("日英混合"): "ja",  # mixed Japanese-English recognition  #### unchanged
+     i18n("多语种混合"): "auto",  # multilingual: split and detect the language of each segment
+ }
+ dict_language_v2 = {
+     i18n("中文"): "all_zh",  # recognize everything as Chinese
+     i18n("英文"): "en",  # recognize everything as English  ####### unchanged
+     i18n("日文"): "all_ja",  # recognize everything as Japanese
+     i18n("粤语"): "all_yue",  # recognize everything as Cantonese
+     i18n("韩文"): "all_ko",  # recognize everything as Korean
+     i18n("中英混合"): "zh",  # mixed Chinese-English recognition  #### unchanged
+     i18n("日英混合"): "ja",  # mixed Japanese-English recognition  #### unchanged
+     i18n("粤英混合"): "yue",  # mixed Cantonese-English recognition  #### unchanged
+     i18n("韩英混合"): "ko",  # mixed Korean-English recognition  #### unchanged
+     i18n("多语种混合"): "auto",  # multilingual: split and detect the language of each segment
+     i18n("多语种混合(粤语)"): "auto_yue",  # multilingual (Cantonese): split and detect the language of each segment
+ }
+ dict_language = dict_language_v1 if version =='v1' else dict_language_v2
+
+ tokenizer = AutoTokenizer.from_pretrained(bert_path)
+ bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
+ if is_half == True:
+     bert_model = bert_model.half().to(device)
+ else:
+     bert_model = bert_model.to(device)
+
+
+ def get_bert_feature(text, word2ph):
+     with torch.no_grad():
+         inputs = tokenizer(text, return_tensors="pt")
+         for i in inputs:
+             inputs[i] = inputs[i].to(device)
+         res = bert_model(**inputs, output_hidden_states=True)
+         res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
+     assert len(word2ph) == len(text)
+     phone_level_feature = []
+     for i in range(len(word2ph)):
+         repeat_feature = res[i].repeat(word2ph[i], 1)
+         phone_level_feature.append(repeat_feature)
+     phone_level_feature = torch.cat(phone_level_feature, dim=0)
+     return phone_level_feature.T
+
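+ # Shape note: res is [len(text), 1024] after trimming CLS/SEP, and each
+ # character's feature is repeated word2ph[i] times, so the return value is
+ # [1024, sum(word2ph)] — one column per phone.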
+
+ class DictToAttrRecursive(dict):
+     def __init__(self, input_dict):
+         super().__init__(input_dict)
+         for key, value in input_dict.items():
+             if isinstance(value, dict):
+                 value = DictToAttrRecursive(value)
+             self[key] = value
+             setattr(self, key, value)
+
+     def __getattr__(self, item):
+         try:
+             return self[item]
+         except KeyError:
+             raise AttributeError(f"Attribute {item} not found")
+
+     def __setattr__(self, key, value):
+         if isinstance(value, dict):
+             value = DictToAttrRecursive(value)
+         super(DictToAttrRecursive, self).__setitem__(key, value)
+         super().__setattr__(key, value)
+
+     def __delattr__(self, item):
+         try:
+             del self[item]
+         except KeyError:
+             raise AttributeError(f"Attribute {item} not found")
+
+
+ ssl_model = cnhubert.get_model()
+ if is_half == True:
+     ssl_model = ssl_model.half().to(device)
+ else:
+     ssl_model = ssl_model.to(device)
+
+ resample_transform_dict={}
+ def resample(audio_tensor, sr0):
+     global resample_transform_dict
+     if sr0 not in resample_transform_dict:
+         resample_transform_dict[sr0] = torchaudio.transforms.Resample(
+             sr0, 24000
+         ).to(device)
+     return resample_transform_dict[sr0](audio_tensor)
+
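+ # Illustrative example: resample(torch.zeros(1, 48000, device=device), 48000)
+ # returns a [1, 24000] tensor, since every cached transform targets 24 kHz.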
+ def change_sovits_weights(sovits_path,prompt_language=None,text_language=None):
+     global vq_model, hps, version, model_version, dict_language
+     '''
+     v1:about 82942KB
+         half thr:82978KB
+     v2:about 83014KB
+         half thr:100MB
+     v1base:103490KB
+         half thr:103520KB
+     v2base:103551KB
+     v3:about 750MB
+
+     ~82978K~100M~103420~700M
+     v1-v2-v1base-v2base-v3
+     version:
+         symbols version and timbre_embedding version
+     model_version:
+         sovits is v1/2 (VITS) or v3 (shortcut CFM DiT)
+     '''
+     size=os.path.getsize(sovits_path)
+     if size<82978*1024:
+         model_version=version="v1"
+     elif size<100*1024*1024:
+         model_version=version="v2"
+     elif size<103520*1024:
+         model_version=version="v1"
+     elif size<700*1024*1024:
+         model_version = version = "v2"
+     else:
+         version = "v2"
+         model_version="v3"
+
+     dict_language = dict_language_v1 if version =='v1' else dict_language_v2
+     if prompt_language is not None and text_language is not None:
+         if prompt_language in list(dict_language.keys()):
+             prompt_text_update, prompt_language_update = {'__type__':'update'}, {'__type__':'update', 'value':prompt_language}
+         else:
+             prompt_text_update = {'__type__':'update', 'value':''}
+             prompt_language_update = {'__type__':'update', 'value':i18n("中文")}
+         if text_language in list(dict_language.keys()):
+             text_update, text_language_update = {'__type__':'update'}, {'__type__':'update', 'value':text_language}
+         else:
+             text_update = {'__type__':'update', 'value':''}
+             text_language_update = {'__type__':'update', 'value':i18n("中文")}
+         if model_version=="v3":
+             visible_sample_steps=True
+             visible_inp_refs=False
+         else:
+             visible_sample_steps=False
+             visible_inp_refs=True
+         yield {'__type__':'update', 'choices':list(dict_language.keys())}, {'__type__':'update', 'choices':list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update, {"__type__": "update", "visible": visible_sample_steps}, {"__type__": "update", "visible": visible_inp_refs}, {"__type__": "update", "value": False, "interactive": True if model_version!="v3" else False}
+
+     dict_s2 = torch.load(sovits_path, map_location="cpu", weights_only=False)
+     hps = dict_s2["config"]
+     hps = DictToAttrRecursive(hps)
+     hps.model.semantic_frame_rate = "25hz"
+     if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
+         hps.model.version = "v1"
+     else:
+         hps.model.version = "v2"
+     version=hps.model.version
+     # print("sovits version:", hps.model.version)
+     if model_version!="v3":
+         vq_model = SynthesizerTrn(
+             hps.data.filter_length // 2 + 1,
+             hps.train.segment_size // hps.data.hop_length,
+             n_speakers=hps.data.n_speakers,
+             **hps.model
+         )
+         model_version=version
+     else:
+         vq_model = SynthesizerTrnV3(
+             hps.data.filter_length // 2 + 1,
+             hps.train.segment_size // hps.data.hop_length,
+             n_speakers=hps.data.n_speakers,
+             **hps.model
+         )
+     if ("pretrained" not in sovits_path):
+         try:
+             del vq_model.enc_q
+         except:pass
+     if is_half == True:
+         vq_model = vq_model.half().to(device)
+     else:
+         vq_model = vq_model.to(device)
+     vq_model.eval()
+     print("loading sovits_%s"%model_version,vq_model.load_state_dict(dict_s2["weight"], strict=False))
+     with open("./weight.json")as f:
+         data=f.read()
+         data=json.loads(data)
+         data["SoVITS"][version]=sovits_path
+     with open("./weight.json","w")as f:f.write(json.dumps(data))
+
+
+ try:next(change_sovits_weights(sovits_path))
+ except:pass
+
+ def change_gpt_weights(gpt_path):
+     global hz, max_sec, t2s_model, config
+     hz = 50
+     dict_s1 = torch.load(gpt_path, map_location="cpu")
+     config = dict_s1["config"]
+     max_sec = config["data"]["max_sec"]
+     t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
+     t2s_model.load_state_dict(dict_s1["weight"])
+     if is_half == True:
+         t2s_model = t2s_model.half()
+     t2s_model = t2s_model.to(device)
+     t2s_model.eval()
+     # total = sum([param.nelement() for param in t2s_model.parameters()])
+     # print("Number of parameter: %.2fM" % (total / 1e6))
+     with open("./weight.json")as f:
+         data=f.read()
+         data=json.loads(data)
+         data["GPT"][version]=gpt_path
+     with open("./weight.json","w")as f:f.write(json.dumps(data))
+
+
+ change_gpt_weights(gpt_path)
+ os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
+ import torch,soundfile
+ now_dir = os.getcwd()
+
+ def init_bigvgan():
+     global model
+     from BigVGAN import bigvgan
+     model = bigvgan.BigVGAN.from_pretrained("%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,), use_cuda_kernel=False)  # if True, RuntimeError: Ninja is required to load C++ extensions
+     # remove weight norm in the model and set to eval mode
+     model.remove_weight_norm()
+     model = model.eval()
+     if is_half == True:
+         model = model.half().to(device)
+     else:
+         model = model.to(device)
+
+ if model_version!="v3":model=None
+ else:init_bigvgan()
+
+
+ def get_spepc(hps, filename):
+     audio = load_audio(filename, int(hps.data.sampling_rate))
+     audio = torch.FloatTensor(audio)
+     maxx=audio.abs().max()
+     if(maxx>1):audio/=min(2,maxx)
+     audio_norm = audio
+     audio_norm = audio_norm.unsqueeze(0)
+     spec = spectrogram_torch(
+         audio_norm,
+         hps.data.filter_length,
+         hps.data.sampling_rate,
+         hps.data.hop_length,
+         hps.data.win_length,
+         center=False,
+     )
+     return spec
+
+ def clean_text_inf(text, language, version):
+     phones, word2ph, norm_text = clean_text(text, language, version)
+     phones = cleaned_text_to_sequence(phones, version)
+     return phones, word2ph, norm_text
+
+ dtype=torch.float16 if is_half == True else torch.float32
+ def get_bert_inf(phones, word2ph, norm_text, language):
+     language=language.replace("all_","")
+     if language == "zh":
+         bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
+     else:
+         bert = torch.zeros(
+             (1024, len(phones)),
+             dtype=torch.float16 if is_half == True else torch.float32,
+         ).to(device)
+
+     return bert
+
+
+ splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", }
+
+
+ def get_first(text):
+     pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
+     text = re.split(pattern, text)[0].strip()
+     return text
+
+ from text import chinese
+ def get_phones_and_bert(text,language,version,final=False):
+     if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
+         language = language.replace("all_","")
+         if language == "en":
+             formattext = text
+         else:
+             # CJK han characters cannot be told apart reliably, so trust the user's language choice
+             formattext = text
+         while "  " in formattext:
+             formattext = formattext.replace("  ", " ")
+         if language == "zh":
+             if re.search(r'[A-Za-z]', formattext):
+                 formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
+                 formattext = chinese.mix_text_normalize(formattext)
+                 return get_phones_and_bert(formattext,"zh",version)
+             else:
+                 phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
+                 bert = get_bert_feature(norm_text, word2ph).to(device)
+         elif language == "yue" and re.search(r'[A-Za-z]', formattext):
+             formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
+             formattext = chinese.mix_text_normalize(formattext)
+             return get_phones_and_bert(formattext,"yue",version)
+         else:
+             phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
+             bert = torch.zeros(
+                 (1024, len(phones)),
+                 dtype=torch.float16 if is_half == True else torch.float32,
+             ).to(device)
+     elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
+         textlist=[]
+         langlist=[]
+         if language == "auto":
+             for tmp in LangSegmenter.getTexts(text):
+                 langlist.append(tmp["lang"])
+                 textlist.append(tmp["text"])
+         elif language == "auto_yue":
+             for tmp in LangSegmenter.getTexts(text):
+                 if tmp["lang"] == "zh":
+                     tmp["lang"] = "yue"
+                 langlist.append(tmp["lang"])
+                 textlist.append(tmp["text"])
+         else:
+             for tmp in LangSegmenter.getTexts(text):
+                 if tmp["lang"] == "en":
+                     langlist.append(tmp["lang"])
+                 else:
+                     # CJK han characters cannot be told apart reliably, so trust the user's language choice
+                     langlist.append(language)
+                 textlist.append(tmp["text"])
+         print(textlist)
+         print(langlist)
+         phones_list = []
+         bert_list = []
+         norm_text_list = []
+         for i in range(len(textlist)):
+             lang = langlist[i]
+             phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
+             bert = get_bert_inf(phones, word2ph, norm_text, lang)
+             phones_list.append(phones)
+             norm_text_list.append(norm_text)
+             bert_list.append(bert)
+         bert = torch.cat(bert_list, dim=1)
+         phones = sum(phones_list, [])
+         norm_text = ''.join(norm_text_list)
+
+     if not final and len(phones) < 6:
+         return get_phones_and_bert("." + text,language,version,final=True)
+
+     return phones,bert.to(dtype),norm_text
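+ # Returns a triple: (list of phone ids, BERT feature tensor of shape
+ # [1024, len(phones)], normalized text). For mixed-language input, the
+ # per-segment features are concatenated along the time axis.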
+
+ from module.mel_processing import spectrogram_torch,spec_to_mel_torch
+ def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
+     spec=spectrogram_torch(y,n_fft,sampling_rate,hop_size,win_size,center)
+     mel=spec_to_mel_torch(spec,n_fft,num_mels,sampling_rate,fmin,fmax)
+     return mel
+ mel_fn_args = {
+     "n_fft": 1024,
+     "win_size": 1024,
+     "hop_size": 256,
+     "num_mels": 100,
+     "sampling_rate": 24000,
+     "fmin": 0,
+     "fmax": None,
+     "center": False
+ }
+
+ spec_min = -12
+ spec_max = 2
+ def norm_spec(x):
+     return (x - spec_min) / (spec_max - spec_min) * 2 - 1
+ def denorm_spec(x):
+     return (x + 1) / 2 * (spec_max - spec_min) + spec_min
+ mel_fn=lambda x: mel_spectrogram(x, **mel_fn_args)
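+ # Worked example: norm_spec maps [spec_min, spec_max] = [-12, 2] linearly onto
+ # [-1, 1], so norm_spec(-12) == -1, norm_spec(2) == 1, norm_spec(-5) == 0, and
+ # denorm_spec(norm_spec(x)) == x for any x.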
+
+
+ def merge_short_text_in_array(texts, threshold):
+     if (len(texts)) < 2:
+         return texts
+     result = []
+     text = ""
+     for ele in texts:
+         text += ele
+         if len(text) >= threshold:
+             result.append(text)
+             text = ""
+     if (len(text) > 0):
+         if len(result) == 0:
+             result.append(text)
+         else:
+             result[len(result) - 1] += text
+     return result
+
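+ # Example: merge_short_text_in_array(["你好,", "世界。", "!"], 5) returns
+ # ["你好,世界。!"] — fragments accumulate until they reach the threshold,
+ # and a short tail is appended to the previous chunk.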
+ ## ref_wav_path + prompt_text + prompt_language + text (single) + text_language + top_k + top_p + temperature
+ # cache_tokens={}  # no cache-eviction mechanism implemented yet
+ cache= {}
+ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False,speed=1,if_freeze=False,inp_refs=None,sample_steps=8):
+     global cache
+     if ref_wav_path:pass
+     else:gr.Warning(i18n('请上传参考音频'))
+     if text:pass
+     else:gr.Warning(i18n('请填入推理文本'))
+     t = []
+     if prompt_text is None or len(prompt_text) == 0:
+         ref_free = True
+     if model_version=="v3":ref_free=False  # s2v3 does not support ref_free yet
+     t0 = ttime()
+     prompt_language = dict_language[prompt_language]
+     text_language = dict_language[text_language]
+
+
+     if not ref_free:
+         prompt_text = prompt_text.strip("\n")
+         if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
+         print(i18n("实际输入的参考文本:"), prompt_text)
+     text = text.strip("\n")
+     # if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
+
+     print(i18n("实际输入的目标文本:"), text)
+     zero_wav = np.zeros(
+         int(hps.data.sampling_rate * 0.3),
+         dtype=np.float16 if is_half == True else np.float32,
+     )
+     if not ref_free:
+         with torch.no_grad():
+             wav16k, sr = librosa.load(ref_wav_path, sr=16000)
+             if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
+                 gr.Warning(i18n("参考音频在3~10秒范围外,请更换!"))
+                 raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
+             wav16k = torch.from_numpy(wav16k)
+             zero_wav_torch = torch.from_numpy(zero_wav)
+             if is_half == True:
+                 wav16k = wav16k.half().to(device)
+                 zero_wav_torch = zero_wav_torch.half().to(device)
+             else:
+                 wav16k = wav16k.to(device)
+                 zero_wav_torch = zero_wav_torch.to(device)
+             wav16k = torch.cat([wav16k, zero_wav_torch])
+             ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
+                 "last_hidden_state"
+             ].transpose(
+                 1, 2
+             )  # .float()
+             codes = vq_model.extract_latent(ssl_content)
+             prompt_semantic = codes[0, 0]
+             prompt = prompt_semantic.unsqueeze(0).to(device)
+
+     t1 = ttime()
+     t.append(t1-t0)
+
+     if (how_to_cut == i18n("凑四句一切")):
+         text = cut1(text)
+     elif (how_to_cut == i18n("凑50字一切")):
+         text = cut2(text)
+     elif (how_to_cut == i18n("按中文句号。切")):
+         text = cut3(text)
+     elif (how_to_cut == i18n("按英文句号.切")):
+         text = cut4(text)
+     elif (how_to_cut == i18n("按标点符号切")):
+         text = cut5(text)
+     while "\n\n" in text:
+         text = text.replace("\n\n", "\n")
+     print(i18n("实际输入的目标文本(切句后):"), text)
+     texts = text.split("\n")
+     texts = process_text(texts)
+     texts = merge_short_text_in_array(texts, 5)
+     audio_opt = []
+     ### s2v3 does not support ref_free yet
+     if not ref_free:
+         phones1,bert1,norm_text1=get_phones_and_bert(prompt_text, prompt_language, version)
+
+     for i_text,text in enumerate(texts):
+         # skip empty lines in the target text, which would otherwise raise errors
+         if (len(text.strip()) == 0):
+             continue
+         if (text[-1] not in splits): text += "。" if text_language != "en" else "."
+         print(i18n("实际输入的目标文本(每句):"), text)
+         phones2,bert2,norm_text2=get_phones_and_bert(text, text_language, version)
+         print(i18n("前端处理后的文本(每句):"), norm_text2)
+         if not ref_free:
+             bert = torch.cat([bert1, bert2], 1)
+             all_phoneme_ids = torch.LongTensor(phones1+phones2).to(device).unsqueeze(0)
+         else:
+             bert = bert2
+             all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
+
+         bert = bert.to(device).unsqueeze(0)
+         all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
+
+         t2 = ttime()
+         # cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature)
+         # print(cache.keys(),if_freeze)
+         if(i_text in cache and if_freeze==True):pred_semantic=cache[i_text]
+         else:
+             with torch.no_grad():
+                 pred_semantic, idx = t2s_model.model.infer_panel(
+                     all_phoneme_ids,
+                     all_phoneme_len,
+                     None if ref_free else prompt,
+                     bert,
+                     # prompt_phone_len=ph_offset,
+                     top_k=top_k,
+                     top_p=top_p,
+                     temperature=temperature,
+                     early_stop_num=hz * max_sec,
+                 )
+                 pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
+                 cache[i_text]=pred_semantic
+         t3 = ttime()
+         ### the refers/inp_refs logic below does not exist for v3
+         if model_version!="v3":
+             refers=[]
+             if(inp_refs):
+                 for path in inp_refs:
+                     try:
+                         refer = get_spepc(hps, path.name).to(dtype).to(device)
+                         refers.append(refer)
+                     except:
+                         traceback.print_exc()
+             if(len(refers)==0):refers = [get_spepc(hps, ref_wav_path).to(dtype).to(device)]
+             audio = (vq_model.decode(pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers,speed=speed).detach().cpu().numpy()[0, 0])
+         else:
+             refer = get_spepc(hps, ref_wav_path).to(device).to(dtype)  # resampling to 32k is needed here: the src is 24k and there is no separate 32k src, so this cannot be split into two paths
+             phoneme_ids0=torch.LongTensor(phones1).to(device).unsqueeze(0)
+             phoneme_ids1=torch.LongTensor(phones2).to(device).unsqueeze(0)
+             fea_ref,ge = vq_model.decode_encp(prompt.unsqueeze(0), phoneme_ids0, refer)
+             ref_audio, sr = torchaudio.load(ref_wav_path)
+             ref_audio=ref_audio.to(device).float()
+             if (ref_audio.shape[0] == 2):
+                 ref_audio = ref_audio.mean(0).unsqueeze(0)
+             if sr!=24000:
+                 ref_audio=resample(ref_audio,sr)
+             mel2 = mel_fn(ref_audio.to(dtype))
+             mel2 = norm_spec(mel2)
+             T_min = min(mel2.shape[2], fea_ref.shape[2])
+             mel2 = mel2[:, :, :T_min]
+             fea_ref = fea_ref[:, :, :T_min]
+             if (T_min > 468):
+                 mel2 = mel2[:, :, -468:]
+                 fea_ref = fea_ref[:, :, -468:]
+                 T_min = 468
+             chunk_len = 934 - T_min
+             fea_todo, ge = vq_model.decode_encp(pred_semantic, phoneme_ids1, refer, ge)
+             cfm_resss = []
+             idx = 0
+             while (1):
+                 fea_todo_chunk = fea_todo[:, :, idx:idx + chunk_len]
+                 if (fea_todo_chunk.shape[-1] == 0): break
+                 idx += chunk_len
+                 fea = torch.cat([fea_ref, fea_todo_chunk], 2).transpose(2, 1)
+                 cfm_res = vq_model.cfm.inference(fea, torch.LongTensor([fea.size(1)]).to(fea.device), mel2, sample_steps, inference_cfg_rate=0)
+                 cfm_res = cfm_res[:, :, mel2.shape[2]:]
+                 mel2 = cfm_res[:, :, -T_min:]
+                 fea_ref = fea_todo_chunk[:, :, -T_min:]
+                 cfm_resss.append(cfm_res)
+             cmf_res = torch.cat(cfm_resss, 2)
+             cmf_res = denorm_spec(cmf_res)
+             if model==None:init_bigvgan()
+             with torch.inference_mode():
+                 wav_gen = model(cmf_res)
+                 audio=wav_gen[0][0].cpu().detach().numpy()
+         max_audio=np.abs(audio).max()  # simple guard against 16-bit clipping
+         if max_audio>1:audio/=max_audio
+         audio_opt.append(audio)
+         audio_opt.append(zero_wav)
+         t4 = ttime()
+         t.extend([t2 - t1,t3 - t2, t4 - t3])
+         t1 = ttime()
+     print("%.3f\t%.3f\t%.3f\t%.3f" %
+           (t[0], sum(t[1::3]), sum(t[2::3]), sum(t[3::3]))
+           )
+     sr=hps.data.sampling_rate if model_version!="v3" else 24000
+     yield sr, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16)
+
+
+ def split(todo_text):
+     todo_text = todo_text.replace("……", "。").replace("——", ",")
+     if todo_text[-1] not in splits:
+         todo_text += "。"
+     i_split_head = i_split_tail = 0
+     len_text = len(todo_text)
+     todo_texts = []
+     while 1:
+         if i_split_head >= len_text:
+             break  # the text is guaranteed to end with punctuation, so just break; the last segment was appended in the previous iteration
+         if todo_text[i_split_head] in splits:
+             i_split_head += 1
+             todo_texts.append(todo_text[i_split_tail:i_split_head])
+             i_split_tail = i_split_head
+         else:
+             i_split_head += 1
+     return todo_texts
+
+
+ def cut1(inp):
+     inp = inp.strip("\n")
+     inps = split(inp)
+     split_idx = list(range(0, len(inps), 4))
+     split_idx[-1] = None
+     if len(split_idx) > 1:
+         opts = []
+         for idx in range(len(split_idx) - 1):
+             opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]]))
+     else:
+         opts = [inp]
+     opts = [item for item in opts if not set(item).issubset(punctuation)]
+     return "\n".join(opts)
+
+
+ def cut2(inp):
+     inp = inp.strip("\n")
+     inps = split(inp)
+     if len(inps) < 2:
+         return inp
+     opts = []
+     summ = 0
+     tmp_str = ""
+     for i in range(len(inps)):
+         summ += len(inps[i])
+         tmp_str += inps[i]
+         if summ > 50:
+             summ = 0
+             opts.append(tmp_str)
+             tmp_str = ""
+     if tmp_str != "":
+         opts.append(tmp_str)
+     # print(opts)
+     if len(opts) > 1 and len(opts[-1]) < 50:  # if the last chunk is too short, merge it with the previous one
+         opts[-2] = opts[-2] + opts[-1]
+         opts = opts[:-1]
+     opts = [item for item in opts if not set(item).issubset(punctuation)]
+     return "\n".join(opts)
+
+
+ def cut3(inp):
+     inp = inp.strip("\n")
+     opts = ["%s" % item for item in inp.strip("。").split("。")]
+     opts = [item for item in opts if not set(item).issubset(punctuation)]
+     return "\n".join(opts)
+
+ def cut4(inp):
+     inp = inp.strip("\n")
+     opts = ["%s" % item for item in inp.strip(".").split(".")]
+     opts = [item for item in opts if not set(item).issubset(punctuation)]
+     return "\n".join(opts)
+
+
+ # contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
+ def cut5(inp):
+     inp = inp.strip("\n")
+     punds = {',', '.', ';', '?', '!', '、', ',', '。', '?', '!', ';', ':', '…'}
+     mergeitems = []
+     items = []
+
+     for i, char in enumerate(inp):
+         if char in punds:
+             if char == '.' and i > 0 and i < len(inp) - 1 and inp[i - 1].isdigit() and inp[i + 1].isdigit():
+                 items.append(char)
+             else:
+                 items.append(char)
+                 mergeitems.append("".join(items))
+                 items = []
+         else:
+             items.append(char)
+
+     if items:
+         mergeitems.append("".join(items))
+
+     opt = [item for item in mergeitems if not set(item).issubset(punds)]
+     return "\n".join(opt)
+
+
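+ # Example: cut5("pi is 3.14. ok!") returns "pi is 3.14.\n ok!" — the decimal
+ # point inside 3.14 does not trigger a split, while the sentence-final
+ # punctuation does.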
+ def custom_sort_key(s):
+     # split the string into digit and non-digit parts with a regex
+     parts = re.split(r'(\d+)', s)
+     # convert the digit parts to integers, leave the rest unchanged
+     parts = [int(part) if part.isdigit() else part for part in parts]
+     return parts
+
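+ # Example: custom_sort_key("model_10.pth") -> ['model_', 10, '.pth'], so
+ # "model_10.pth" sorts after "model_2.pth" instead of before it.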
+ def process_text(texts):
+     _text=[]
+     if all(text in [None, " ", "\n",""] for text in texts):
+         raise ValueError(i18n("请输入有效文本"))
+     for text in texts:
+         if text in [None, " ", ""]:
+             pass
+         else:
+             _text.append(text)
+     return _text
+
+
+ def change_choices():
+     SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
+     return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}
+
+
+ SoVITS_weight_root=["SoVITS_weights","SoVITS_weights_v2","SoVITS_weights_v3"]
+ GPT_weight_root=["GPT_weights","GPT_weights_v2","GPT_weights_v3"]
+ for path in SoVITS_weight_root+GPT_weight_root:
+     os.makedirs(path,exist_ok=True)
+
+
+ def get_weights_names(GPT_weight_root, SoVITS_weight_root):
+     SoVITS_names = [i for i in pretrained_sovits_name]
+     for path in SoVITS_weight_root:
+         for name in os.listdir(path):
+             if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
+     GPT_names = [i for i in pretrained_gpt_name]
+     for path in GPT_weight_root:
+         for name in os.listdir(path):
+             if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
+     return SoVITS_names, GPT_names
+
+
+ SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
+
+ def html_center(text, label='p'):
+     return f"""<div style="text-align: center; margin: 100; padding: 50;">
+                 <{label} style="margin: 0; padding: 0;">{text}</{label}>
+                 </div>"""
+
+ def html_left(text, label='p'):
+     return f"""<div style="text-align: left; margin: 0; padding: 0;">
+                 <{label} style="margin: 0; padding: 0;">{text}</{label}>
+                 </div>"""
+
+ @torch.no_grad()
+ def get_code_from_ssl(ssl):
+     ssl = vq_model.ssl_proj(ssl)
+     quantized, codes, commit_loss, quantized_list = vq_model.quantizer(ssl)
+     # print(codes.shape, codes.dtype)  # [n_q, B, T]
+     return codes.transpose(0, 1)  # [B, n_q, T]
+
+
+ @torch.no_grad()
+ def get_code_from_wav(wav_path):
+     wav16k, sr = librosa.load(wav_path, sr=16000)
+     wav16k = torch.from_numpy(wav16k)
+     if is_half == True:
+         wav16k = wav16k.half().to(device)
+     else:
+         wav16k = wav16k.to(device)
+     ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
+         "last_hidden_state"
+     ].transpose(
+         1, 2
+     )  # .float()
+     codes = get_code_from_ssl(ssl_content)  # [B, n_q, T]
+
+     prompt_semantic = codes[0, 0]
+     return prompt_semantic
+
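+ # prompt_semantic is a 1-D LongTensor of codebook indices, one per semantic
+ # frame; at the 25 Hz semantic frame rate set in change_sovits_weights, a
+ # 10-second clip yields roughly 250 codes.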
+
+ def vc_main(wav_path, text, language, prompt_wav, noise_scale=0.5, top_k=20, top_p=0.6, temperature=0.6, speed=1, sample_steps=8):
+     """
+     Voice conversion entry point supporting both the v1/v2 and the v3 model versions.
+
+     Args:
+         wav_path: path to the source audio to be converted
+         text: text content of the source audio, used for phoneme extraction
+         language: language of the text
+         prompt_wav: path to the target/reference voice
+         noise_scale: noise scale (v1/v2 models)
+         top_k, top_p, temperature: sampling parameters (v3 models)
+         speed: speed factor for audio playback
+         sample_steps: number of CFM sampling steps (v3 models)
+
+     Returns:
+         Sampling rate and converted audio.
+     """
+     # Map the UI language label to its internal code
+     language = dict_language[language]
+
+     # Get phones from text
+     phones, word2ph, norm_text = clean_text_inf(text, language, version)
+
+     # Get reference audio spectrogram
+     refer = get_spepc(hps, prompt_wav).to(dtype).to(device)
+
+     # Get semantic codes from the source audio
+     source_codes = get_code_from_wav(wav_path)
+
+     if model_version != "v3":
+         # v1/v2 voice conversion: decode the quantized codes conditioned on
+         # the reference speaker embedding
+         ge = vq_model.ref_enc(refer)  # [B, D, T/1]
+         quantized = vq_model.quantizer.decode(source_codes[None, None])  # [B, D, T]
+
+         # Interpolate if necessary for 25hz models
+         if hps.model.semantic_frame_rate == "25hz":
+             quantized = F.interpolate(
+                 quantized, size=int(quantized.shape[-1] * 2), mode="nearest"
+             )
+
+         m_p, logs_p, y_mask = vq_model.enc_p(
+             quantized,
+             torch.LongTensor([quantized.shape[-1]]).to(device),
+             torch.LongTensor(phones).to(device).unsqueeze(0),
+             torch.LongTensor([len(phones)]).to(device),
+             ge
+         )
+
+         z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
+         z = vq_model.flow(z_p, y_mask, g=ge, reverse=True)
+         o = vq_model.dec((z * y_mask)[:, :, :], g=ge)  # [B, D=1, T], torch.float32 (-1, 1)
+         audio = o.detach().cpu().numpy()[0, 0]
+
+     else:
+         # v3 model voice conversion logic
+         if model is None:
+             init_bigvgan()
+
+         # get_code_from_wav returns a 1-D code sequence [T]; decode_encp takes
+         # integer codes shaped [B, n_q, T], mirroring how get_tts_wav passes
+         # prompt.unsqueeze(0) and pred_semantic
+         if source_codes.dim() == 1:  # [T]
+             semantic = source_codes.unsqueeze(0).unsqueeze(0)  # [1, 1, T]
+         elif source_codes.dim() == 2:  # [n_q, T]
+             semantic = source_codes.unsqueeze(0)  # [1, n_q, T]
+         elif source_codes.dim() == 3:  # already [B, n_q, T]
+             semantic = source_codes
+         else:
+             raise ValueError(f"Unexpected source_codes shape: {source_codes.shape}")
+
+         # Prepare phoneme IDs
+         phoneme_ids = torch.LongTensor(phones).to(device).unsqueeze(0)
+
+         # Get reference audio features and global embedding
+         fea_ref, ge = vq_model.decode_encp(semantic, phoneme_ids, refer)
+
+         # Load and process reference audio
+         ref_audio, sr = torchaudio.load(prompt_wav)
+         ref_audio = ref_audio.to(device).float()
+         if ref_audio.shape[0] == 2:  # convert stereo to mono
+             ref_audio = ref_audio.mean(0).unsqueeze(0)
+         if sr != 24000:
+             ref_audio = resample(ref_audio, sr)
+
+         # Convert to mel spectrogram and normalize
+         mel2 = mel_fn(ref_audio.to(dtype))
+         mel2 = norm_spec(mel2)
+
+         # Align the time dimensions of the reference mel and features
+         T_min = min(mel2.shape[2], fea_ref.shape[2])
+         mel2 = mel2[:, :, :T_min]
+         fea_ref = fea_ref[:, :, :T_min]
+
+         if T_min > 468:
+             mel2 = mel2[:, :, -468:]
+             fea_ref = fea_ref[:, :, -468:]
+             T_min = 468
+
+         # Process source audio features with phoneme conditioning
+         fea_todo, ge = vq_model.decode_encp(semantic, phoneme_ids, refer, ge)
+
+         # Run the CFM in chunks, sliding the prompt window forward each time
+         chunk_len = 934 - T_min
+         cfm_resss = []
+         idx = 0
+
+         while True:
+             fea_todo_chunk = fea_todo[:, :, idx:idx + chunk_len]
+             if fea_todo_chunk.shape[-1] == 0:
+                 break
+
+             idx += chunk_len
+             fea = torch.cat([fea_ref, fea_todo_chunk], 2).transpose(2, 1)
+             cfm_res = vq_model.cfm.inference(
+                 fea,
+                 torch.LongTensor([fea.size(1)]).to(fea.device),
+                 mel2,
+                 sample_steps,
+                 inference_cfg_rate=0
+             )
+
+             # Keep only the newly generated frames, then reuse each chunk's
+             # tail as the next chunk's prompt
+             cfm_res = cfm_res[:, :, mel2.shape[2]:]
+             mel2 = cfm_res[:, :, -T_min:]
+             fea_ref = fea_todo_chunk[:, :, -T_min:]
+             cfm_resss.append(cfm_res)
+
+         # Concatenate results and vocode to a waveform
+         cmf_res = torch.cat(cfm_resss, 2)
+         cmf_res = denorm_spec(cmf_res)
+
+         with torch.inference_mode():
+             wav_gen = model(cmf_res)
+             audio = wav_gen[0][0].cpu().detach().numpy()
+
+     # Normalize audio to prevent clipping
+     max_audio = np.abs(audio).max()
+     if max_audio > 1:
+         audio /= max_audio
+
+     sr = hps.data.sampling_rate if model_version != "v3" else 24000
+     return sr, (audio * 32768).astype(np.int16)
+
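+ # Illustrative call (hypothetical file paths), e.g. from a Python shell after
+ # importing this module:
+ #     sr, audio = vc_main("source.wav", "source transcript", i18n("中文"), "target_voice.wav")
+ #     soundfile.write("converted.wav", audio, sr)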
+ # Create and launch the standalone Gradio interface for voice conversion
+ def launch_vc_ui():
+     with gr.Blocks(title="GPT-SoVITS Voice Conversion") as vc_app:
+         gr.Markdown("# GPT-SoVITS Voice Conversion")
+         gr.Markdown(f"Current Model Version: {model_version}")
+
+         with gr.Row():
+             with gr.Column():
+                 source_audio = gr.Audio(type="filepath", label="Source Audio (to be converted)")
+                 text_input = gr.Textbox(label="Text content of the source audio")
+                 language_input = gr.Dropdown(
+                     choices=list(dict_language.keys()),
+                     value=i18n("中文"),
+                     label=i18n("语言 / Language")
+                 )
+                 target_audio = gr.Audio(type="filepath", label="Target Voice (reference)")
+
+                 with gr.Accordion("Advanced Settings", open=False):
+                     with gr.Row():
+                         speed = gr.Slider(
+                             minimum=0.1, maximum=5, value=1, step=0.1,
+                             label=i18n("语速 / Speed")
+                         )
+
+                     if model_version != "v3":
+                         noise_scale = gr.Slider(
+                             minimum=0.1, maximum=1.0, value=0.5, step=0.1,
+                             label="Noise Scale (V2 models only)"
+                         )
+                     else:
+                         noise_scale = gr.Slider(
+                             minimum=0.1, maximum=1.0, value=0.5, step=0.1,
+                             label="Noise Scale (ignored for V3)",
+                             visible=False
+                         )
+
+                     if model_version == "v3":
+                         sample_steps = gr.Slider(
+                             minimum=1, maximum=30, value=8, step=1,
+                             label=i18n("采样步数 / Sample Steps")
+                         )
+                         top_k = gr.Slider(
+                             minimum=1, maximum=100, value=20, step=1,
+                             label=i18n("Top K")
+                         )
+                         top_p = gr.Slider(
+                             minimum=0.1, maximum=1.0, value=0.6, step=0.1,
+                             label=i18n("Top P")
+                         )
+                         temperature = gr.Slider(
+                             minimum=0.1, maximum=1.0, value=0.6, step=0.1,
+                             label=i18n("Temperature")
+                         )
+                     else:
+                         sample_steps = gr.Slider(
+                             minimum=1, maximum=30, value=8, step=1,
+                             label=i18n("采样步数 / Sample Steps"),
+                             visible=False
+                         )
+                         top_k = gr.Slider(
+                             minimum=1, maximum=100, value=20, step=1,
+                             label=i18n("Top K"),
+                             visible=False
+                         )
+                         top_p = gr.Slider(
+                             minimum=0.1, maximum=1.0, value=0.6, step=0.1,
+                             label=i18n("Top P"),
+                             visible=False
+                         )
+                         temperature = gr.Slider(
+                             minimum=0.1, maximum=1.0, value=0.6, step=0.1,
+                             label=i18n("Temperature"),
+                             visible=False
+                         )
+
+                 go_btn = gr.Button(i18n("开始转换 / Start Conversion"), variant="primary")
+
+             with gr.Column():
+                 output_audio = gr.Audio(label=i18n("转换后的声音 / Converted Audio"))
+                 status_output = gr.Markdown("Ready")
+
+         def process_vc(source_path, text, lang, target_path, noise, k, p, temp, spd, steps):
+             try:
+                 if not source_path:
+                     return None, "Error: Source audio is required"
+                 if not target_path:
+                     return None, "Error: Target audio is required"
+                 if not text:
+                     return None, "Error: Text content is required"
+
+                 return vc_main(
+                     source_path, text, lang, target_path,
+                     noise_scale=noise,
+                     top_k=k,
+                     top_p=p,
+                     temperature=temp,
+                     speed=spd,
+                     sample_steps=steps
+                 ), "Conversion completed successfully"
+             except Exception as e:
+                 import traceback
+                 return None, f"Error: {str(e)}\n{traceback.format_exc()}"
+
+         go_btn.click(
+             fn=process_vc,
+             inputs=[
+                 source_audio, text_input, language_input, target_audio,
+                 noise_scale, top_k, top_p, temperature, speed, sample_steps
+             ],
+             outputs=[output_audio, status_output]
+         )
+
+     # Launch the app on infer_ttswebui + 1 to avoid conflicts with the TTS webui port
+     vc_app.launch(
+         server_port=infer_ttswebui + 1,
+         share=True,
+     )
+
+ if __name__ == "__main__":
+     print(f"Launching Voice Conversion UI with model version: {model_version}")
+     launch_vc_ui()