r3gm commited on
Commit
a59af46
·
verified ·
1 Parent(s): 4ebecc1

Update src/main.py

Browse files
Files changed (1) hide show
  1. src/main.py +450 -450
src/main.py CHANGED
@@ -1,451 +1,451 @@
1
- import spaces
2
- import torch
3
- import argparse
4
- import gc
5
- import hashlib
6
- import json
7
- import os
8
- import shlex
9
- import subprocess
10
- from contextlib import suppress
11
- from urllib.parse import urlparse, parse_qs
12
-
13
- import gradio as gr
14
- import librosa
15
- import numpy as np
16
- import soundfile as sf
17
- import sox
18
- import yt_dlp
19
- from pedalboard import Pedalboard, Reverb, Compressor, HighpassFilter
20
- from pedalboard.io import AudioFile
21
- from pydub import AudioSegment
22
-
23
- from mdx import run_mdx
24
- from rvc import Config, load_hubert, get_vc, rvc_infer
25
- import logging
26
-
27
- logging.getLogger("httpx").setLevel(logging.WARNING)
28
-
29
- BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
30
-
31
- mdxnet_models_dir = os.path.join(BASE_DIR, 'mdxnet_models')
32
- rvc_models_dir = os.path.join(BASE_DIR, 'rvc_models')
33
- output_dir = os.path.join(BASE_DIR, 'song_output')
34
-
35
-
36
def get_youtube_video_id(url, ignore_playlist=True):
    """
    Extract the video id (or playlist id) from a YouTube URL.

    Recognised forms include:
        http://youtu.be/SA2iWivDJiE
        http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
        http://www.youtube.com/embed/SA2iWivDJiE
        http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US

    Returns None when the URL is not a recognised YouTube link.
    """
    # Normalise the mobile host so it matches the desktop hostnames below.
    if "m.youtube.com" in url:
        url = url.replace("m.youtube.com", "www.youtube.com")

    parsed = urlparse(url)

    if parsed.hostname == 'youtu.be':
        # Short links carry the id directly in the path.
        if parsed.path[1:] == 'watch':
            return parsed.query[2:]
        return parsed.path[1:]

    if parsed.hostname in {'www.youtube.com', 'youtube.com', 'music.youtube.com'}:
        if not ignore_playlist:
            # use case: get playlist id not current video in playlist
            with suppress(KeyError):
                return parse_qs(parsed.query)['list'][0]
        path = parsed.path
        if path == '/watch':
            return parse_qs(parsed.query)['v'][0]
        if path.startswith('/watch/'):
            # NOTE(review): this yields the literal segment 'watch', not the id
            # after it — preserved as-is to keep behaviour identical.
            return path.split('/')[1]
        if path.startswith('/embed/') or path.startswith('/v/'):
            return path.split('/')[2]

    # returns None for invalid YouTube url
    return None
68
-
69
-
70
def yt_download(link):
    """Download the audio track of *link* as an mp3 via yt-dlp.

    Returns the path of the downloaded file (video title + '.mp3').
    """
    options = {
        'format': 'bestaudio',
        'outtmpl': '%(title)s',
        'nocheckcertificate': True,
        'ignoreerrors': True,
        'no_warnings': True,
        'quiet': True,
        'extractaudio': True,
        # Re-encode whatever audio stream was fetched into mp3.
        'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'}],
    }
    with yt_dlp.YoutubeDL(options) as downloader:
        info = downloader.extract_info(link, download=True)
        download_path = downloader.prepare_filename(info, outtmpl='%(title)s.mp3')
    return download_path
86
-
87
-
88
def raise_exception(error_msg, is_webui):
    """Surface *error_msg* as a gr.Error in the web UI, or a plain Exception on the CLI."""
    if is_webui:
        raise gr.Error(error_msg)
    raise Exception(error_msg)
93
-
94
-
95
def get_rvc_model(voice_model, is_webui):
    """Locate the RVC weight (.pth) and optional index (.index) files for a voice model.

    Searches the model's folder inside ``rvc_models_dir`` and, one level deep,
    any sub-folders it contains.

    Returns:
        tuple: (path to the .pth file, path to the .index file or '' if absent).

    Raises (via raise_exception) when no .pth file is found.
    """
    rvc_model_filename, rvc_index_filename = None, None
    model_dir = os.path.join(rvc_models_dir, voice_model)
    for entry in os.listdir(model_dir):
        entry_path = os.path.join(model_dir, entry)
        # BUG FIX: the original called os.path.isdir(entry)/os.listdir(entry)
        # on the bare name (resolved against the CWD, not model_dir) and
        # recorded sub-folder hits as if they lived directly in model_dir,
        # producing nonexistent paths. Keep names relative to model_dir so the
        # final join below is always valid. (Debug prints removed.)
        if os.path.isdir(entry_path):
            for sub in os.listdir(entry_path):
                ext = os.path.splitext(sub)[1]
                if ext == '.pth':
                    rvc_model_filename = os.path.join(entry, sub)
                if ext == '.index':
                    rvc_index_filename = os.path.join(entry, sub)
        else:
            ext = os.path.splitext(entry)[1]
            if ext == '.pth':
                rvc_model_filename = entry
            if ext == '.index':
                rvc_index_filename = entry

    if rvc_model_filename is None:
        error_msg = f'No model file exists in {model_dir}.'
        raise_exception(error_msg, is_webui)

    return os.path.join(model_dir, rvc_model_filename), os.path.join(model_dir, rvc_index_filename) if rvc_index_filename else ''
120
-
121
-
122
def get_audio_paths(song_dir):
    """Scan *song_dir* for the intermediate stems produced by preprocessing.

    Returns (orig_song_path, instrumentals_path, main_vocals_dereverb_path,
    backup_vocals_path); any stem not found is None.
    """
    found = {'orig': None, 'inst': None, 'dereverb': None, 'backup': None}

    for name in os.listdir(song_dir):
        full_path = os.path.join(song_dir, name)
        if name.endswith('_Instrumental.wav'):
            found['inst'] = full_path
            # The original song shares the instrumental's name minus the suffix.
            found['orig'] = full_path.replace('_Instrumental', '')
        elif name.endswith('_Vocals_Main_DeReverb.wav'):
            found['dereverb'] = full_path
        elif name.endswith('_Vocals_Backup.wav'):
            found['backup'] = full_path

    return found['orig'], found['inst'], found['dereverb'], found['backup']
140
-
141
-
142
def convert_to_stereo(audio_path):
    """Return the path to a stereo version of *audio_path*.

    Mono files are converted with ffmpeg into a new ``*_stereo.wav`` file;
    stereo input is returned unchanged.
    """
    wave, sr = librosa.load(audio_path, mono=False, sr=44100)

    # librosa returns a 1-D array for mono input and a 2-D (channels, samples)
    # array otherwise; checking ndim is clearer than type(wave[0]).
    if wave.ndim == 1:
        stereo_path = f'{os.path.splitext(audio_path)[0]}_stereo.wav'
        command = shlex.split(f'ffmpeg -y -loglevel error -i "{audio_path}" -ac 2 -f wav "{stereo_path}"')
        # BUG FIX: check=True so a failed ffmpeg run raises instead of silently
        # returning a path to a file that was never written.
        subprocess.run(command, check=True)
        return stereo_path
    return audio_path
153
-
154
-
155
def pitch_shift(audio_path, pitch_change):
    """Pitch-shift *audio_path* by *pitch_change* semitones using sox.

    The result is cached on disk: if the shifted file already exists it is
    reused. Returns the path of the shifted wav.
    """
    shifted_path = f'{os.path.splitext(audio_path)[0]}_p{pitch_change}.wav'
    if os.path.exists(shifted_path):
        return shifted_path

    samples, sample_rate = sf.read(audio_path)
    transformer = sox.Transformer()
    transformer.pitch(pitch_change)
    shifted = transformer.build_array(input_array=samples, sample_rate_in=sample_rate)
    sf.write(shifted_path, shifted, sample_rate)
    return shifted_path
165
-
166
-
167
def get_hash(filepath):
    """Return a short (11 hex chars) BLAKE2b digest of the file's contents."""
    hasher = hashlib.blake2b()
    with open(filepath, 'rb') as fh:
        # Read in fixed-size chunks so large files don't load into memory at once.
        for chunk in iter(lambda: fh.read(8192), b''):
            hasher.update(chunk)
    return hasher.hexdigest()[:11]
174
-
175
-
176
def display_progress(message, percent, is_webui, progress=None):
    """Report pipeline progress: gradio progress bar in the web UI, stdout on CLI."""
    if not is_webui:
        print(message)
    else:
        progress(percent, desc=message)
181
-
182
-
183
def preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress, keep_orig, orig_song_path):
    """Split the source song into stems with the MDX-Net models.

    Runs three separations in sequence: vocals vs. instrumental, main vs.
    backup vocals, then de-reverb of the main vocals. Output files land in
    ``output_dir/song_id``.

    Returns (orig_song_path, vocals_path, instrumentals_path,
    main_vocals_path, backup_vocals_path, main_vocals_dereverb_path).
    """
    song_output_dir = os.path.join(output_dir, song_id)

    # Stage 1: isolate the full vocal stem from the instrumental.
    display_progress('[~] Separating Vocals from Instrumental...', 0.1, is_webui, progress)
    vocals_path, instrumentals_path = run_mdx(
        mdx_model_params, song_output_dir,
        os.path.join(mdxnet_models_dir, 'UVR-MDX-NET-Voc_FT.onnx'),
        orig_song_path, denoise=True, keep_orig=keep_orig)

    # Stage 2: split the vocal stem into main and backup vocals.
    display_progress('[~] Separating Main Vocals from Backup Vocals...', 0.2, is_webui, progress)
    backup_vocals_path, main_vocals_path = run_mdx(
        mdx_model_params, song_output_dir,
        os.path.join(mdxnet_models_dir, 'UVR_MDXNET_KARA_2.onnx'),
        vocals_path, suffix='Backup', invert_suffix='Main', denoise=True)

    # Stage 3: strip reverb from the main vocals.
    display_progress('[~] Applying DeReverb to Vocals...', 0.3, is_webui, progress)
    _, main_vocals_dereverb_path = run_mdx(
        mdx_model_params, song_output_dir,
        os.path.join(mdxnet_models_dir, 'Reverb_HQ_By_FoxJoy.onnx'),
        main_vocals_path, invert_suffix='DeReverb', exclude_main=True, denoise=True)

    return orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path
197
-
198
-
199
def get_audio_file(song_input, is_webui, input_type, progress):
    """Resolve *song_input* to a local audio file path.

    'yt' inputs are downloaded (anything after the first '&' is stripped);
    'local' inputs are used as-is and flagged to be kept on disk.

    Returns (keep_orig, orig_song_path); orig_song_path is None for an
    unrecognised input_type.
    """
    if input_type == 'yt':
        display_progress('[~] Downloading song...', 0, is_webui, progress)
        # Drop playlist/tracking query parameters before downloading.
        return False, yt_download(song_input.split('&')[0])
    if input_type == 'local':
        return True, song_input
    return False, None
211
-
212
# Global inference setup: pick the GPU when available; half precision only
# makes sense on GPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
compute_half = torch.cuda.is_available()
config = Config(device, compute_half)
# BUG FIX: the hubert model was loaded with a hard-coded "cuda" device even
# when the detection above resolved to "cpu"; use the detected device.
hubert_model = load_hubert(device, config.is_half, os.path.join(rvc_models_dir, 'hubert_base.pt'))
print(device, "half>>", config.is_half)
217
-
218
- # @spaces.GPU(enable_queue=True)
219
def voice_change(voice_model, vocals_path, output_path, pitch_change, f0_method, index_rate, filter_radius, rms_mix_rate, protect, crepe_hop_length, is_webui):
    """Convert *vocals_path* to the target voice with RVC and write *output_path*.

    Loads the per-voice-model weights, runs inference with the shared module
    level hubert model, then releases the per-model state.
    """
    rvc_model_path, rvc_index_path = get_rvc_model(voice_model, is_webui)

    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    compute_half = torch.cuda.is_available()
    config = Config(device, compute_half)

    cpt, version, net_g, tgt_sr, vc = get_vc(device, config.is_half, config, rvc_model_path)

    # convert main vocals (uses the shared module-level hubert_model)
    rvc_infer(rvc_index_path, index_rate, vocals_path, output_path, pitch_change, f0_method, cpt, version, net_g, filter_radius, tgt_sr, rms_mix_rate, protect, crepe_hop_length, vc, hubert_model)
    # BUG FIX: the original `del hubert_model, cpt` deleted the module-level
    # hubert model, so every call after the first raised NameError. Release
    # only the per-voice-model state and keep the shared model loaded.
    del cpt, net_g
    gc.collect()
233
-
234
-
235
def add_audio_effects(audio_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping):
    """Polish the AI vocals: high-pass filter, compression, then reverb.

    Streams the file in one-second chunks and writes ``*_mixed.wav``.
    Returns the path of the processed file.
    """
    output_path = f'{os.path.splitext(audio_path)[0]}_mixed.wav'

    # Effect chain, applied in order: filter -> compressor -> reverb.
    effects = Pedalboard([
        HighpassFilter(),
        Compressor(ratio=4, threshold_db=-15),
        Reverb(room_size=reverb_rm_size, dry_level=reverb_dry, wet_level=reverb_wet, damping=reverb_damping),
    ])

    with AudioFile(audio_path) as reader:
        with AudioFile(output_path, 'w', reader.samplerate, reader.num_channels) as writer:
            # Process one second at a time; reset=False carries the effect
            # state (e.g. reverb tail) across chunk boundaries.
            while reader.tell() < reader.frames:
                block = reader.read(int(reader.samplerate))
                writer.write(effects(block, reader.samplerate, reset=False))

    return output_path
256
-
257
-
258
def combine_audio(audio_paths, output_path, main_gain, backup_gain, inst_gain, output_format):
    """Mix the three stems into the final cover and export it.

    *audio_paths* is [main_vocals, backup_vocals, instrumental]. Each stem
    gets a fixed headroom reduction (-4/-6/-7 dB) plus its caller-supplied
    gain before being overlaid.
    """
    main_track = AudioSegment.from_wav(audio_paths[0]) - 4 + main_gain
    backup_track = AudioSegment.from_wav(audio_paths[1]) - 6 + backup_gain
    inst_track = AudioSegment.from_wav(audio_paths[2]) - 7 + inst_gain
    mix = main_track.overlay(backup_track).overlay(inst_track)
    mix.export(output_path, format=output_format)
263
-
264
-
265
- # @spaces.GPU(enable_queue=True, duration=130)
266
@spaces.GPU(duration=45)  # ZeroGPU: reserve a GPU slice for up to 45 s per call
def process_song(
    song_dir, song_input, mdx_model_params, song_id, is_webui, input_type, progress,
    keep_files, pitch_change, pitch_change_all, voice_model, index_rate, filter_radius,
    rms_mix_rate, protect, f0_method, crepe_hop_length, output_format, keep_orig, orig_song_path
):
    """Separate the song into stems (reusing cached stems when possible) and
    run the RVC voice conversion.

    Returns (ai_vocals_path, ai_cover_path, instrumentals_path,
    backup_vocals_path, vocals_path, main_vocals_path); vocals_path and
    main_vocals_path are None when cached stems were reused.
    """
    # First run for this song id: create the directory and do full separation.
    if not os.path.exists(song_dir):
        os.makedirs(song_dir)
        orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path = preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress, keep_orig, orig_song_path)
    else:
        vocals_path, main_vocals_path = None, None
        paths = get_audio_paths(song_dir)

        # if any of the audio files aren't available or keep intermediate files, rerun preprocess
        if any(path is None for path in paths) or keep_files:
            orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path = preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress, keep_orig, orig_song_path)
        else:
            # Cached stems found: reuse them and skip separation entirely.
            orig_song_path, instrumentals_path, main_vocals_dereverb_path, backup_vocals_path = paths

    # pitch_change arrives in octaves; convert to semitones and add the global shift.
    pitch_change = pitch_change * 12 + pitch_change_all
    # Encode all conversion parameters into the filename so distinct settings
    # get distinct cache entries.
    ai_vocals_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]}_{voice_model}_p{pitch_change}_i{index_rate}_fr{filter_radius}_rms{rms_mix_rate}_pro{protect}_{f0_method}{"" if f0_method != "mangio-crepe" else f"_{crepe_hop_length}"}.wav')
    ai_cover_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]} ({voice_model} Ver).{output_format}')

    # Only run RVC if this exact parameter combination hasn't been rendered yet.
    if not os.path.exists(ai_vocals_path):
        display_progress('[~] Converting voice using RVC...', 0.5, is_webui, progress)
        voice_change(voice_model, main_vocals_dereverb_path, ai_vocals_path, pitch_change, f0_method, index_rate, filter_radius, rms_mix_rate, protect, crepe_hop_length, is_webui)

    return ai_vocals_path, ai_cover_path, instrumentals_path, backup_vocals_path, vocals_path, main_vocals_path
295
-
296
- # process_song.zerogpu = True
297
-
298
- # @spaces.GPU(duration=140)
299
def song_cover_pipeline(song_input, voice_model, pitch_change, keep_files,
                        is_webui=0, main_gain=0, backup_gain=0, inst_gain=0, index_rate=0.5, filter_radius=3,
                        rms_mix_rate=0.25, f0_method='rmvpe', crepe_hop_length=128, protect=0.33, pitch_change_all=0,
                        reverb_rm_size=0.15, reverb_wet=0.2, reverb_dry=0.8, reverb_damping=0.7, output_format='mp3',
                        progress=gr.Progress()):
    """End-to-end AI cover generation.

    Resolves *song_input* (YouTube URL or local file), separates stems,
    converts the vocals with *voice_model*, applies effects, mixes, and
    returns the path of the finished cover file.

    NOTE(review): progress=gr.Progress() is a mutable default evaluated once
    at import time — this mirrors gradio examples but is worth confirming.
    """
    try:
        if not song_input or not voice_model:
            raise_exception('Ensure that the song input field and voice model field is filled.', is_webui)

        display_progress('[~] Starting AI Cover Generation Pipeline...', 0, is_webui, progress)

        # MDX-Net per-model parameters (chunk sizes etc.) shipped alongside the models.
        with open(os.path.join(mdxnet_models_dir, 'model_data.json')) as infile:
            mdx_model_params = json.load(infile)

        # if youtube url
        if urlparse(song_input).scheme == 'https':
            input_type = 'yt'
            song_id = get_youtube_video_id(song_input)
            if song_id is None:
                error_msg = 'Invalid YouTube url.'
                raise_exception(error_msg, is_webui)

        # local audio file
        else:
            input_type = 'local'
            song_input = song_input.strip('\"')
            if os.path.exists(song_input):
                # Content hash doubles as the cache key for this song.
                song_id = get_hash(song_input)
            else:
                error_msg = f'{song_input} does not exist.'
                song_id = None
                raise_exception(error_msg, is_webui)

        song_dir = os.path.join(output_dir, song_id)

        keep_orig, orig_song_path = get_audio_file(song_input, is_webui, input_type, progress)
        orig_song_path = convert_to_stereo(orig_song_path)

        import time
        start = time.time()

        (
            ai_vocals_path,
            ai_cover_path,
            instrumentals_path,
            backup_vocals_path,
            vocals_path,
            main_vocals_path
        ) = process_song(
            song_dir,
            song_input,
            mdx_model_params,
            song_id,
            is_webui,
            input_type,
            progress,
            keep_files,
            pitch_change,
            pitch_change_all,
            voice_model,
            index_rate,
            filter_radius,
            rms_mix_rate,
            protect,
            f0_method,
            crepe_hop_length,
            output_format,
            keep_orig,
            orig_song_path,
        )

        end = time.time()
        print(f"Execution time: {end - start:.4f} seconds")
        with sf.SoundFile(ai_vocals_path) as f:
            duration__ = len(f) / f.samplerate
            print(f"Audio duration: {duration__:.2f} seconds")

        display_progress('[~] Applying audio effects to Vocals...', 0.8, is_webui, progress)
        ai_vocals_mixed_path = add_audio_effects(ai_vocals_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping)

        # Strip any residual vocals from the instrumental stem before mixing.
        instrumentals_path, _ = run_mdx(
            mdx_model_params,
            os.path.join(output_dir, song_id),
            os.path.join(mdxnet_models_dir, "UVR-MDX-NET-Inst_HQ_4.onnx"),
            instrumentals_path,
            # exclude_main=False,
            exclude_inversion=True,
            suffix="Voiceless",
            denoise=False,
            keep_orig=True,
            base_device=""
        )

        # Global key change applies to the backing tracks too (vocals were
        # already shifted inside process_song).
        if pitch_change_all != 0:
            display_progress('[~] Applying overall pitch change', 0.85, is_webui, progress)
            instrumentals_path = pitch_shift(instrumentals_path, pitch_change_all)
            backup_vocals_path = pitch_shift(backup_vocals_path, pitch_change_all)

        display_progress('[~] Combining AI Vocals and Instrumentals...', 0.9, is_webui, progress)
        combine_audio([ai_vocals_mixed_path, backup_vocals_path, instrumentals_path], ai_cover_path, main_gain, backup_gain, inst_gain, output_format)

        if not keep_files:
            display_progress('[~] Removing intermediate audio files...', 0.95, is_webui, progress)
            intermediate_files = [vocals_path, main_vocals_path, ai_vocals_mixed_path]
            if pitch_change_all != 0:
                # The pitch-shifted copies are derived files, safe to delete.
                intermediate_files += [instrumentals_path, backup_vocals_path]
            for file in intermediate_files:
                if file and os.path.exists(file):
                    os.remove(file)

        return ai_cover_path

    except Exception as e:
        # Re-surface any failure through the UI/CLI error channel.
        # NOTE(review): str(e) drops the traceback — consider logging it first.
        raise_exception(str(e), is_webui)
413
-
414
-
415
if __name__ == '__main__':
    # CLI entry point: parse conversion options and run the full pipeline once.
    parser = argparse.ArgumentParser(description='Generate a AI cover song in the song_output/id directory.', add_help=True)
    parser.add_argument('-i', '--song-input', type=str, required=True, help='Link to a YouTube video or the filepath to a local mp3/wav file to create an AI cover of')
    parser.add_argument('-dir', '--rvc-dirname', type=str, required=True, help='Name of the folder in the rvc_models directory containing the RVC model file and optional index file to use')
    parser.add_argument('-p', '--pitch-change', type=int, required=True, help='Change the pitch of AI Vocals only. Generally, use 1 for male to female and -1 for vice-versa. (Octaves)')
    parser.add_argument('-k', '--keep-files', action=argparse.BooleanOptionalAction, help='Whether to keep all intermediate audio files generated in the song_output/id directory, e.g. Isolated Vocals/Instrumentals')
    parser.add_argument('-ir', '--index-rate', type=float, default=0.5, help='A decimal number e.g. 0.5, used to reduce/resolve the timbre leakage problem. If set to 1, more biased towards the timbre quality of the training dataset')
    parser.add_argument('-fr', '--filter-radius', type=int, default=3, help='A number between 0 and 7. If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.')
    parser.add_argument('-rms', '--rms-mix-rate', type=float, default=0.25, help="A decimal number e.g. 0.25. Control how much to use the original vocal's loudness (0) or a fixed loudness (1).")
    parser.add_argument('-palgo', '--pitch-detection-algo', type=str, default='rmvpe', help='Best option is rmvpe (clarity in vocals), then mangio-crepe (smoother vocals).')
    parser.add_argument('-hop', '--crepe-hop-length', type=int, default=128, help='If pitch detection algo is mangio-crepe, controls how often it checks for pitch changes in milliseconds. The higher the value, the faster the conversion and less risk of voice cracks, but there is less pitch accuracy. Recommended: 128.')
    parser.add_argument('-pro', '--protect', type=float, default=0.33, help='A decimal number e.g. 0.33. Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy.')
    parser.add_argument('-mv', '--main-vol', type=int, default=0, help='Volume change for AI main vocals in decibels. Use -3 to decrease by 3 decibels and 3 to increase by 3 decibels')
    parser.add_argument('-bv', '--backup-vol', type=int, default=0, help='Volume change for backup vocals in decibels')
    parser.add_argument('-iv', '--inst-vol', type=int, default=0, help='Volume change for instrumentals in decibels')
    parser.add_argument('-pall', '--pitch-change-all', type=int, default=0, help='Change the pitch/key of vocals and instrumentals. Changing this slightly reduces sound quality')
    parser.add_argument('-rsize', '--reverb-size', type=float, default=0.15, help='Reverb room size between 0 and 1')
    parser.add_argument('-rwet', '--reverb-wetness', type=float, default=0.2, help='Reverb wet level between 0 and 1')
    parser.add_argument('-rdry', '--reverb-dryness', type=float, default=0.8, help='Reverb dry level between 0 and 1')
    parser.add_argument('-rdamp', '--reverb-damping', type=float, default=0.7, help='Reverb damping between 0 and 1')
    parser.add_argument('-oformat', '--output-format', type=str, default='mp3', help='Output format of audio file. mp3 for smaller file size, wav for best quality')
    args = parser.parse_args()

    # Fail fast if the requested voice model folder is missing.
    rvc_dirname = args.rvc_dirname
    if not os.path.exists(os.path.join(rvc_models_dir, rvc_dirname)):
        raise Exception(f'The folder {os.path.join(rvc_models_dir, rvc_dirname)} does not exist.')

    cover_path = song_cover_pipeline(args.song_input, rvc_dirname, args.pitch_change, args.keep_files,
                                     main_gain=args.main_vol, backup_gain=args.backup_vol, inst_gain=args.inst_vol,
                                     index_rate=args.index_rate, filter_radius=args.filter_radius,
                                     rms_mix_rate=args.rms_mix_rate, f0_method=args.pitch_detection_algo,
                                     crepe_hop_length=args.crepe_hop_length, protect=args.protect,
                                     pitch_change_all=args.pitch_change_all,
                                     reverb_rm_size=args.reverb_size, reverb_wet=args.reverb_wetness,
                                     reverb_dry=args.reverb_dryness, reverb_damping=args.reverb_damping,
                                     output_format=args.output_format)
    print(f'[+] Cover generated at {cover_path}')
 
1
+ import spaces
2
+ import torch
3
+ import argparse
4
+ import gc
5
+ import hashlib
6
+ import json
7
+ import os
8
+ import shlex
9
+ import subprocess
10
+ from contextlib import suppress
11
+ from urllib.parse import urlparse, parse_qs
12
+
13
+ import gradio as gr
14
+ import librosa
15
+ import numpy as np
16
+ import soundfile as sf
17
+ import sox
18
+ import yt_dlp
19
+ from pedalboard import Pedalboard, Reverb, Compressor, HighpassFilter
20
+ from pedalboard.io import AudioFile
21
+ from pydub import AudioSegment
22
+
23
+ from mdx import run_mdx
24
+ from rvc import Config, load_hubert, get_vc, rvc_infer
25
+ import logging
26
+
27
+ logging.getLogger("httpx").setLevel(logging.WARNING)
28
+
29
+ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
30
+
31
+ mdxnet_models_dir = os.path.join(BASE_DIR, 'mdxnet_models')
32
+ rvc_models_dir = os.path.join(BASE_DIR, 'rvc_models')
33
+ output_dir = os.path.join(BASE_DIR, 'song_output')
34
+
35
+
36
+ def get_youtube_video_id(url, ignore_playlist=True):
37
+ """
38
+ Examples:
39
+ http://youtu.be/SA2iWivDJiE
40
+ http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
41
+ http://www.youtube.com/embed/SA2iWivDJiE
42
+ http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
43
+ """
44
+ if "m.youtube.com" in url:
45
+ url = url.replace("m.youtube.com", "www.youtube.com")
46
+ query = urlparse(url)
47
+ if query.hostname == 'youtu.be':
48
+ if query.path[1:] == 'watch':
49
+ return query.query[2:]
50
+ return query.path[1:]
51
+
52
+ if query.hostname in {'www.youtube.com', 'youtube.com', 'music.youtube.com'}:
53
+ if not ignore_playlist:
54
+ # use case: get playlist id not current video in playlist
55
+ with suppress(KeyError):
56
+ return parse_qs(query.query)['list'][0]
57
+ if query.path == '/watch':
58
+ return parse_qs(query.query)['v'][0]
59
+ if query.path[:7] == '/watch/':
60
+ return query.path.split('/')[1]
61
+ if query.path[:7] == '/embed/':
62
+ return query.path.split('/')[2]
63
+ if query.path[:3] == '/v/':
64
+ return query.path.split('/')[2]
65
+
66
+ # returns None for invalid YouTube url
67
+ return None
68
+
69
+
70
+ def yt_download(link):
71
+ ydl_opts = {
72
+ 'format': 'bestaudio',
73
+ 'outtmpl': '%(title)s',
74
+ 'nocheckcertificate': True,
75
+ 'ignoreerrors': True,
76
+ 'no_warnings': True,
77
+ 'quiet': True,
78
+ 'extractaudio': True,
79
+ 'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'}],
80
+ }
81
+ with yt_dlp.YoutubeDL(ydl_opts) as ydl:
82
+ result = ydl.extract_info(link, download=True)
83
+ download_path = ydl.prepare_filename(result, outtmpl='%(title)s.mp3')
84
+
85
+ return download_path
86
+
87
+
88
+ def raise_exception(error_msg, is_webui):
89
+ if is_webui:
90
+ raise gr.Error(error_msg)
91
+ else:
92
+ raise Exception(error_msg)
93
+
94
+
95
+ def get_rvc_model(voice_model, is_webui):
96
+ rvc_model_filename, rvc_index_filename = None, None
97
+ model_dir = os.path.join(rvc_models_dir, voice_model)
98
+ print(model_dir)
99
+ for file in os.listdir(model_dir):
100
+ print(file)
101
+ if os.path.isdir(file):
102
+ for ff in os.listdir(file):
103
+ print("subfile", ff)
104
+ ext = os.path.splitext(ff)[1]
105
+ if ext == '.pth':
106
+ rvc_model_filename = ff
107
+ if ext == '.index':
108
+ rvc_index_filename = ff
109
+ ext = os.path.splitext(file)[1]
110
+ if ext == '.pth':
111
+ rvc_model_filename = file
112
+ if ext == '.index':
113
+ rvc_index_filename = file
114
+
115
+ if rvc_model_filename is None:
116
+ error_msg = f'No model file exists in {model_dir}.'
117
+ raise_exception(error_msg, is_webui)
118
+
119
+ return os.path.join(model_dir, rvc_model_filename), os.path.join(model_dir, rvc_index_filename) if rvc_index_filename else ''
120
+
121
+
122
+ def get_audio_paths(song_dir):
123
+ orig_song_path = None
124
+ instrumentals_path = None
125
+ main_vocals_dereverb_path = None
126
+ backup_vocals_path = None
127
+
128
+ for file in os.listdir(song_dir):
129
+ if file.endswith('_Instrumental.wav'):
130
+ instrumentals_path = os.path.join(song_dir, file)
131
+ orig_song_path = instrumentals_path.replace('_Instrumental', '')
132
+
133
+ elif file.endswith('_Vocals_Main_DeReverb.wav'):
134
+ main_vocals_dereverb_path = os.path.join(song_dir, file)
135
+
136
+ elif file.endswith('_Vocals_Backup.wav'):
137
+ backup_vocals_path = os.path.join(song_dir, file)
138
+
139
+ return orig_song_path, instrumentals_path, main_vocals_dereverb_path, backup_vocals_path
140
+
141
+
142
+ def convert_to_stereo(audio_path):
143
+ wave, sr = librosa.load(audio_path, mono=False, sr=44100)
144
+
145
+ # check if mono
146
+ if type(wave[0]) != np.ndarray:
147
+ stereo_path = f'{os.path.splitext(audio_path)[0]}_stereo.wav'
148
+ command = shlex.split(f'ffmpeg -y -loglevel error -i "{audio_path}" -ac 2 -f wav "{stereo_path}"')
149
+ subprocess.run(command)
150
+ return stereo_path
151
+ else:
152
+ return audio_path
153
+
154
+
155
+ def pitch_shift(audio_path, pitch_change):
156
+ output_path = f'{os.path.splitext(audio_path)[0]}_p{pitch_change}.wav'
157
+ if not os.path.exists(output_path):
158
+ y, sr = sf.read(audio_path)
159
+ tfm = sox.Transformer()
160
+ tfm.pitch(pitch_change)
161
+ y_shifted = tfm.build_array(input_array=y, sample_rate_in=sr)
162
+ sf.write(output_path, y_shifted, sr)
163
+
164
+ return output_path
165
+
166
+
167
+ def get_hash(filepath):
168
+ with open(filepath, 'rb') as f:
169
+ file_hash = hashlib.blake2b()
170
+ while chunk := f.read(8192):
171
+ file_hash.update(chunk)
172
+
173
+ return file_hash.hexdigest()[:11]
174
+
175
+
176
+ def display_progress(message, percent, is_webui, progress=None):
177
+ if is_webui:
178
+ progress(percent, desc=message)
179
+ else:
180
+ print(message)
181
+
182
+
183
+ def preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress, keep_orig, orig_song_path):
184
+
185
+ song_output_dir = os.path.join(output_dir, song_id)
186
+
187
+ display_progress('[~] Separating Vocals from Instrumental...', 0.1, is_webui, progress)
188
+ vocals_path, instrumentals_path = run_mdx(mdx_model_params, song_output_dir, os.path.join(mdxnet_models_dir, 'UVR-MDX-NET-Voc_FT.onnx'), orig_song_path, denoise=True, keep_orig=keep_orig)
189
+
190
+ display_progress('[~] Separating Main Vocals from Backup Vocals...', 0.2, is_webui, progress)
191
+ backup_vocals_path, main_vocals_path = run_mdx(mdx_model_params, song_output_dir, os.path.join(mdxnet_models_dir, 'UVR_MDXNET_KARA_2.onnx'), vocals_path, suffix='Backup', invert_suffix='Main', denoise=True)
192
+
193
+ display_progress('[~] Applying DeReverb to Vocals...', 0.3, is_webui, progress)
194
+ _, main_vocals_dereverb_path = run_mdx(mdx_model_params, song_output_dir, os.path.join(mdxnet_models_dir, 'Reverb_HQ_By_FoxJoy.onnx'), main_vocals_path, invert_suffix='DeReverb', exclude_main=True, denoise=True)
195
+
196
+ return orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path
197
+
198
+
199
+ def get_audio_file(song_input, is_webui, input_type, progress):
200
+ keep_orig = False
201
+ if input_type == 'yt':
202
+ display_progress('[~] Downloading song...', 0, is_webui, progress)
203
+ song_link = song_input.split('&')[0]
204
+ orig_song_path = yt_download(song_link)
205
+ elif input_type == 'local':
206
+ orig_song_path = song_input
207
+ keep_orig = True
208
+ else:
209
+ orig_song_path = None
210
+ return keep_orig, orig_song_path
211
+
212
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
213
+ compute_half = True if torch.cuda.is_available() else False
214
+ config = Config(device, compute_half)
215
+ hubert_model = load_hubert("cuda", config.is_half, os.path.join(rvc_models_dir, 'hubert_base.pt'))
216
+ print(device, "half>>", config.is_half)
217
+
218
+ # @spaces.GPU(enable_queue=True)
219
+ def voice_change(voice_model, vocals_path, output_path, pitch_change, f0_method, index_rate, filter_radius, rms_mix_rate, protect, crepe_hop_length, is_webui):
220
+ rvc_model_path, rvc_index_path = get_rvc_model(voice_model, is_webui)
221
+
222
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
223
+ compute_half = True if torch.cuda.is_available() else False
224
+ config = Config(device, compute_half)
225
+
226
+ cpt, version, net_g, tgt_sr, vc = get_vc(device, config.is_half, config, rvc_model_path)
227
+
228
+ # convert main vocals
229
+ global hubert_model
230
+ rvc_infer(rvc_index_path, index_rate, vocals_path, output_path, pitch_change, f0_method, cpt, version, net_g, filter_radius, tgt_sr, rms_mix_rate, protect, crepe_hop_length, vc, hubert_model)
231
+ del hubert_model, cpt
232
+ gc.collect()
233
+
234
+
235
+ def add_audio_effects(audio_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping):
236
+ output_path = f'{os.path.splitext(audio_path)[0]}_mixed.wav'
237
+
238
+ # Initialize audio effects plugins
239
+ board = Pedalboard(
240
+ [
241
+ HighpassFilter(),
242
+ Compressor(ratio=4, threshold_db=-15),
243
+ Reverb(room_size=reverb_rm_size, dry_level=reverb_dry, wet_level=reverb_wet, damping=reverb_damping)
244
+ ]
245
+ )
246
+
247
+ with AudioFile(audio_path) as f:
248
+ with AudioFile(output_path, 'w', f.samplerate, f.num_channels) as o:
249
+ # Read one second of audio at a time, until the file is empty:
250
+ while f.tell() < f.frames:
251
+ chunk = f.read(int(f.samplerate))
252
+ effected = board(chunk, f.samplerate, reset=False)
253
+ o.write(effected)
254
+
255
+ return output_path
256
+
257
+
258
+ def combine_audio(audio_paths, output_path, main_gain, backup_gain, inst_gain, output_format):
259
+ main_vocal_audio = AudioSegment.from_wav(audio_paths[0]) - 4 + main_gain
260
+ backup_vocal_audio = AudioSegment.from_wav(audio_paths[1]) - 6 + backup_gain
261
+ instrumental_audio = AudioSegment.from_wav(audio_paths[2]) - 7 + inst_gain
262
+ main_vocal_audio.overlay(backup_vocal_audio).overlay(instrumental_audio).export(output_path, format=output_format)
263
+
264
+
265
@spaces.GPU(duration=50)
def process_song(
    song_dir, song_input, mdx_model_params, song_id, is_webui, input_type, progress,
    keep_files, pitch_change, pitch_change_all, voice_model, index_rate, filter_radius,
    rms_mix_rate, protect, f0_method, crepe_hop_length, output_format, keep_orig, orig_song_path
):
    """Separate the song into stems (reusing cached stems when possible) and
    convert the main vocals with RVC.

    Returns (ai_vocals_path, ai_cover_path, instrumentals_path,
    backup_vocals_path, vocals_path, main_vocals_path); the last two are None
    when cached stems were reused.
    """
    vocals_path = None
    main_vocals_path = None

    if not os.path.exists(song_dir):
        # First run for this song id: separate all stems from scratch.
        os.makedirs(song_dir)
        (orig_song_path, vocals_path, instrumentals_path, main_vocals_path,
         backup_vocals_path, main_vocals_dereverb_path) = preprocess_song(
            song_input, mdx_model_params, song_id, is_webui, input_type,
            progress, keep_orig, orig_song_path)
    else:
        cached_paths = get_audio_paths(song_dir)
        # Re-run separation when any cached stem is missing, or when the user
        # asked to keep intermediate files (they may have been cleaned up).
        if keep_files or any(p is None for p in cached_paths):
            (orig_song_path, vocals_path, instrumentals_path, main_vocals_path,
             backup_vocals_path, main_vocals_dereverb_path) = preprocess_song(
                song_input, mdx_model_params, song_id, is_webui, input_type,
                progress, keep_orig, orig_song_path)
        else:
            orig_song_path, instrumentals_path, main_vocals_dereverb_path, backup_vocals_path = cached_paths

    # Octave shift converted to semitones, plus the overall key change.
    pitch_change = pitch_change * 12 + pitch_change_all
    # Output names encode every conversion parameter so identical runs are cached.
    ai_vocals_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]}_{voice_model}_p{pitch_change}_i{index_rate}_fr{filter_radius}_rms{rms_mix_rate}_pro{protect}_{f0_method}{"" if f0_method != "mangio-crepe" else f"_{crepe_hop_length}"}.wav')
    ai_cover_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]} ({voice_model} Ver).{output_format}')

    # Skip conversion entirely when a previous run already produced this file.
    if not os.path.exists(ai_vocals_path):
        display_progress('[~] Converting voice using RVC...', 0.5, is_webui, progress)
        voice_change(voice_model, main_vocals_dereverb_path, ai_vocals_path, pitch_change, f0_method, index_rate, filter_radius, rms_mix_rate, protect, crepe_hop_length, is_webui)

    return ai_vocals_path, ai_cover_path, instrumentals_path, backup_vocals_path, vocals_path, main_vocals_path
295
+
296
+ # process_song.zerogpu = True
297
+
298
+ # @spaces.GPU(duration=140)
299
def song_cover_pipeline(song_input, voice_model, pitch_change, keep_files,
                        is_webui=0, main_gain=0, backup_gain=0, inst_gain=0, index_rate=0.5, filter_radius=3,
                        rms_mix_rate=0.25, f0_method='rmvpe', crepe_hop_length=128, protect=0.33, pitch_change_all=0,
                        reverb_rm_size=0.15, reverb_wet=0.2, reverb_dry=0.8, reverb_damping=0.7, output_format='mp3',
                        progress=gr.Progress()):
    """End-to-end AI cover pipeline: fetch/validate the input song, separate
    stems and convert vocals (process_song), apply reverb to the AI vocals,
    re-clean the instrumental with MDX, optionally pitch-shift everything,
    and mix the final cover.

    song_input is either a YouTube URL or a local audio file path.
    Returns the path to the rendered cover file, or whatever raise_exception
    yields on failure (NOTE(review): raise_exception presumably re-raises /
    surfaces a gr.Error — confirm in its definition, outside this chunk).
    """
    try:
        if not song_input or not voice_model:
            raise_exception('Ensure that the song input field and voice model field is filled.', is_webui)

        display_progress('[~] Starting AI Cover Generation Pipeline...', 0, is_webui, progress)

        # Per-model MDX-Net parameters shipped alongside the ONNX models.
        with open(os.path.join(mdxnet_models_dir, 'model_data.json')) as infile:
            mdx_model_params = json.load(infile)

        # if youtube url
        if urlparse(song_input).scheme == 'https':
            input_type = 'yt'
            song_id = get_youtube_video_id(song_input)
            if song_id is None:
                error_msg = 'Invalid YouTube url.'
                raise_exception(error_msg, is_webui)

        # local audio file
        else:
            input_type = 'local'
            song_input = song_input.strip('\"')
            if os.path.exists(song_input):
                # Local files are identified by a content hash instead of a video id.
                song_id = get_hash(song_input)
            else:
                error_msg = f'{song_input} does not exist.'
                song_id = None
                raise_exception(error_msg, is_webui)

        # All intermediate and final files for this song live under its id.
        song_dir = os.path.join(output_dir, song_id)

        keep_orig, orig_song_path = get_audio_file(song_input, is_webui, input_type, progress)
        orig_song_path = convert_to_stereo(orig_song_path)

        # Timing instrumentation around the GPU-bound conversion step.
        import time
        start = time.time()

        (
            ai_vocals_path,
            ai_cover_path,
            instrumentals_path,
            backup_vocals_path,
            vocals_path,
            main_vocals_path
        ) = process_song(
            song_dir,
            song_input,
            mdx_model_params,
            song_id,
            is_webui,
            input_type,
            progress,
            keep_files,
            pitch_change,
            pitch_change_all,
            voice_model,
            index_rate,
            filter_radius,
            rms_mix_rate,
            protect,
            f0_method,
            crepe_hop_length,
            output_format,
            keep_orig,
            orig_song_path,
        )

        end = time.time()
        print(f"Execution time: {end - start:.4f} seconds")
        with sf.SoundFile(ai_vocals_path) as f:
            duration__ = len(f) / f.samplerate
            print(f"Audio duration: {duration__:.2f} seconds")

        display_progress('[~] Applying audio effects to Vocals...', 0.8, is_webui, progress)
        ai_vocals_mixed_path = add_audio_effects(ai_vocals_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping)

        # Run the instrumental through MDX once more to strip residual vocals.
        instrumentals_path, _ = run_mdx(
            mdx_model_params,
            os.path.join(output_dir, song_id),
            os.path.join(mdxnet_models_dir, "UVR-MDX-NET-Inst_HQ_4.onnx"),
            instrumentals_path,
            # exclude_main=False,
            exclude_inversion=True,
            suffix="Voiceless",
            denoise=False,
            keep_orig=True,
            base_device=""
        )

        # AI vocals are already shifted inside process_song; only the other
        # stems need the overall key change applied here.
        if pitch_change_all != 0:
            display_progress('[~] Applying overall pitch change', 0.85, is_webui, progress)
            instrumentals_path = pitch_shift(instrumentals_path, pitch_change_all)
            backup_vocals_path = pitch_shift(backup_vocals_path, pitch_change_all)

        display_progress('[~] Combining AI Vocals and Instrumentals...', 0.9, is_webui, progress)
        combine_audio([ai_vocals_mixed_path, backup_vocals_path, instrumentals_path], ai_cover_path, main_gain, backup_gain, inst_gain, output_format)

        if not keep_files:
            display_progress('[~] Removing intermediate audio files...', 0.95, is_webui, progress)
            # vocals_path/main_vocals_path may be None when cached stems were reused.
            intermediate_files = [vocals_path, main_vocals_path, ai_vocals_mixed_path]
            if pitch_change_all != 0:
                # The pitch-shifted copies are derived files; the originals stay cached.
                intermediate_files += [instrumentals_path, backup_vocals_path]
            for file in intermediate_files:
                if file and os.path.exists(file):
                    os.remove(file)

        return ai_cover_path

    except Exception as e:
        # Funnel every failure through raise_exception so the web UI gets a
        # readable message; original traceback context is not preserved here.
        raise_exception(str(e), is_webui)
413
+
414
+
415
if __name__ == '__main__':
    # CLI entry point: mirrors the web UI options and runs the full pipeline once.
    parser = argparse.ArgumentParser(description='Generate a AI cover song in the song_output/id directory.', add_help=True)
    parser.add_argument('-i', '--song-input', type=str, required=True, help='Link to a YouTube video or the filepath to a local mp3/wav file to create an AI cover of')
    parser.add_argument('-dir', '--rvc-dirname', type=str, required=True, help='Name of the folder in the rvc_models directory containing the RVC model file and optional index file to use')
    parser.add_argument('-p', '--pitch-change', type=int, required=True, help='Change the pitch of AI Vocals only. Generally, use 1 for male to female and -1 for vice-versa. (Octaves)')
    parser.add_argument('-k', '--keep-files', action=argparse.BooleanOptionalAction, help='Whether to keep all intermediate audio files generated in the song_output/id directory, e.g. Isolated Vocals/Instrumentals')
    parser.add_argument('-ir', '--index-rate', type=float, default=0.5, help='A decimal number e.g. 0.5, used to reduce/resolve the timbre leakage problem. If set to 1, more biased towards the timbre quality of the training dataset')
    parser.add_argument('-fr', '--filter-radius', type=int, default=3, help='A number between 0 and 7. If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.')
    parser.add_argument('-rms', '--rms-mix-rate', type=float, default=0.25, help="A decimal number e.g. 0.25. Control how much to use the original vocal's loudness (0) or a fixed loudness (1).")
    parser.add_argument('-palgo', '--pitch-detection-algo', type=str, default='rmvpe', help='Best option is rmvpe (clarity in vocals), then mangio-crepe (smoother vocals).')
    parser.add_argument('-hop', '--crepe-hop-length', type=int, default=128, help='If pitch detection algo is mangio-crepe, controls how often it checks for pitch changes in milliseconds. The higher the value, the faster the conversion and less risk of voice cracks, but there is less pitch accuracy. Recommended: 128.')
    parser.add_argument('-pro', '--protect', type=float, default=0.33, help='A decimal number e.g. 0.33. Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy.')
    parser.add_argument('-mv', '--main-vol', type=int, default=0, help='Volume change for AI main vocals in decibels. Use -3 to decrease by 3 decibels and 3 to increase by 3 decibels')
    parser.add_argument('-bv', '--backup-vol', type=int, default=0, help='Volume change for backup vocals in decibels')
    parser.add_argument('-iv', '--inst-vol', type=int, default=0, help='Volume change for instrumentals in decibels')
    parser.add_argument('-pall', '--pitch-change-all', type=int, default=0, help='Change the pitch/key of vocals and instrumentals. Changing this slightly reduces sound quality')
    parser.add_argument('-rsize', '--reverb-size', type=float, default=0.15, help='Reverb room size between 0 and 1')
    parser.add_argument('-rwet', '--reverb-wetness', type=float, default=0.2, help='Reverb wet level between 0 and 1')
    parser.add_argument('-rdry', '--reverb-dryness', type=float, default=0.8, help='Reverb dry level between 0 and 1')
    parser.add_argument('-rdamp', '--reverb-damping', type=float, default=0.7, help='Reverb damping between 0 and 1')
    parser.add_argument('-oformat', '--output-format', type=str, default='mp3', help='Output format of audio file. mp3 for smaller file size, wav for best quality')
    args = parser.parse_args()

    # Fail fast if the requested voice model folder is missing.
    rvc_dirname = args.rvc_dirname
    if not os.path.exists(os.path.join(rvc_models_dir, rvc_dirname)):
        raise Exception(f'The folder {os.path.join(rvc_models_dir, rvc_dirname)} does not exist.')

    cover_path = song_cover_pipeline(args.song_input, rvc_dirname, args.pitch_change, args.keep_files,
                                     main_gain=args.main_vol, backup_gain=args.backup_vol, inst_gain=args.inst_vol,
                                     index_rate=args.index_rate, filter_radius=args.filter_radius,
                                     rms_mix_rate=args.rms_mix_rate, f0_method=args.pitch_detection_algo,
                                     crepe_hop_length=args.crepe_hop_length, protect=args.protect,
                                     pitch_change_all=args.pitch_change_all,
                                     reverb_rm_size=args.reverb_size, reverb_wet=args.reverb_wetness,
                                     reverb_dry=args.reverb_dryness, reverb_damping=args.reverb_damping,
                                     output_format=args.output_format)
    print(f'[+] Cover generated at {cover_path}')