{
"set_lang": "Display language set to {lang}.",
"no_support_gpu": "Unfortunately, no compatible GPU is available to support your training.",
"text": "text",
"upload_success": "File {name} uploaded successfully.",
"download_url": "Download from the link",
"download_from_csv": "Download from the CSV model repository",
"search_models": "Search models",
"upload": "Upload",
"option_not_valid": "Invalid option!",
"list_model": "Model list",
"success": "Completed!",
"index": "index",
"model": "model",
"zip": "compress",
"search": "search",
"provide_file": "Please provide a valid {filename} file!",
"start": "Starting {start}...",
"not_found": "Not found {name}.",
"found": "Found {results} results!",
"download_music": "download music",
"download": "download",
"provide_url": "Please provide a url.",
"provide_name_is_save": "Please provide a model name to save.",
"not_support_url": "Your model url is not supported.",
"error_occurred": "An error occurred: {e}.",
"unable_analyze_model": "Unable to analyze the model!",
"download_pretrain": "Downloading pre-trained model...",
"provide_pretrain": "Please provide a pre-trained model url {dg}.",
"sr_not_same": "The sample rates of the two models are not the same.",
"architectures_not_same": "Cannot merge models. The architectures are not the same.",
"fushion_model": "model fusion",
"model_fushion_info": "The model {name} is fused from {pth_1} and {pth_2} with a ratio of {ratio}.",
"not_found_create_time": "Creation time not found.",
"format_not_valid": "Invalid format.",
"read_info": "Models trained on different applications may produce different information or may not be readable!",
"epoch": "epoch.",
"step": "step",
"sr": "Sample rate",
"f0": "pitch training",
"version": "version.",
"not_f0": "Pitch training not performed",
"trained_f0": "Pitch training performed",
"model_info": "Model Name: {model_name}\n\n Model Creator: {model_author}\n\nEpoch: {epochs}\n\nSteps: {steps}\n\nVersion: {version}\n\nSample Rate: {sr}\n\nPitch Training: {pitch_guidance}\n\nHash (ID): {model_hash}\n\nCreation Time: {creation_date_str}\n\nVocoder: {vocoder}\n\nEnergy: {rms_extract}\n",
"input_not_valid": "Please provide valid input!",
"output_not_valid": "Please provide valid output!",
"apply_effect": "apply effect",
"enter_the_text": "Please enter the text to speech!",
"choose_voice": "Please choose a voice!",
"convert": "Converting {name}...",
"separator_music": "music separation",
"notfound": "Not found",
"turn_on_use_audio": "Please enable using separated audio to proceed",
"turn_off_convert_backup": "Disable backup voice conversion to use the original voice",
"turn_off_merge_backup": "Disable merging backup voice to use the original voice",
"not_found_original_vocal": "Original vocal not found!",
"convert_vocal": "Converting voice...",
"convert_success": "Voice conversion completed!",
"convert_backup": "Converting backup voice...",
"convert_backup_success": "Backup voice conversion completed!",
"merge_backup": "Merging main voice with backup voice...",
"merge_success": "Merge completed.",
"is_folder": "Input is a folder: Converting all audio files in the folder...",
"not_found_in_folder": "No audio files found in the folder!",
"batch_convert": "Batch conversion in progress...",
"batch_convert_success": "Batch conversion successful!",
"create": "create",
"provide_name": "Please provide a model name.",
"not_found_data": "Data not found",
"not_found_data_preprocess": "Processed audio data not found, please reprocess.",
"not_found_data_extract": "Extracted audio data not found, please re-extract.",
"provide_pretrained": "Please provide pre-trained {dg}.",
"download_pretrained": "Download pre-trained {dg}{rvc_version} original",
"not_found_pretrain": "Pre-trained {dg} not found",
"not_use_pretrain": "No pre-trained model will be used",
"training": "training",
"rick_roll": "Click here if you want to be Rick Roll :) ---> [RickRoll]({rickroll})",
"terms_of_use": "**Please do not use the project for any unethical, illegal, or harmful purposes to individuals or organizations...**",
"exemption": "**In cases where users do not comply with the terms or violate them, I will not be responsible for any claims, damages, or liabilities, whether in contract, negligence, or other causes arising from, outside of, or related to the software, its use, or other transactions associated with it.**",
"separator_tab": "Music Separation",
"4_part": "A simple music separation system can separate into 4 parts: Instruments, Vocals, Main vocals, Backup vocals",
"clear_audio": "Clean audio",
"separator_backing": "Separate backup vocals",
"denoise_mdx": "Denoise MDX separation",
"use_mdx": "Use MDX",
"dereveb_audio": "Remove vocal reverb",
"dereveb_backing": "Remove backup reverb",
"separator_model": "Music separation model",
"separator_backing_model": "Backup separation model",
"shift": "Shift",
"shift_info": "Higher is better quality but slower and uses more resources",
"segments_size": "Segments Size",
"segments_size_info": "Higher is better quality but uses more resources",
"batch_size": "Batch size",
"batch_size_info": "Number of samples processed simultaneously in one training cycle. Higher can cause memory overflow",
"mdx_batch_size_info": "Number of samples processed at a time. Batch processing optimizes calculations. Large batches can cause memory overflow; small batches reduce resource efficiency",
"overlap": "Overlap",
"overlap_info": "Overlap amount between prediction windows",
"export_format": "Export format",
"export_info": "The export format to export the audio file in",
"output_separator": "Separated output",
"hop_length_info": "Analyzing the time transfer window when performing transformations is allowed. The detailed value is compact but requires more calculation",
"drop_audio": "Drop audio here",
"drop_text": "Drop text file here",
"use_url": "YouTube link",
"url_audio": "Link audio",
"downloads": "Downloads",
"clean_strength": "Audio cleaning strength",
"clean_strength_info": "Strength of the audio cleaner for filtering vocals during export",
"input_output": "Audio input, output",
"audio_path": "Input audio path",
"refresh": "Refresh",
"output_folder": "Output audio folder path",
"output_folder_info": "Enter the folder path where the audio will be exported",
"input_audio": "Audio input",
"instruments": "Instruments",
"original_vocal": "Original vocal",
"main_vocal": "Main vocal",
"backing_vocal": "Backup vocal",
"convert_audio": "Convert Audio",
"convert_info": "Convert audio using a trained voice model",
"autotune": "Auto-tune",
"use_audio": "Use separated audio",
"convert_original": "Convert original voice",
"convert_backing": "Convert backup voice",
"not_merge_backing": "Do not merge backup voice",
"merge_instruments": "Merge instruments",
"pitch": "Pitch",
"pitch_info": "Recommendation: set to 12 to change male voice to female and vice versa",
"model_accordion": "Model and index",
"model_name": "Model file",
"index_path": "Index file",
"index_strength": "Index strength",
"index_strength_info": "Higher values increase strength. However, lower values may reduce artificial effects in the audio",
"output_path": "Audio output path",
"output_path_info": "Enter the output path (leave it as .wav format; it will auto-correct during conversion)",
"setting": "General settings",
"f0_method": "Extraction method",
"f0_method_info": "Method used for data extraction",
"f0_method_hybrid": "HYBRID extraction method",
"f0_method_hybrid_info": "Combination of two or more different types of extracts",
"hubert_model": "Embedding model",
"hubert_info": "Pre-trained model to assist embedding",
"modelname": "Model name",
"modelname_info": "If you have your own model, just upload it and input the name here",
"split_audio": "Split audio",
"autotune_rate": "Auto-tune rate",
"autotune_rate_info": "Level of auto-tuning adjustment",
"resample": "Resample",
"resample_info": "Resample post-processing to the final sample rate; 0 means no resampling, NOTE: SOME FORMATS DO NOT SUPPORT SPEEDS OVER 48000",
"filter_radius": "Filter radius",
"filter_radius_info": "If greater than three, median filtering is applied. The value represents the filter radius and can reduce breathiness or noise.",
"rms_mix_rate": "RMS Mix Rate",
"rms_mix_rate_info": "Determines the blend ratio between the RMS energy of the original voice and the converted voice",
"protect": "Consonant protection",
"protect_info": "Protect distinct consonants and breathing sounds to prevent audio tearing and other artifacts. Increasing this value provides comprehensive protection. Reducing it may reduce protection but also minimize indexing effects",
"output_convert": "Converted audio",
"main_convert": "Convert main voice",
"main_or_backing": "Main voice + Backup voice",
"voice_or_instruments": "Voice + Instruments",
"convert_text": "Convert Text",
"convert_text_markdown": "## Convert Text to Speech",
"convert_text_markdown_2": "Convert text to speech and read aloud using the trained voice model",
"input_txt": "Input data from a text file (.txt)",
"text_to_speech": "Text to read",
"voice_speed": "Reading speed",
"voice_speed_info": "Speed of the voice",
"tts_1": "1. Convert Text to Speech",
"tts_2": "2. Convert Speech",
"voice": "Voices by country",
"output_tts": "Output speech path",
"output_tts_convert": "Converted speech output path",
"tts_output": "Enter the output path",
"output_tts_markdown": "Unconverted and converted audio",
"output_text_to_speech": "Generated speech from text-to-speech conversion",
"output_file_tts_convert": "Speech converted using the model",
"output_audio": "Audio output",
"provide_output": "Enter the output path",
"audio_effects": "Audio Effects",
"apply_audio_effects": "## Add Additional Audio Effects",
"audio_effects_edit": "Add effects to audio",
"reverb": "Reverb effect",
"chorus": "Chorus effect",
"delay": "Delay effect",
"more_option": "Additional options",
"phaser": "Phaser effect",
"compressor": "Compressor effect",
"apply": "Apply",
"reverb_freeze": "Freeze mode",
"reverb_freeze_info": "Create a continuous echo effect when this mode is enabled",
"room_size": "Room size",
"room_size_info": "Adjust the room space to create reverberation",
"damping": "Damping",
"damping_info": "Adjust the level of absorption to control the amount of reverberation",
"wet_level": "Reverb signal level",
"wet_level_info": "Adjust the level of the reverb signal effect",
"dry_level": "Original signal level",
"dry_level_info": "Adjust the level of the signal without effects",
"width": "Audio width",
"width_info": "Adjust the width of the audio space",
"chorus_depth": "Chorus depth",
"chorus_depth_info": "Adjust the intensity of the chorus to create a wider sound",
"chorus_rate_hz": "Frequency",
"chorus_rate_hz_info": "Adjust the oscillation speed of the chorus effect",
"chorus_mix": "Mix signals",
"chorus_mix_info": "Adjust the mix level between the original and the processed signal",
"chorus_centre_delay_ms": "Center delay (ms)",
"chorus_centre_delay_ms_info": "The delay time between stereo channels to create the chorus effect",
"chorus_feedback": "Feedback",
"chorus_feedback_info": "Adjust the amount of the effect signal fed back into the original signal",
"delay_seconds": "Delay time",
"delay_seconds_info": "Adjust the delay time between the original and the processed signal",
"delay_feedback": "Delay feedback",
"delay_feedback_info": "Adjust the amount of feedback signal, creating a repeating effect",
"delay_mix": "Delay signal mix",
"delay_mix_info": "Adjust the mix level between the original and delayed signal",
"fade": "Fade effect",
"bass_or_treble": "Bass and treble",
"limiter": "Threshold limiter",
"distortion": "Distortion effect",
"gain": "Audio gain",
"bitcrush": "Bit reduction effect",
"clipping": "Clipping effect",
"fade_in": "Fade-in effect (ms)",
"fade_in_info": "Time for the audio to gradually increase from 0 to normal level",
"fade_out": "Fade-out effect (ms)",
"fade_out_info": "the time it takes for the sound to fade from normal to zero",
"bass_boost": "Bass boost level (dB)",
"bass_boost_info": "amount of bass boost in audio track",
"bass_frequency": "Low-pass filter cutoff frequency (Hz)",
"bass_frequency_info": "frequencies are reduced. Low frequencies make the bass clearer",
"treble_boost": "Treble boost level (dB)",
"treble_boost_info": "high level of sound reinforcement in the audio track",
"treble_frequency": "High-pass filter cutoff frequency (Hz)",
"treble_frequency_info": "The frequency will be filtered out. The higher the frequency, the higher the sound will be retained.",
"limiter_threshold_db": "Limiter threshold",
"limiter_threshold_db_info": "Limit the maximum audio level to prevent it from exceeding the threshold",
"limiter_release_ms": "Release time",
"limiter_release_ms_info": "Time for the audio to return after being limited (Mili Seconds)",
"distortion_info": "Adjust the level of distortion to create a noisy effect",
"gain_info": "Adjust the volume level of the signal",
"clipping_threshold_db": "Clipping threshold",
"clipping_threshold_db_info": "Trim signals exceeding the threshold, creating a distorted sound",
"bitcrush_bit_depth": "Bit depth",
"bitcrush_bit_depth_info": "Reduce audio quality by decreasing bit depth, creating a distorted effect",
"phaser_depth": "Phaser depth",
"phaser_depth_info": "Adjust the depth of the effect, impacting its intensity",
"phaser_rate_hz": "Frequency",
"phaser_rate_hz_info": "Adjust the frequency of the phaser effect",
"phaser_mix": "Mix signal",
"phaser_mix_info": "Adjust the mix level between the original and processed signals",
"phaser_centre_frequency_hz": "Center frequency",
"phaser_centre_frequency_hz_info": "The center frequency of the phaser effect, affecting the adjusted frequencies",
"phaser_feedback": "Feedback",
"phaser_feedback_info": "Adjust the feedback level of the effect, creating a stronger or lighter phaser feel",
"compressor_threshold_db": "Compressor threshold",
"compressor_threshold_db_info": "The threshold level above which the audio will be compressed",
"compressor_ratio": "Compression ratio",
"compressor_ratio_info": "Adjust the level of audio compression when exceeding the threshold",
"compressor_attack_ms": "Attack time (ms)",
"compressor_attack_ms_info": "Time for compression to start taking effect after the audio exceeds the threshold",
"compressor_release_ms": "Release time",
"compressor_release_ms_info": "Time for the audio to return to normal after being compressed",
"create_dataset_url": "Link to audio (use commas for multiple links)",
"createdataset": "Create dataset",
"create_dataset_markdown": "## Create Dataset training from YouTube",
"create_dataset_markdown_2": "Process and create training datasets using YouTube links",
"denoise": "Denoise",
"skip": "Skip",
"model_ver": "Voice separation version",
"model_ver_info": "The model version for separating vocals",
"create_dataset_info": "Dataset creation information",
"output_data": "Dataset output",
"output_data_info": "Output data after creation",
"skip_start": "Skip beginning",
"skip_start_info": "Skip the initial seconds of the audio; use commas for multiple audios",
"skip_end": "Skip end",
"skip_end_info": "Skip the final seconds of the audio; use commas for multiple audios",
"training_model": "Train Model",
"training_markdown": "Train and build a voice model with a set of voice data",
"training_model_name": "Name of the model during training (avoid special characters or spaces)",
"sample_rate": "Sample rate",
"sample_rate_info": "Sample rate of the model",
"training_version": "Model version",
"training_version_info": "Version of the model during training",
"training_pitch": "Pitch Guidance",
"upload_dataset": "Upload dataset",
"preprocess_effect": "Post processing",
"clear_dataset": "Clean dataset",
"preprocess_info": "Preprocessing information",
"preprocess_button": "1. Processing",
"extract_button": "2. Extract",
"extract_info": "Data extraction information",
"total_epoch": "Total epochs",
"total_epoch_info": "Total training epochs",
"save_epoch": "Save frequency",
"save_epoch_info": "Frequency of saving the model during training to allow retraining",
"create_index": "Create index",
"index_algorithm": "Index algorithm",
"index_algorithm_info": "Algorithm for creating the index",
"custom_dataset": "Custom dataset folder",
"custom_dataset_info": "Custom dataset folder for training data",
"overtraining_detector": "Overtraining detector",
"overtraining_detector_info": "Check for overtraining during model training",
"cleanup_training": "Clean Up",
"cleanup_training_info": "Clean up and retrain from scratch",
"cache_in_gpu": "Cache in GPU",
"cache_in_gpu_info": "Store the model in GPU cache memory",
"dataset_folder": "Folder containing dataset",
"threshold": "Overtraining threshold",
"setting_cpu_gpu": "CPU/GPU settings",
"gpu_number": "Number of GPUs used",
"gpu_number_info": "The order number of GPUs used in training. (Note: AMD GPUs do not support multi-GPU training)",
"save_only_latest": "Save only the latest",
"save_only_latest_info": "Save only the latest D and G models",
"save_every_weights": "Save all models",
"save_every_weights_info": "Save all models after each epoch",
"gpu_info": "GPU information",
"gpu_info_2": "Information about the GPU used during training",
"cpu_core": "Number of CPU cores available",
"cpu_core_info": "Number of CPU cores used during training",
"not_use_pretrain_2": "Do not use pretraining",
"not_use_pretrain_info": "Do not use pre-trained models",
"custom_pretrain": "Custom pretraining",
"custom_pretrain_info": "Customize pre-training settings",
"pretrain_file": "Pre-trained model file {dg}",
"train_info": "Training information",
"export_model": "5. Export Model",
"zip_model": "2. Compress model",
"output_zip": "Output file after compression",
"model_path": "Model path",
"model_ratio": "Model ratio",
"model_ratio_info": "Adjusting towards one side will make the model more like that side",
"output_model_path": "Model output path",
"fushion": "Model Fusion",
"fushion_markdown": "## Fushion Two Models",
"fushion_markdown_2": "Combine two voice models into a single model",
"read_model": "Read Information",
"read_model_markdown": "## Read Model Information",
"read_model_markdown_2": "Retrieve recorded information within the model",
"drop_model": "Drop model here",
"readmodel": "Read model",
"model_path_info": "Enter the path to the model file",
"modelinfo": "Model Information",
"download_markdown": "## Download Model",
"download_markdown_2": "Download voice models, pre-trained models, and embedding models",
"model_download": "Download voice model",
"model_url": "Link to the model",
"30s": "Please wait about 30 seconds. The system will restart automatically!",
"model_download_select": "Choose a model download method",
"model_warehouse": "Model repository",
"get_model": "Retrieve model",
"name_to_search": "Name to search",
"search_2": "Search",
"select_download_model": "Choose a searched model (Click to select)",
"download_pretrained_2": "Download pre-trained model",
"pretrained_url": "Pre-trained model link {dg}",
"select_pretrain": "Choose pre-trained model",
"select_pretrain_info": "Choose a pre-trained model to download",
"pretrain_sr": "Model sample rate",
"drop_pretrain": "Drop pre-trained model {dg} here",
"settings": "Settings",
"settings_markdown": "## Additional Settings",
"settings_markdown_2": "Customize additional features of the project",
"lang": "Language",
"lang_restart": "The display language in the project (When changing the language, the system will automatically restart after 30 seconds to update)",
"change_lang": "Change Language",
"theme": "Theme",
"theme_restart": "Theme type displayed in the interface (When changing the theme, the system will automatically restart after 30 seconds to update)",
"theme_button": "Change Theme",
"change_light_dark": "Switch Light/Dark Mode",
"tensorboard_url": "Tensorboard URL",
"errors_loading_audio": "Error loading audio",
"apply_error": "An error occurred while applying effects: {e}",
"indexpath": "Index path",
"split_total": "Total parts split",
"process_audio_error": "An error occurred while processing the audio",
"merge_error": "An error occurred while merging audio",
"not_found_convert_file": "Processed file not found",
"convert_batch": "Batch conversion...",
"found_audio": "Found {audio_files} audio files for conversion.",
"not_found_audio": "No audio files found!",
"error_convert": "An error occurred during audio conversion: {e}",
"convert_batch_success": "Batch conversion completed successfully in {elapsed_time} seconds. Output {output_path}",
"convert_audio_success": "File {input_path} converted successfully in {elapsed_time} seconds. Output {output_path}",
"read_faiss_index_error": "An error occurred while reading the FAISS index: {e}",
"read_model_error": "Failed to load model: {e}",
"starting_download": "Starting download",
"version_not_valid": "Invalid vocal separation version",
"skip<audio": "Cannot skip as skip time is less than audio file length",
"skip>audio": "Cannot skip as skip time is greater than audio file length",
"=<0": "Skip time is less than or equal to 0 and has been skipped",
"skip_warning": "Skip duration ({seconds} seconds) exceeds audio length ({total_duration} seconds). Skipping.",
"download_success": "Download completed successfully",
"create_dataset_error": "An error occurred while creating the training dataset",
"create_dataset_success": "Training dataset creation completed in {elapsed_time} seconds",
"skip_start_audio": "Successfully skipped start of audio: {input_file}",
"skip_end_audio": "Successfully skipped end of audio: {input_file}",
"merge_audio": "Merged all parts containing audio",
"separator_process": "Separating vocals: {input}...",
"not_found_main_vocal": "Main vocal not found!",
"not_found_backing_vocal": "Backup vocal not found!",
"not_found_instruments": "Instruments not found",
"merge_instruments_process": "Merging vocals with instruments...",
"dereverb": "Removing vocal reverb",
"dereverb_success": "Successfully removed vocal reverb",
"save_index": "Index file saved",
"create_index_error": "An error occurred while creating the index",
"sr_not_16000": "Sample rate must be 16000",
"extract_file_error": "An error occurred while extracting the file",
"extract_f0_method": "Starting pitch extraction using {num_processes} cores with method {f0_method}...",
"extract_f0": "Pitch Extraction",
"extract_f0_success": "Pitch extraction completed in {elapsed_time} seconds.",
"NaN": "contains NaN values and will be ignored.",
"start_extract_hubert": "Starting Embedding extraction...",
"process_error": "An error occurred during processing",
"extract_hubert_success": "Embedding extraction completed in {elapsed_time} seconds.",
"export_process": "Model path",
"extract_error": "An error occurred during data extraction",
"extract_success": "Data extraction successful",
"start_preprocess": "Starting data preprocessing with {num_processes} cores...",
"not_integer": "Voice ID folder must be an integer; instead got",
"preprocess_success": "Preprocessing completed in {elapsed_time} seconds.",
"preprocess_model_success": "Preprocessing data for the model completed successfully",
"turn_on_dereverb": "Reverb removal for backup vocals requires enabling reverb removal",
"turn_on_separator_backing": "Backup vocal separation requires enabling vocal separation",
"backing_model_ver": "Backup vocal separation model version",
"clean_audio_success": "Audio cleaned successfully!",
"separator_error": "An error occurred during music separation",
"separator_success": "Music separation completed in {elapsed_time} seconds",
"separator_process_2": "Processing music separation",
"separator_success_2": "Music separation successful!",
"separator_process_backing": "Processing backup vocal separation",
"separator_process_backing_success": "Backup vocal separation successful!",
"process_original": "Processing original vocal reverb removal...",
"process_original_success": "Original vocal reverb removal successful!",
"process_main": "Processing main vocal reverb removal...",
"process_main_success": "Main vocal reverb removal successful!",
"process_backing": "Processing backup vocal reverb removal...",
"process_backing_success": "Backup vocal reverb removal successful!",
"save_every_epoch": "Save model after: ",
"total_e": "Total epochs: ",
"dorg": "Pre-trained G: {pretrainG} | Pre-trained D: {pretrainD}",
"training_f0": "Pitch Guidance",
"not_gpu": "No GPU detected, reverting to CPU (not recommended)",
"not_found_checkpoint": "Checkpoint file not found: {checkpoint_path}",
"save_checkpoint": "Reloaded checkpoint '{checkpoint_path}' (epoch {checkpoint_dict})",
"save_model": "Saved model '{checkpoint_path}' (epoch {iteration})",
"sr_does_not_match": "{sample_rate} Sample rate does not match target {sample_rate2} Sample rate",
"time_or_speed_training": "time={current_time} | training speed={elapsed_time_str}",
"savemodel": "Saved model '{model_dir}' (epoch {epoch} and step {step})",
"model_author": "Credit model to {model_author}",
"unregistered": "Model unregistered",
"not_author": "Model not credited",
"training_author": "Model creator name",
"training_author_info": "To credit the model, enter your name here",
"extract_model_error": "An error occurred while extracting the model",
"start_training": "Starting training",
"import_pretrain": "Loaded pre-trained model ({dg}) '{pretrain}'",
"not_using_pretrain": "No pre-trained model ({dg}) will be used",
"overtraining_find": "Overtraining detected at epoch {epoch} with smoothed generator loss {smoothed_value_gen} and smoothed discriminator loss {smoothed_value_disc}",
"best_epoch": "New best epoch {epoch} with smoothed generator loss {smoothed_value_gen} and smoothed discriminator loss {smoothed_value_disc}",
"success_training": "Training completed with {epoch} epochs, {global_step} steps, and {loss_gen_all} total generator loss.",
"training_info": "Lowest generator loss: {lowest_value_rounded} at epoch {lowest_value_epoch}, step {lowest_value_step}",
"model_training_info": "{model_name} | epoch={epoch} | step={global_step} | {epoch_recorder} | lowest value={lowest_value_rounded} (epoch {lowest_value_epoch} and step {lowest_value_step}) | remaining epochs for overtraining: g/total: {remaining_epochs_gen} d/total: {remaining_epochs_disc} | smoothed generator loss={smoothed_value_gen} | smoothed discriminator loss={smoothed_value_disc}",
"model_training_info_2": "{model_name} | epoch={epoch} | step={global_step} | {epoch_recorder} | lowest value={lowest_value_rounded} (epoch {lowest_value_epoch} and step {lowest_value_step})",
"model_training_info_3": "{model_name} | epoch={epoch} | step={global_step} | {epoch_recorder}",
"training_error": "An error occurred while training the model:",
"separator_info": "Initializing with output path: {output_dir}, output format: {output_format}",
"none_ffmpeg": "FFmpeg is not installed. Please install FFmpeg to use this package.",
"running_in_cpu": "Unable to configure hardware acceleration, running in CPU mode",
"running_in_cuda": "CUDA available in Torch, setting Torch device to CUDA",
"running_in_amd": "AMD available in Torch, setting Torch device to AMD",
"onnx_have": "ONNXruntime available {have}, enabling acceleration",
"onnx_not_have": "{have} not available in ONNXruntime; acceleration will NOT be enabled",
"download_error": "Failed to download file from {url}, response code: {status_code}",
"vip_print": "Hey there, if you haven't subscribed, please consider supporting UVR's developer, Anjok07, by subscribing here: https://patreon.com/uvr",
"loading_model": "Loading model {model_filename}...",
"model_type_not_support": "Unsupported model type: {model_type}",
"starting_separator": "Starting separation process for audio file path",
"separator_success_3": "Separation process completed.",
"separator_duration": "Separation duration",
"dims": "Cannot use sin/cos position encoding with odd dimensions (dim={dims})",
"activation": "activation must be relu/gelu, not {activation}",
"length_or_training_length": "Provided length {length} exceeds training duration {training_length}",
"type_not_valid": "Invalid type for",
"del_parameter": "Removing non-existent parameter ",
"convert_shape": "Converted mix shape: {shape}",
"not_success": "Process was not successful: ",
"resample_error": "Error during resampling",
"shapes": "Shapes",
"wav_resolution": "Resolution type",
"warnings": "Warning: Extremely aggressive values detected",
"warnings_2": "Warning: NaN or infinite values detected in wave input. Shape",
"process_file": "Processing file... \n",
"save_instruments": "Saving reverse track...",
"assert": "Audio files must have the same shape - Mix: {mixshape}, Inst: {instrumentalshape}",
"rubberband": "Rubberband CLI cannot be executed. Please ensure Rubberband-CLI is installed.",
"rate": "Rate must be strictly positive",
"gdown_error": "Could not retrieve the public link for the file. You may need to change its permissions to 'Anyone with the link' or there may already be excessive access permissions.",
"gdown_value_error": "A path or ID must be specified",
"missing_url": "URL is missing",
"mac_not_match": "MAC does not match",
"file_not_access": "File is not accessible",
"int_resp==-3": "Request failed, retrying",
"search_separate": "Search for separate files...",
"found_choice": "Found {choice}",
"separator==0": "No separate files found!",
"select_separate": "Select separate files",
"start_app": "Starting interface...",
"provide_audio": "Enter the path to the audio file",
"set_torch_mps": "Set Torch device to MPS",
"googletts": "Convert text using Google",
"pitch_info_2": "Pitch adjustment for text-to-speech converter",
"waveform": "Waveform must have the shape (# frames, # channels)",
"freq_mask_smooth_hz": "freq_mask_smooth_hz must be at least {hz}Hz",
"time_mask_smooth_ms": "time_mask_smooth_ms must be at least {ms}ms",
"x": "x must be greater",
"xn": "xn must be greater",
"not_found_pid": "No processes found!",
"end_pid": "Process terminated!",
"not_found_separate_model": "No separation model files found!",
"not_found_pretrained": "No pretrained model files found!",
"not_found_log": "No log files found!",
"not_found_predictors": "No predictor model files found!",
"not_found_embedders": "No embedder model files found!",
"provide_folder": "Please provide a valid folder!",
"empty_folder": "The data folder is empty!",
"vocoder": "Vocoder",
"vocoder_info": "A vocoder analyzes and synthesizes human speech signals for voice transformation.\n\nDefault: This option is HiFi-GAN-NSF, compatible with all RVCs\n\nMRF-HiFi-GAN: Higher fidelity.\n\nRefineGAN: Superior sound quality.",
"code_error": "Error: Received status code",
"json_error": "Error: Unable to parse response.",
"requests_error": "Request failed: {e}",
"memory_efficient_training": "Using memory-efficient training",
"not_use_pretrain_error_download": "Will not use pretrained models due to missing files",
"provide_file_settings": "Please provide a preset settings file!",
"load_presets": "Loaded preset file {presets}",
"provide_filename_settings": "Please provide a preset file name!",
"choose1": "Please select one to export!",
"export_settings": "Exported preset file {name}",
"use_presets": "Using preset file",
"file_preset": "Preset file",
"load_file": "Load file",
"export_file": "Export preset file",
"save_clean": "Save cleanup",
"save_autotune": "Save autotune",
"save_pitch": "Save pitch",
"save_index_2": "Save index impact",
"save_resample": "Save resampling",
"save_filter": "Save median filter",
"save_envelope": "Save sound envelope",
"save_protect": "Save sound protection",
"save_split": "Save sound split",
"filename_to_save": "File name to save",
"upload_presets": "Upload preset file",
"stop": "Stop process",
"stop_separate": "Stop Music Separation",
"stop_convert": "Stop Conversion",
"stop_create_dataset": "Stop Dataset Creation",
"stop_training": "Stop Training",
"stop_preprocess": "Stop Data Processing",
"stop_extract": "Stop Data Extraction",
"not_found_presets": "No preset files found in the folder!",
"port": "Port {port} is unavailable! Lowering port by one...",
"empty_json": "{file}: Corrupted or empty",
"thank": "Thank you for reporting the issue, and apologies for any inconvenience caused!",
"error_read_log": "An error occurred while reading log files!",
"error_send": "An error occurred while sending the report! Please contact me on Discord: pham_huynh_anh!",
"report_bugs": "Report Bugs",
"agree_log": "Agree to provide all log files",
"error_info": "Error description",
"error_info_2": "Provide more information about the error",
"report_bug_info": "Report bugs encountered during program usage",
"sr_info": "NOTE: SOME FORMATS DO NOT SUPPORT RATES ABOVE 48000",
"report_info": "If possible, agree to provide log files to help with debugging.\n\nIf log files are not provided, please describe the error in detail, including when and where it occurred.\n\nIf this reporting system also fails, you can reach out via [ISSUE]({github}) or Discord: `pham_huynh_anh`",
"default_setting": "An error occurred during separation, resetting all settings to default...",
"dataset_folder1": "Please enter the data folder name",
"checkpointing_err": "Pretrained model parameters such as sample rate or architecture do not match the selected model.",
"start_onnx_export": "Start converting model to onnx...",
"convert_model": "Convert Model",
"pytorch2onnx": "Converting PYTORCH Model to ONNX Model",
"pytorch2onnx_markdown": "Convert RVC model from pytorch to onnx to optimize audio conversion",
"error_readfile": "An error occurred while reading the file!",
"f0_onnx_mode": "F0 ONNX Mode",
"f0_onnx_mode_info": "Extracting pitch using the ONNX model can help improve speed",
"formantshift": "Pitch and Formant Shift",
"formant_qfrency": "Frequency for Formant Shift",
"formant_timbre": "Timbre for Formant Transformation",
"time_frames": "Time (Frames)",
"Frequency": "Frequency (Hz)",
"f0_extractor_tab": "F0 Extraction",
"f0_extractor_markdown": "## Pitch Extraction",
"f0_extractor_markdown_2": "F0 pitch extraction is intended for use in audio conversion inference",
"start_extract": "Starting extraction process...",
"extract_done": "Extraction process completed!",
"f0_file": "Use pre-extracted F0 file",
"upload_f0": "Upload F0 file",
"f0_file_2": "F0 File",
"clean_f0_file": "Clean up F0 file",
"embed_mode": "Embedders Mode",
"embed_mode_info": "Extracting embeddings using different models",
"close": "The application is shutting down...",
"start_whisper": "Starting voice recognition with Whisper...",
"whisper_done": "Voice recognition complete!",
"process_audio": "Preprocessing audio...",
"process_done_start_convert": "Audio processing complete! proceeding with audio conversion...",
"convert_with_whisper": "Convert Audio With Whisper",
"convert_with_whisper_info": "Convert audio using a trained speech model with a Whisper model for speech recognition\n\nWhisper will recognize different voices then cut the individual voices and use the RVC model to convert those segments\n\nThe Whisper model may not work properly which may cause strange output",
"num_spk": "Number of voices",
"num_spk_info": "Number of voices in the audio",
"model_size": "Whisper model size",
"model_size_info": "Whisper model size\n\nLarge models can produce strange outputs",
"title": "Simple high-quality and high-performance voice and instrument conversion and training tool for Vietnamese people",
"fp16_not_support": "CPU, MPS and OCL does not support fp16 well, convert fp16 -> fp32",
"precision": "Precision",
"precision_info": "Precision of inference and model training\n\nNote: CPU Does not support fp16",
"update_precision": "Update Precision",
"start_update_precision": "Start updating precision",
"deterministic": "Deterministic algorithm",
"deterministic_info": "When enabled, highly deterministic algorithms are used, ensuring that each run of the same input data will yield the same results.\n\nWhen disabled, more optimal algorithms may be selected but may not be fully deterministic, resulting in different training results between runs.",
"benchmark": "Benchmark algorithm",
"benchmark_info": "When enabled, it will test and select the most optimized algorithm for the specific hardware and size. This can help speed up training.\n\nWhen disabled, it will not perform this algorithm optimization, which can reduce speed but ensures that each run uses the same algorithm, which is useful if you want to reproduce exactly.",
"font": "Font",
"font_info": "Interface font\n\nVisit [Google Font](https://fonts.google.com) to choose your favorite font.",
"change_font": "Change Font",
"f0_unlock": "Unlock all",
"f0_unlock_info": "Unlock all pitch extraction methods",
"srt": "SRT file is empty or corrupt!",
"optimizer": "Optimizer",
"optimizer_info": "Optimizer in training, AdamW is default, RAdam is another optimizer",
"main_volume": "Main audio file volume",
"main_volume_info": "Main audio file volume. Should be between -4 and 0.",
"combination_volume": "Combination audio file volume",
"combination_volume_info": "Combination audio file volume. Should keep the volume of the combination file lower than the main audio.",
"inference": "Inference",
"extra": "Extra",
"running_local_url": "Running Interface On Local Url",
"running_share_url": "Running Interface On Public Url",
"translate": "Translate",
"source_lang": "Input language",
"target_lang": "Output language",
"prompt_warning": "Please enter text to start translating!",
"read_error": "An error occurred while reading the text file!",
"quirk": "Quirk Effects",
"quirk_info": "## Weird Effects for Audio",
"quirk_label": "Quirk effects",
"quirk_label_info": "Quirk effects that can be used to apply to audio",
"quirk_markdown": "Apply quirky effects to your audio to make it weird and weird.",
"gradio_start": "Interface loaded successfully after",
"quirk_choice": {"Random": 0, "Voice Crack": 1, "Horror": 2, "Robot": 3, "Baby": 4, "Depression": 5, "Voice Jerking": 6, "Oldster": 7, "Echo": 8, "Devil": 9, "Distorted Voice": 10, "Online Sales": 11, "Drag": 12, "Uncomfortable": 13, "Noise": 14, "Connectivity Issue": 15, "Disorder": 16},
"proposal_pitch": "Automatically propose pitch",
"hybrid_calc": "Hybrid calculation for method: {f0_method}...",
"proposal_f0": "Proposed pitch: {up_key}",
"startautotune": "Start autotune pitch...",
"proposal_pitch_threshold": "Proposal Pitch Threshold",
"proposal_pitch_threshold_info": "Proposal Pitch Threshold, for male models use 155.0 and female models use 255.0",
"rms_start_extract": "Starting audio energy extraction with {num_processes} cores...",
"rms_success_extract": "Energy extraction completed in {elapsed_time} seconds.",
"train&energy": "Training with energy",
"train&energy_info": "Training model with RMS energy",
"editing": "Editing",
"check_assets_error": "Downloading assets failed {count} times in a row! Please download manually and place in the assets folder: https://huggingface.co/AnhP/Vietnamese-RVC-Project"
}