import logging
import os

import datasets

logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
|
_ORIGINAL_VIVOS_CITATION = """\
@inproceedings{luong-vu-2016-non,
    title = {A non-expert Kaldi recipe for Vietnamese Speech Recognition System},
    author = {Luong, Hieu-Thi and Vu, Hai-Quan},
    booktitle = {Proceedings of the Third International Workshop on Worldwide Language Service Infrastructure and Second Workshop on Open Infrastructures and Analysis Frameworks for Human Language Technologies (WLSI/OIAF4HLT2016)},
    year = {2016},
    address = {Osaka, Japan},
    publisher = {The COLING 2016 Organizing Committee},
    pages = {51--55},
    url = {https://aclanthology.org/W16-5207}
}
"""
|
_MCP_CLOUDWORDS_PROCESSED_CITATION = """\
@misc{mcp_cloudwords_vivos_processed_2025,
    author = {MCP Cloudwords},
    title = {MCP Cloudwords Processed Version of the VIVOS ASR Dataset for Vietnamese},
    year = {2025},
    howpublished = {Dataset available on the Hugging Face Hub at [TODO: YOUR_USERNAME/YOUR_DATASET_NAME_ON_HUB]}
}
"""
|
_DESCRIPTION = """\
This dataset is a processed version of the VIVOS Vietnamese speech corpus (original source: https://ailab.hcmus.edu.vn/vivos),
prepared by MCP Cloudwords for use in Automatic Speech Recognition (ASR) tasks.

The VIVOS corpus is a valuable public resource for Vietnamese speech processing.
This MCP Cloudwords version aims to make the VIVOS data more readily usable by providing:
- The original audio files in WAV format, organized into 'train' and 'test' splits.
- The corresponding transcriptions.
- Metadata files ('train_meta.txt', 'test_meta.txt') mapping audio files to transcriptions and durations.
  These files reference the audio files by paths relative to the dataset root.

Key processing steps performed by MCP Cloudwords:
- Generation of 'train_meta.txt' and 'test_meta.txt' from the original VIVOS 'prompts.txt' files.
- Calculation and inclusion of audio durations.
- Conversion of transcriptions to uppercase.

Users should cite the original VIVOS dataset and acknowledge MCP Cloudwords' processing.
"""

_HOMEPAGE = (
    "Original VIVOS: https://ailab.hcmus.edu.vn/vivos\n"
    "Processed by MCP Cloudwords: [TODO: YOUR_DATASET_URL_ON_HUB]"
)
_LICENSE = "CC BY-NC-SA 4.0 (please verify against the original VIVOS license terms)"


class VivosASRVi(datasets.GeneratorBasedBuilder):
    """VIVOS Vietnamese ASR Dataset, processed by MCP Cloudwords."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="Processed version of the VIVOS ASR dataset by MCP Cloudwords.",
        )
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # VIVOS audio is 16 kHz WAV; decoding happens lazily on access.
                    "audio": datasets.Audio(sampling_rate=16000),
                    "transcription": datasets.Value("string"),
                    "duration": datasets.Value("float32"),
                    "speaker_id": datasets.Value("string"),
                    "file_id": datasets.Value("string"),
                }
            ),
            supervised_keys=("audio", "transcription"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=f"{_ORIGINAL_VIVOS_CITATION}\n\n{_MCP_CLOUDWORDS_PROCESSED_CITATION}",
        )
|
    def _split_generators(self, dl_manager):
        # `manual_dir` is populated from the `data_dir` argument of `load_dataset`;
        # fall back to the current working directory when it is not provided.
        base_path = dl_manager.manual_dir or "."
        logging.info(f"Using base_path for splits: {os.path.abspath(base_path)}")

        train_meta = os.path.join(base_path, "train_meta.txt")
        test_meta = os.path.join(base_path, "test_meta.txt")

        if not os.path.exists(train_meta):
            raise FileNotFoundError(
                f"Required metadata file 'train_meta.txt' not found at {os.path.abspath(train_meta)}"
            )
        if not os.path.exists(test_meta):
            raise FileNotFoundError(
                f"Required metadata file 'test_meta.txt' not found at {os.path.abspath(test_meta)}"
            )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"meta_filepath": train_meta, "base_data_path": base_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"meta_filepath": test_meta, "base_data_path": base_path},
            ),
        ]
|
    def _generate_examples(self, meta_filepath, base_data_path):
        logging.info(f"Generating examples from: {meta_filepath}")
        processed_count = 0
        error_count = 0
        with open(meta_filepath, encoding="utf-8") as f:
            for uid, line in enumerate(f):
                line = line.strip()
                if not line:
                    # Skip blank lines (e.g. a trailing newline) instead of logging them as errors.
                    continue
                try:
                    # Each metadata line is pipe-delimited: relative_audio_path|transcription|duration.
                    rel_path, transcription, duration_str = line.split("|", 2)
                    duration = float(duration_str)
                    abs_path = os.path.join(base_data_path, rel_path)

                    if not os.path.exists(abs_path):
                        logging.warning(f"Audio file not found: {abs_path}. Skipping line: {line}")
                        error_count += 1
                        continue

                    # VIVOS paths follow .../<speaker_id>/<file_id>.wav, so the speaker ID
                    # is the parent directory of the audio file.
                    path_parts = rel_path.split("/")
                    speaker_id = path_parts[-2] if len(path_parts) >= 2 else "unknown_speaker"
                    file_id = os.path.splitext(path_parts[-1])[0]

                    yield uid, {
                        "audio": abs_path,
                        "transcription": transcription,
                        "duration": duration,
                        "speaker_id": speaker_id,
                        "file_id": file_id,
                    }
                    processed_count += 1
                except ValueError as e:
                    logging.error(f"Invalid format in line: {line}. Error: {e}. Skipping.")
                    error_count += 1
                except Exception as e:
                    logging.error(f"Unexpected error processing line: {line}. Error: {e}. Skipping.")
                    error_count += 1
        logging.info(
            f"Finished generating examples from {meta_filepath}. "
            f"Processed: {processed_count}, Errors/Skipped: {error_count}"
        )
|
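# Smoke test: running this file directly tries to load both splits from the
# script's own directory. Once the dataset is pushed to the Hub, consumers could
# load it without this script, e.g. (repo id placeholder, see TODO above):
#
#   ds = datasets.load_dataset("[TODO: YOUR_USERNAME/YOUR_DATASET_NAME_ON_HUB]", split="train")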
if __name__ == "__main__":
    current_dataset_dir = os.path.dirname(os.path.abspath(__file__))
    print(f"Loading dataset from: {current_dataset_dir}")
    print("Ensure 'train_meta.txt' and 'test_meta.txt' exist in this directory, and audio files are correctly pathed.")

    config_to_load = "default"
    print(f"--- Attempting to load with config_name: '{config_to_load}' ---")

    for split in ["train", "test"]:
        try:
            print(f"\nAttempting to load '{split}' split with config '{config_to_load}'...")
            dataset = datasets.load_dataset(
                path=current_dataset_dir,
                name=config_to_load,
                split=split,
                # Point data_dir at the script directory so _split_generators
                # (via dl_manager.manual_dir) finds the metadata files even when
                # the script is run from another working directory.
                data_dir=current_dataset_dir,
                trust_remote_code=True,
            )

            print(f"SUCCESS: Loaded '{split}' split successfully!")
            print(f"Number of samples in '{split}': {len(dataset)}")
            if len(dataset) > 0:
                print(f"First sample in '{split}':")
                print(dataset[0])
        except FileNotFoundError as e:
            print(f"ERROR: FileNotFoundError while loading '{split}': {e}")
        except Exception as e:
            print(f"ERROR: An unexpected error occurred loading '{split}' with config '{config_to_load}': {e}")
            import traceback

            traceback.print_exc()