pershyfon commited on
Commit
2308400
·
verified ·
1 Parent(s): 0e7a2bd

Create su_id_asr_split.py

Browse files
Files changed (1) hide show
  1. su_id_asr_split.py +150 -0
su_id_asr_split.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import os
3
+ from typing import Dict, List
4
+
5
+ import datasets
6
+
7
+ from seacrowd.utils import schemas
8
+ from seacrowd.utils.configs import SEACrowdConfig
9
+ from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
10
+ DEFAULT_SOURCE_VIEW_NAME, Tasks)
11
+
12
# Canonical dataset identifier used across SEACrowd config/subset names.
_DATASETNAME = "su_id_asr"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME

# ISO 639-3 code for Sundanese.
_LANGUAGES = ["sun"]
# Data is downloaded from remote URLs, not a local path.
_LOCAL = False
_CITATION = """\
@inproceedings{sodimana18_sltu,
author={Keshan Sodimana and Pasindu {De Silva} and Supheakmungkol Sarin and Oddur Kjartansson and Martin Jansche and Knot Pipatsrisawat and Linne Ha},
title={{A Step-by-Step Process for Building TTS Voices Using Open Source Data and Frameworks for Bangla, Javanese, Khmer, Nepali, Sinhala, and Sundanese}},
year=2018,
booktitle={Proc. 6th Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU 2018)},
pages={66--70},
doi={10.21437/SLTU.2018-14}
}
"""

_DESCRIPTION = """\
Sundanese ASR training data set containing ~220K utterances.
This dataset was collected by Google in Indonesia.
"""

_HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"

_LICENSE = "Attribution-ShareAlike 4.0 International."

# Per-split download URLs. NOTE: the validation split is keyed
# "su_id_asr_val" — lookups must use that exact key.
_URLs = {
    "su_id_asr_test": "https://univindonesia-my.sharepoint.com/:u:/g/personal/sabrina_aviana_office_ui_ac_id/Eds4-Yht811OrI56EdLSEpkBg4SuudSazxSRmSo3d_SqLg?e=WMuSdC&download=1",
    "su_id_asr_val": "https://univindonesia-my.sharepoint.com/:u:/g/personal/sabrina_aviana_office_ui_ac_id/EamAbFjx5J1AjUO1apFjFDwBuoMfPeB1oB_vsL6YxxGjkw?e=vFdjzW&download=1",
    "su_id_asr_train": "https://univindonesia-my.sharepoint.com/:u:/g/personal/sabrina_aviana_office_ui_ac_id/ESwkEg0Fx_FKhoZbUZx6qPABZFXO8LpEEcUZGisLhrzTQQ?e=tzyFwr&download=1",
}

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]

# Version of the upstream corpus / of the SEACrowd harmonized schema.
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"
48
+
49
+
50
class SuIdASR(datasets.GeneratorBasedBuilder):
    """Loader for su_id_asr: ~220K Sundanese ASR utterances collected by Google.

    Exposes two schemas: the raw "source" schema and the unified SEACrowd
    "seacrowd_sptext" speech-text schema.
    """

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="su_id_asr_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="SU_ID_ASR source schema",
            schema="source",
            subset_id="su_id_asr",
        ),
        SEACrowdConfig(
            name="su_id_asr_seacrowd_sptext",
            version=datasets.Version(_SEACROWD_VERSION),
            description="SU_ID_ASR Nusantara schema",
            schema="seacrowd_sptext",
            subset_id="su_id_asr",
        ),
    ]

    DEFAULT_CONFIG_NAME = "su_id_asr_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata, with features matching the active schema.

        Raises:
            ValueError: if the config names a schema this loader does not know.
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_sptext":
            features = schemas.speech_text_features
        else:
            # Fail fast with a clear error instead of a NameError on `features`.
            raise ValueError(f"Invalid config: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download and extract each split archive and hand it to _generate_examples.

        Bug fix: the validation split previously looked up _URLs["su_id_asr_dev"],
        a key that does not exist (_URLs defines "su_id_asr_val"), so loading the
        validation split raised KeyError.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_train"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_val"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_test"])},
            ),
        ]

    def _generate_examples(self, filepath: str):
        """Yield (utterance_id, example) pairs from an extracted split directory.

        The extracted archive is expected to contain
        asr_sundanese/utt_spk_text.tsv (columns: utterance id, speaker id,
        transcription) and FLAC audio at
        asr_sundanese/data/<first two chars of id>/<id>.flac.
        Rows whose audio file is missing on disk are skipped silently,
        matching the original loader's best-effort behavior.

        Raises:
            ValueError: if the config names a schema this loader does not know.
        """
        if self.config.schema not in ("source", "seacrowd_sptext"):
            raise ValueError(f"Invalid config: {self.config.name}")

        tsv_path = os.path.join(filepath, "asr_sundanese", "utt_spk_text.tsv")
        # Explicit encoding keeps parsing deterministic across platforms
        # (the previous code relied on the locale's default encoding).
        with open(tsv_path, "r", encoding="utf-8") as tsv_file:
            reader = csv.reader(tsv_file, delimiter="\t")
            for row in reader:
                audio_id, speaker_id, transcription_text = row[0], row[1], row[2]
                wav_path = os.path.join(filepath, "asr_sundanese", "data", "{}".format(audio_id[:2]), "{}.flac".format(audio_id))
                if not os.path.exists(wav_path):
                    continue
                ex = {
                    "id": audio_id,
                    "speaker_id": speaker_id,
                    "path": wav_path,
                    "audio": wav_path,
                    "text": transcription_text,
                }
                if self.config.schema == "seacrowd_sptext":
                    # Speaker age/gender are not provided by the source corpus.
                    ex["metadata"] = {
                        "speaker_age": None,
                        "speaker_gender": None,
                    }
                yield audio_id, ex