bagasshw committed on
Commit
fb84fc1
·
verified ·
1 Parent(s): f1fda8d

Create cleaned_jv_openslr_10k.py

Browse files
Files changed (1) hide show
  1. cleaned_jv_openslr_10k.py +172 -0
cleaned_jv_openslr_10k.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv
import os
from pathlib import Path
from typing import List

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Tasks

# BibTeX entry for the upstream OpenSLR SLR35 corpus paper.
_CITATION = """\
@inproceedings{kjartansson-etal-sltu2018,
title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
year = {2018},
address = {Gurugram, India},
month = aug,
pages = {52--55},
URL = {http://dx.doi.org/10.21437/SLTU.2018-11},
}
"""

_DATASETNAME = "jv_id_asr"

_DESCRIPTION = """\
This data set contains transcribed audio data for Javanese. The data set consists of wave files, and a TSV file.
The file utt_spk_text.tsv contains a FileID, UserID and the transcription of audio in the file.
The data set has been manually quality checked, but there might still be errors.
This dataset was collected by Google in collaboration with Reykjavik University and Universitas Gadjah Mada in Indonesia.
"""

_HOMEPAGE = "http://openslr.org/35/"
_LANGUAGES = ["jav"]
_LOCAL = False

_LICENSE = "Attribution-ShareAlike 4.0 International"

# Per-split download URLs (SharePoint mirrors of the extracted archives).
_URLs = {
    "jv_id_asr_train": "https://univindonesia-my.sharepoint.com/:u:/g/personal/bimasena_putra_office_ui_ac_id/EV7Bg0Ik0t5OmlQhDSsiW84BXgTTJwEgqnuv1E-RAzcYfw?e=46bm2H&download=1",
    "jv_id_asr_dev": "https://univindonesia-my.sharepoint.com/:u:/g/personal/bimasena_putra_office_ui_ac_id/EfnpOTriq6VNiiPpYYoV5TEBIhHE77NNSo2jcM-dXvJyiQ?e=SFtYUY&download=1",
    "jv_id_asr_test": "https://univindonesia-my.sharepoint.com/:u:/g/personal/bimasena_putra_office_ui_ac_id/EQWc1L4e5RVJuwgg4BZssooBqn8cxAlbwsHdJG-_OUBpVQ?e=tcqwYc&download=1",
}

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
class JvIdASR(datasets.GeneratorBasedBuilder):
    """Javanese ASR training data set containing ~185K utterances.

    Audio and transcripts come from OpenSLR SLR35 (mirrored via the URLs
    in ``_URLs``); emitted examples are additionally filtered against a
    locally saved, cleaned reference dataset (see ``_load_filtered_ids``).
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="jv_id_asr_source",
            version=SOURCE_VERSION,
            description="jv_id_asr source schema",
            schema="source",
            subset_id="jv_id_asr",
        ),
        SEACrowdConfig(
            name="jv_id_asr_seacrowd_sptext",
            version=SEACROWD_VERSION,
            description="jv_id_asr Nusantara schema",
            schema="seacrowd_sptext",
            subset_id="jv_id_asr",
        ),
    ]

    DEFAULT_CONFIG_NAME = "jv_id_asr_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return DatasetInfo with features matching the selected schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_sptext":
            features = schemas.speech_text_features
        else:
            # Fail fast with a clear message instead of a NameError on
            # the unbound `features` below.
            raise ValueError(f"Invalid config schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download/extract each split archive and wire it to a generator."""
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["jv_id_asr_train"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["jv_id_asr_dev"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["jv_id_asr_test"])},
            ),
        ]

    def _load_filtered_ids(self) -> set:
        """Collect the utterance IDs kept by the cleaned reference dataset.

        Loads a ``DatasetDict`` saved with ``save_to_disk`` and unions the
        ``id`` column of all three splits into one set.

        NOTE(review): ``"jv_dataset_2"`` is resolved relative to the current
        working directory; the original code carried a "Replace with the
        actual path" TODO — confirm this path before use.
        """
        filtered_dataset = datasets.load_from_disk("jv_dataset_2")
        filtered_ids = set()
        for split in ("train", "validation", "test"):
            filtered_ids.update(filtered_dataset[split]["id"])
        return filtered_ids

    def _generate_examples(self, filepath: str):
        """Yield ``(key, example)`` pairs for one extracted split archive.

        Reads ``asr_javanese/utt_spk_text.tsv`` (FileID, UserID,
        transcription) and emits only utterances whose ID appears in the
        cleaned reference set and whose FLAC file exists on disk.
        """
        filtered_ids = self._load_filtered_ids()

        tsv_path = os.path.join(filepath, "asr_javanese", "utt_spk_text.tsv")
        # `with` guarantees the file is closed; no explicit close needed.
        with open(tsv_path, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t")
            for row in reader:
                if len(row) < 3:
                    # Skip malformed rows instead of raising IndexError.
                    continue
                audio_id, sp_id, text = row[0], row[1], row[2]
                # Audio files are sharded by the first two chars of the ID.
                wav_path = os.path.join(filepath, "asr_javanese", "data", audio_id[:2], f"{audio_id}.flac")

                if audio_id not in filtered_ids or not os.path.exists(wav_path):
                    continue

                if self.config.schema == "source":
                    yield audio_id, {
                        "id": audio_id,
                        "speaker_id": sp_id,
                        "path": wav_path,
                        "audio": wav_path,
                        "text": text,
                    }
                elif self.config.schema == "seacrowd_sptext":
                    yield audio_id, {
                        "id": audio_id,
                        "speaker_id": sp_id,
                        "path": wav_path,
                        "audio": wav_path,
                        "text": text,
                        # Age/gender are not provided by the corpus TSV.
                        "metadata": {
                            "speaker_age": None,
                            "speaker_gender": None,
                        },
                    }