init commit
Files changed:
- librispeech-phones-and-mel.py  +159 −0
- librispeech_phones_and_mel.py  +0 −364
librispeech-phones-and-mel.py
ADDED
@@ -0,0 +1,159 @@
"""LibriSpeech dataset with phone alignments, prosody and mel spectrograms."""

import os
from pathlib import Path
import hashlib
import pickle

import datasets
import pandas as pd
import numpy as np
from tqdm.contrib.concurrent import process_map
from tqdm.auto import tqdm
from multiprocessing import cpu_count
from PIL import Image

logger = datasets.logging.get_logger(__name__)

_VERSION = "0.0.1"

_CITATION = """\
@inproceedings{panayotov2015librispeech,
  title={Librispeech: an ASR corpus based on public domain audio books},
  author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
  booktitle={2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages={5206--5210},
  year={2015},
  organization={IEEE}
}
"""

_DESCRIPTION = """\
Dataset containing Mel Spectrograms, Prosody and Phone Alignments for the LibriSpeech dataset.
"""

_URLS = {
    "dev.clean": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/dev_clean.tar.gz",
    "dev.other": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/dev_other.tar.gz",
    "test.clean": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/test_clean.tar.gz",
    "test.other": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/test_other.tar.gz",
    "train.clean.100": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/train_clean_100.tar.gz",
    "train.clean.360": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/train_clean_360.tar.gz",
    "train.other.500": "https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel/resolve/main/data/train_other_500.tar.gz",
}


class LibrispeechConfig(datasets.BuilderConfig):
    """BuilderConfig for Librispeech."""

    def __init__(self, **kwargs):
        """BuilderConfig for Librispeech.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class Librispeech(datasets.GeneratorBasedBuilder):
    """Librispeech phones-and-mel dataset."""

    BUILDER_CONFIGS = [
        LibrispeechConfig(
            name="libritts",
            version=datasets.Version(_VERSION, ""),
        ),
    ]

    def _info(self):
        features = {
            "id": datasets.Value("string"),
            "speaker_id": datasets.Value("string"),
            "chapter_id": datasets.Value("string"),
            "phones": datasets.Value("string"),
            "mel": datasets.Value("string"),
            "prosody": datasets.Value("string"),
            "speaker_utterance": datasets.Value("string"),
            "mean_speaker_utterance": datasets.Value("string"),
            "mean_speaker": datasets.Value("string"),
        }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/cdminix/librispeech-phones-and-mel",
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        splits = [
            datasets.SplitGenerator(
                name=key,
                gen_kwargs={"data_path": dl_manager.download_and_extract(value)},
            )
            for key, value in _URLS.items()
        ]

        return splits

    def _df_from_path(self, path):
        mel_path = Path(path)
        prosody_path = Path(str(mel_path).replace("_mel.png", "_prosody.png"))
        phones_path = Path(str(mel_path).replace("_mel.png", "_phones.npy"))
        overall_speaker_path = Path(str(mel_path).replace("_mel.png", "_speaker.npy"))
        temporal_speaker_path = Path(str(mel_path).replace("_mel.png", "_speaker.png"))
        mean_speaker_path = mel_path.parent.parent / "mean_speaker.npy"

        speaker_id = mel_path.parent.parent.name
        chapter_id = mel_path.parent.name
        _id = str(mel_path).replace("_mel.png", "")

        return {
            "id": _id,
            "speaker_id": speaker_id,
            "chapter_id": chapter_id,
            "phones": phones_path,
            "mel": mel_path,
            "prosody": prosody_path,
            "speaker_utterance": temporal_speaker_path,
            "mean_speaker_utterance": overall_speaker_path,
            "mean_speaker": mean_speaker_path,
        }

    def _df_from_paths_mp(self, paths):
        return pd.DataFrame(
            process_map(
                self._df_from_path,
                paths,
                desc="Reading files",
                max_workers=cpu_count(),
                chunksize=100,
            )
        )

    def _create_mean_speaker(self, df):
        # Create mean speaker for each speaker
        for _, speaker_df in df.groupby("speaker_id"):
            if not Path(speaker_df["mean_speaker"].iloc[0]).exists():
                mean_speaker = np.mean(
                    np.array(
                        [np.load(path) for path in speaker_df["mean_speaker_utterance"]]
                    ),
                    axis=0,
                )
                np.save(speaker_df["mean_speaker"].iloc[0], mean_speaker)

    def _generate_examples(self, data_path):
        """Generate examples."""
        logger.info("⏳ Generating examples from = %s", data_path)

        paths = sorted(Path(data_path).rglob("*_mel.png"))

        df = self._df_from_paths_mp(paths)

        self._create_mean_speaker(df)

        for _, row in tqdm(df.iterrows(), desc="Generating examples"):
            yield row["id"], row.to_dict()
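For orientation, here is a minimal usage sketch of the new loader. Everything below that is not in the script itself is an assumption: the split names mirror the `_URLS` keys, `trust_remote_code=True` is assumed to be needed for a script-based dataset, and the mel PNGs are assumed to decode into 2-D arrays whose exact value scaling this commit does not specify.

# Minimal usage sketch (assumptions flagged in comments; not part of the commit).
import numpy as np
from PIL import Image
from datasets import load_dataset

# Split names follow the _URLS keys above; trust_remote_code is required for
# community scripts in recent `datasets` versions (assumption about versions).
ds = load_dataset(
    "cdminix/librispeech-phones-and-mel",
    split="dev.clean",
    trust_remote_code=True,
)

example = ds[0]
phones = np.load(example["phones"])  # phone alignments stored as .npy
mel = np.asarray(Image.open(example["mel"]), dtype=np.float32)  # PNG-encoded mel
# The commit does not document the PNG value range, so any rescaling back to
# log-mel units (e.g. mapping 0..255 to dB) would be a guess.
print(example["speaker_id"], example["chapter_id"], mel.shape, phones.shape)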
librispeech_phones_and_mel.py
DELETED
@@ -1,364 +0,0 @@
"""LibriSpeech dataset with phone alignments, prosody and mel spectrograms."""

import os
from pathlib import Path
import hashlib
import pickle

import datasets
import pandas as pd
import numpy as np
from tqdm.contrib.concurrent import process_map
from tqdm.auto import tqdm
from multiprocessing import cpu_count

logger = datasets.logging.get_logger(__name__)

_VERSION = "0.0.1"

_CITATION = """\

"""

_DESCRIPTION = """\
Dataset containing Mel Spectrograms, Prosody and Phone Alignments for the LibriSpeech dataset.
"""


def get_speaker_prompts(speaker, hash_ds):
    ds = hash_ds.df
    speaker_prompts = ds[ds["speaker"] == speaker]
    speaker_prompts = tuple(speaker_prompts["audio"])
    return speaker_prompts


class LibriTTSAlignConfig(datasets.BuilderConfig):
    """BuilderConfig for LibriTTSAlign."""

    def __init__(self, sampling_rate=22050, hop_length=256, win_length=1024, **kwargs):
        """BuilderConfig for LibriTTSAlign.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(LibriTTSAlignConfig, self).__init__(**kwargs)

        self.sampling_rate = sampling_rate
        self.hop_length = hop_length
        self.win_length = win_length

        if _PATH is None:
            raise ValueError(
                "Please set the environment variable LIBRITTS_PATH to point to the LibriTTS dataset directory."
            )
        elif _PATH == os.environ.get("HF_DATASETS_CACHE", None):
            logger.warning(
                "Please set the environment variable LIBRITTS_PATH to point to the LibriTTS dataset directory. Using HF_DATASETS_CACHE as a fallback."
            )


class LibriTTSAlign(datasets.GeneratorBasedBuilder):
    """LibriTTSAlign dataset."""

    BUILDER_CONFIGS = [
        LibriTTSAlignConfig(
            name="libritts",
            version=datasets.Version(_VERSION, ""),
        ),
    ]

    def _info(self):
        features = {
            "id": datasets.Value("string"),
            "speaker": datasets.Value("string"),
            "text": datasets.Value("string"),
            "start": datasets.Value("float32"),
            "end": datasets.Value("float32"),
            # phone features
            "phones": datasets.Sequence(datasets.Value("string")),
            "phone_durations": datasets.Sequence(datasets.Value("int32")),
            # audio feature
            "audio": datasets.Value("string"),
            "audio_speaker_prompt": datasets.Sequence(datasets.Value("string")),
        }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="https://github.com/MiniXC/MeasureCollator",
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        ds_dict = {}
        for name, url in _URLS.items():
            ds_dict[name] = self._create_alignments_ds(name, url)
        splits = [
            datasets.SplitGenerator(
                name=key.replace("-", "."), gen_kwargs={"ds": self._create_data(value)}
            )
            for key, value in ds_dict.items()
        ]
        # dataframe with all data
        data_train = self._create_data(
            [
                ds_dict["train-clean-100"],
                ds_dict["train-clean-360"],
                ds_dict["train-other-500"],
            ]
        )
        data_dev = self._create_data([ds_dict["dev-clean"], ds_dict["dev-other"]])
        data_test = self._create_data([ds_dict["test-clean"], ds_dict["test-other"]])
        splits += [
            datasets.SplitGenerator(
                name="train.all",
                gen_kwargs={
                    "ds": data_train,
                },
            ),
            datasets.SplitGenerator(
                name="dev.all",
                gen_kwargs={
                    "ds": data_dev,
                },
            ),
            datasets.SplitGenerator(
                name="test.all",
                gen_kwargs={
                    "ds": data_test,
                },
            ),
        ]
        data_all = pd.concat([data_train, data_dev, data_test])
        # create a new split which takes one sample from each speaker in data_all and puts it into the dev split
        # we then remove these samples from data_all
        speakers = data_all["speaker"].unique()
        # seed for reproducibility
        np.random.seed(42)
        self.data_all = data_all
        del data_all
        data_dev_all = [
            x
            for x in process_map(
                self._create_dev_split,
                speakers,
                chunksize=1000,
                max_workers=_MAX_WORKERS,
                desc="creating dev split",
                tqdm_class=tqdm,
            )
            if x is not None
        ]
        data_dev_all = pd.concat(data_dev_all)
        data_all = self.data_all
        data_all = data_all[data_all["speaker"].isin(data_dev_all["speaker"].unique())]
        data_all = data_all[
            ~data_all["basename"].isin(data_dev_all["basename"].unique())
        ]
        del self.data_all
        self.speaker2idxs = {}
        self.speaker2idxs["all"] = {
            speaker: idx
            for idx, speaker in enumerate(
                sorted(list(data_dev_all["speaker"].unique()))
            )
        }
        self.speaker2idxs["train"] = {
            speaker: idx
            for idx, speaker in enumerate(sorted(list(data_train["speaker"].unique())))
        }
        self.speaker2idxs["dev"] = {
            speaker: idx
            for idx, speaker in enumerate(sorted(list(data_dev["speaker"].unique())))
        }
        self.speaker2idxs["test"] = {
            speaker: idx
            for idx, speaker in enumerate(sorted(list(data_test["speaker"].unique())))
        }
        splits += [
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={
                    "ds": data_all,
                },
            ),
            datasets.SplitGenerator(
                name="dev",
                gen_kwargs={
                    "ds": data_dev_all,
                },
            ),
        ]
        self.alignments_ds = None
        self.data = None
        return splits

    def _create_dev_split(self, speaker):
        data_speaker = self.data_all[self.data_all["speaker"] == speaker]
        if len(data_speaker) < 10:
            print(f"Speaker {speaker} has only {len(data_speaker)} samples, skipping")
            return None
        else:
            data_speaker = data_speaker.sample(2)
            return data_speaker

    def _create_alignments_ds(self, name, url):
        self.empty_textgrids = 0
        ds_hash = hashlib.md5(
            os.path.join(_PATH, f"{name}-alignments").encode()
        ).hexdigest()
        pkl_path = os.path.join(_PATH, f"{ds_hash}.pkl")
        if os.path.exists(pkl_path):
            ds = pickle.load(open(pkl_path, "rb"))
        else:
            tgt_dir = os.path.join(_PATH, f"{name}-alignments")
            src_dir = os.path.join(_PATH, f"{name}-data")
            if os.path.exists(tgt_dir):
                src_dir = None
                url = None
            if os.path.exists(src_dir):
                url = None
            ds = LibrittsRDataset(
                target_directory=tgt_dir,
                source_directory=src_dir,
                source_url=url,
                verbose=_VERBOSE,
                tmp_directory=os.path.join(_PATH, f"{name}-tmp"),
                chunk_size=1000,
            )
            pickle.dump(ds, open(pkl_path, "wb"))
        return ds, ds_hash

    def _create_data(self, data):
        entries = []
        self.phone_cache = {}
        self.phone_converter = Converter()
        if not isinstance(data, list):
            data = [data]
        hashes = [ds_hash for ds, ds_hash in data]
        ds = [ds for ds, ds_hash in data]
        self.ds = ds
        del data
        for i, ds in enumerate(ds):
            if os.path.exists(os.path.join(_PATH, f"{hashes[i]}-entries.pkl")):
                add_entries = pickle.load(
                    open(os.path.join(_PATH, f"{hashes[i]}-entries.pkl"), "rb")
                )
            else:
                add_entries = [
                    entry
                    for entry in process_map(
                        self._create_entry,
                        zip([i] * len(ds), np.arange(len(ds))),
                        chunksize=10_000,
                        max_workers=_MAX_WORKERS,
                        desc=f"processing dataset {hashes[i]}",
                        tqdm_class=tqdm,
                    )
                    if entry is not None
                ]
                pickle.dump(
                    add_entries,
                    open(os.path.join(_PATH, f"{hashes[i]}-entries.pkl"), "wb"),
                )
            entries += add_entries
        if self.empty_textgrids > 0:
            logger.warning(f"Found {self.empty_textgrids} empty textgrids")
        del self.ds, self.phone_cache, self.phone_converter
        df = pd.DataFrame(
            entries,
            columns=[
                "phones",
                "duration",
                "start",
                "end",
                "audio",
                "speaker",
                "text",
                "basename",
            ],
        )
        return df

    def _create_entry(self, dsi_idx):
        dsi, idx = dsi_idx
        item = self.ds[dsi][idx]
        start, end = item["phones"][0][0], item["phones"][-1][1]

        phones = []
        durations = []

        for i, p in enumerate(item["phones"]):
            s, e, phone = p
            phone.replace("ˌ", "")
            r_phone = phone.replace("0", "").replace("1", "")
            if len(r_phone) > 0:
                phone = r_phone
            if "[" not in phone:
                o_phone = phone
                if o_phone not in self.phone_cache:
                    phone = self.phone_converter(phone, _PHONESET, lang=None)[0]
                    self.phone_cache[o_phone] = phone
                phone = self.phone_cache[o_phone]
            phones.append(phone)
            durations.append(
                int(
                    np.round(e * self.config.sampling_rate / self.config.hop_length)
                    - np.round(s * self.config.sampling_rate / self.config.hop_length)
                )
            )

        if start >= end:
            self.empty_textgrids += 1
            return None

        return (
            phones,
            durations,
            start,
            end,
            item["wav"],
            str(item["speaker"]).split("/")[-1],
            item["transcript"],
            Path(item["wav"]).name,
        )

    def _generate_examples(self, ds):
        j = 0
        hash_col = "audio"
        hash_ds = HashableDataFrame(ds, hash_col)
        for i, row in ds.iterrows():
            # 10kB is the minimum size of a wav file for our purposes
            if Path(row["audio"]).stat().st_size >= 10_000:
                if len(row["phones"]) < 384:
                    speaker_prompts = get_speaker_prompts(row["speaker"], hash_ds)
                    result = {
                        "id": row["basename"],
                        "speaker": row["speaker"],
                        "text": row["text"],
                        "start": row["start"],
                        "end": row["end"],
                        "phones": row["phones"],
                        "phone_durations": row["duration"],
                        "audio": str(row["audio"]),
                        "audio_speaker_prompt": speaker_prompts,
                    }
                    yield j, result
                    j += 1


class HashableDataFrame:
    def __init__(self, df, hash_col):
        self.df = df
        self.hash_col = hash_col
        self.hash = hashlib.md5(self.df[self.hash_col].values).hexdigest()
        # to integer
        self.hash = int(self.hash, 16)

    def __hash__(self):
        return self.hash

    def __eq__(self, other):
        return self.hash == other.hash
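One detail of the removed builder worth a note: `HashableDataFrame` exists so a whole DataFrame can serve as a cache key, hashed by a single column. The diff shows no cache decorator on `get_speaker_prompts`, so the sketch below is a hypothetical reconstruction of the intended pattern using `functools.lru_cache`; the column-digest computation is also simplified to something that runs on plain string columns.

# Hypothetical sketch of the caching pattern HashableDataFrame enables.
import hashlib
from functools import lru_cache

import pandas as pd


class HashableDataFrame:
    """Wrap a DataFrame with a hash over one column so it can key a cache."""

    def __init__(self, df, hash_col):
        self.df = df
        # Simplified from the original, which hashed the raw .values buffer.
        digest = hashlib.md5("".join(df[hash_col].astype(str)).encode()).hexdigest()
        self.hash = int(digest, 16)

    def __hash__(self):
        return self.hash

    def __eq__(self, other):
        return self.hash == other.hash


@lru_cache(maxsize=None)  # hypothetical; the deleted code shows no decorator
def get_speaker_prompts(speaker, hash_ds):
    ds = hash_ds.df
    return tuple(ds[ds["speaker"] == speaker]["audio"])


df = pd.DataFrame({"speaker": ["19", "19", "26"], "audio": ["a.wav", "b.wav", "c.wav"]})
prompts = get_speaker_prompts("19", HashableDataFrame(df, "audio"))
print(prompts)  # ('a.wav', 'b.wav'); repeat calls with an equal wrapper hit the cache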