import json

import datasets

CHUNK_COUNT = 330
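# Layout assumed by this loader (as read from the code below): the shards live
# at data/1.jsonl ... data/{CHUNK_COUNT}.jsonl, and each line of a shard is a
# JSON array of the form [mbid, title, artists, isrc, length, date, ytid].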

_FEATURES = datasets.Features(
    {
        "mbid": datasets.Value("string"),
        "title": datasets.Value("string"),
        "artists": datasets.Value("string"),
        "isrc": datasets.Value("string"),
        "length": datasets.Value("string"),
        "date": datasets.Value("string"),
        "ytid": datasets.Value("string"),
    }
)


class MusicBrainzLoaderStream(datasets.GeneratorBasedBuilder):
    """MusicBrainz Dataset"""

    def _info(self):
        return datasets.DatasetInfo(
            description="MusicBrainz Dataset",
            features=_FEATURES,
            homepage="None",
            citation="None",
            license="None",
        )

    def _split_generators(self, dl_manager):
        # Fetch every shard: data/1.jsonl through data/{CHUNK_COUNT}.jsonl.
        chunk_paths = [
            dl_manager.download(f"data/{chunk}.jsonl")
            for chunk in range(1, CHUNK_COUNT + 1)
        ]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "chunks": chunk_paths,
                },
            ),
        ]

    def _generate_examples(self, chunks):
        """Yield (key, example) pairs of track metadata from the JSONL chunks."""
        for chunk in chunks:
            with open(chunk, mode="r", encoding="utf-8") as infile:
                for line in infile:
                    mbid, title, artists, isrc, length, date, ytid = json.loads(line)
                    yield mbid, {
                        "mbid": mbid,
                        "title": title,
                        # Re-serialize nested values so they fit the
                        # string-typed features declared above.
                        "artists": json.dumps(artists),
                        "isrc": json.dumps(isrc),
                        "length": length,
                        "date": date,
                        "ytid": ytid,
                    }
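

# Minimal usage sketch (not part of the loader): it assumes this file is the
# dataset repo's loading script, that the data/*.jsonl shards sit alongside
# it, and that the installed `datasets` version still supports script-based
# builders (newer releases may also require trust_remote_code=True).
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, split="train", streaming=True)
    for example in ds.take(3):
        print(example["mbid"], example["title"])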