Tasks: Text Retrieval
Modalities: Text
Sub-tasks: document-retrieval
Size: 10M - 100M
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import json

import datasets

_CITATION = '''
'''
# Number of gzipped JSONL corpus shards per language.
languages2filesize = {
    'ar': 5,
    'bn': 1,
    'en': 66,
    'es': 21,
    'fa': 5,
    'fi': 4,
    'fr': 30,
    'hi': 2,
    'id': 3,
    'ja': 14,
    'ko': 3,
    'ru': 20,
    'sw': 1,
    'te': 2,
    'th': 2,
    'zh': 10,
    'de': 32,
    'yo': 1,
}
_DESCRIPTION = 'Dataset loading script for the MIRACL corpus.'

# Per-language download URLs for the gzipped JSONL corpus shards.
_DATASET_URLS = {
    lang: {
        'train': [
            f'https://huggingface.co/datasets/miracl/miracl-corpus/resolve/main/miracl-corpus-v1.0-{lang}/docs-{i}.jsonl.gz'
            for i in range(n)
        ]
    } for lang, n in languages2filesize.items()
}
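# As an illustration of how the mapping above resolves, the single Swahili
# shard ('sw' has one file) ends up at:
# https://huggingface.co/datasets/miracl/miracl-corpus/resolve/main/miracl-corpus-v1.0-sw/docs-0.jsonl.gz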
class MIRACLCorpus(datasets.GeneratorBasedBuilder):
    # One builder config per language, so a single language can be selected
    # by config name at load time.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=datasets.Version('1.0.0'),
            name=lang,
            description=f'MIRACL dataset in language {lang}.',
        ) for lang in languages2filesize
    ]

    def _info(self):
        # Each corpus record is a passage with a document id, title, and text.
        features = datasets.Features({
            'docid': datasets.Value('string'),
            'title': datasets.Value('string'),
            'text': datasets.Value('string'),
        })
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage='https://project-miracl.github.io',
            # License for the dataset if available.
            license='',
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        lang = self.config.name
        # Download (and decompress) all shards for the selected language.
        downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[lang])
        splits = [
            datasets.SplitGenerator(
                name='train',
                gen_kwargs={
                    'filepaths': downloaded_files['train'],
                },
            ),
        ]
        return splits

    def _generate_examples(self, filepaths):
        # Each shard is a JSONL file with one passage per line; the docid is
        # used as the example key.
        for filepath in sorted(filepaths):
            with open(filepath, encoding='utf-8') as f:
                for line in f:
                    data = json.loads(line)
                    yield data['docid'], data
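
A minimal usage sketch, assuming the `datasets` library is installed and this script is served from the miracl/miracl-corpus repository on the Hugging Face Hub; the config name and field names below come from the script itself:

from datasets import load_dataset

# Load the Swahili portion of the corpus (config names are the language codes above).
corpus = load_dataset('miracl/miracl-corpus', 'sw', split='train')
print(corpus[0]['docid'], corpus[0]['title'])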