|
from __future__ import absolute_import, division, print_function |
|
|
|
from typing import List |
|
|
|
import datasets |
|
|
|
|
|
DESCRIPTION = open('README.md', 'r').read() |
|
|
|
|
|
DOWNLOAD_URL = "https://textae.blob.core.windows.net/optimus/data/datasets/wikipedia.segmented.nltk.txt" |
|
|
|
|
|
class WikiSentences(datasets.GeneratorBasedBuilder):
    """Builder for the Optimus segmented-Wikipedia sentence corpus.

    Downloads one newline-delimited text file and exposes every
    sufficiently short line as an example with a single 'text' field.
    """

    def _info(self):
        """Describe the dataset schema: one string feature named 'text'."""
        schema = datasets.Features(
            {
                'text': datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=DESCRIPTION,
            features=schema,
            homepage="https://github.com/ChunyuanLI/Optimus/blob/master/download_datasets.md",
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Fetch the corpus file and declare a single TRAIN split over it."""
        local_path = dl_manager.download(DOWNLOAD_URL)
        train_split = datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"filepath": local_path},
        )
        return [train_split]

    def _generate_examples(self, filepath):
        """Yield (line_index, example) pairs for lines of at most 64 chars.

        Indices of filtered-out (too long) lines are skipped, so example
        keys are not contiguous — keys only need to be unique.

        NOTE(review): a line that strips to the empty string also passes
        the length filter and is yielded as an empty example; presumably
        intentional upstream — confirm before changing.
        """
        with open(filepath, encoding="utf-8") as handle:
            for line_index, raw_line in enumerate(handle):
                sentence = raw_line.strip()
                if len(sentence) > 64:
                    continue
                yield line_index, {"text": sentence}
|