import datasets
import json
import requests
from urllib.parse import urlencode
from pathlib import Path
import os
# Canonical dataset name.
_NAME = 'RuREBus'
# BibTeX entry for the RuREBus-2020 shared-task paper (Dialogue 2020).
_CITATION = '''
@inproceedings{rurebus,
Address = {Moscow, Russia},
Author = {Ivanin, Vitaly and Artemova, Ekaterina and Batura, Tatiana and Ivanov, Vladimir and Sarkisyan, Veronika and Tutubalina, Elena and Smurov, Ivan},
Title = {RuREBus-2020 Shared Task: Russian Relation Extraction for Business},
Booktitle = {Computational Linguistics and Intellectual Technologies: Proceedings of the International Conference “Dialog” [Komp’iuternaia Lingvistika i Intellektual’nye Tehnologii: Trudy Mezhdunarodnoj Konferentsii “Dialog”]},
Year = {2020}
}
'''.strip()
# One-line dataset description shown in the dataset card.
_DESCRIPTION = 'Russian Relation Extraction for Business'
# Upstream repository of the shared task.
_HOMEPAGE = 'https://github.com/dialogue-evaluation/RuREBus'
# Version string wrapped into datasets.Version by the builder below.
_VERSION = '1.0.0'
class RuREBusBuilder(datasets.GeneratorBasedBuilder):
    """Dataset builder for the RuREBus-2020 shared task.

    Four configs are exposed:
      * ``data``      -- annotated train/test examples stored as JSON lines.
      * ``raw_txt``   -- unannotated source texts, downloaded as an archive
                         from a public Yandex Disk folder.
      * ``ent_types`` -- the list of possible entity type labels.
      * ``rel_types`` -- the list of possible relation type labels.
    """

    # Yandex Disk public API endpoint that resolves a public folder key into
    # a direct (temporary) download URL.
    base_url = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?'
    public_key = 'https://disk.yandex.ru/d/t1WakmYXlL6jBw'
    final_url = base_url + urlencode(dict(public_key=public_key))

    # Relative paths of the annotated splits inside the repository.
    _DATA_URLS = {
        'train': 'data/train.jsonl',
        'test': 'data/test.jsonl',
    }
    # Relative paths of the label-inventory files.
    _TYPES_PATHS = {'ent_types': 'ent_types.txt',
                    'rel_types': 'rel_types.txt'}

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        datasets.BuilderConfig('data',
                               version=VERSION,
                               description='Annotated data'),
        datasets.BuilderConfig('raw_txt',
                               version=VERSION,
                               description='Raw texts without annotations'),
        datasets.BuilderConfig('ent_types',
                               version=VERSION,
                               description='All possible entity types'),
        datasets.BuilderConfig('rel_types',
                               version=VERSION,
                               description='All possible relation types'),
    ]
    DEFAULT_CONFIG_NAME = 'data'

    @classmethod
    def _raw_txt_urls(cls):
        """Resolve the direct download URL for the raw-text archive.

        The Yandex Disk API call used to run in the class body, i.e. at
        import time: importing this module hit the network (and failed when
        offline) even for configs that never need the raw texts.  Resolving
        lazily here confines the request to the ``raw_txt`` download path.

        Returns:
            dict: ``{'raw_txt': <direct download href>}``.

        Raises:
            requests.HTTPError: if the Yandex Disk API rejects the request.
        """
        response = requests.get(cls.final_url)
        response.raise_for_status()
        return {'raw_txt': response.json()['href']}

    def _info(self) -> datasets.DatasetInfo:
        """Build the feature schema for the active config."""
        if self.config.name == 'data':
            # Entities/relations are kept as raw brat-style strings.
            features = datasets.Features({
                'id': datasets.Value('int32'),
                'text': datasets.Value('string'),
                'entities': datasets.Sequence(datasets.Value('string')),
                'relations': datasets.Sequence(datasets.Value('string'))
            })
        elif self.config.name == 'raw_txt':
            features = datasets.Features({
                'region': datasets.Value('string'),
                'district': datasets.Value('string'),
                'title': datasets.Value('string'),
                'text': datasets.Value('string')
            })
        else:
            # ent_types / rel_types: one label per example.
            features = datasets.Features({'type': datasets.Value('string')})
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the files needed by the active config and declare splits."""
        if self.config.name == 'data':
            files = dl_manager.download(self._DATA_URLS)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={'filepath': files['train']},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={'filepath': files['test']},
                ),
            ]
        elif self.config.name == 'raw_txt':
            # URL is resolved lazily (see _raw_txt_urls) so that only this
            # config performs the Yandex Disk API request.
            folder = dl_manager.download_and_extract(self._raw_txt_urls())['raw_txt']
            return [
                datasets.SplitGenerator(
                    name='raw_txt',
                    gen_kwargs={'filepath': folder},
                )
            ]
        else:
            files = dl_manager.download(self._TYPES_PATHS)
            return [
                datasets.SplitGenerator(
                    name=self.config.name,
                    gen_kwargs={'filepath': files[self.config.name]},
                )
            ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs for the active config.

        Args:
            filepath: a jsonl/txt file path for ``data`` and the type
                configs, or the extracted archive directory for ``raw_txt``.
        """
        if self.config.name == 'data':
            with open(filepath, encoding='utf-8') as f:
                for i, line in enumerate(f):
                    yield i, json.loads(line)
        elif self.config.name == 'raw_txt':
            path = os.path.join(filepath, 'MED_txt/unparsed_txt')
            i = 0
            for root, _dirs, files in os.walk(path):
                if not files:
                    continue
                root = Path(root)
                # Directory names in the archive were zipped with a cp866
                # (Russian DOS) codepage but extracted as cp437; re-decode
                # to recover the Cyrillic names.
                region = root.parent.name.encode('cp437').decode('cp866')
                district = root.name.encode('cp437').decode('cp866')
                # name_dict.txt maps a numeric file key to a document title:
                # each line looks like "<idx> <key>_<title>".
                titles = {}
                with open(root / 'name_dict.txt', encoding='utf-8') as f_titles:
                    for line in f_titles:
                        key, title = line.split(maxsplit=1)[1].split('_', maxsplit=1)
                        titles[key] = title.strip()
                for file in files:
                    if file == 'name_dict.txt':
                        continue
                    file = Path(file)
                    # Text files are named "<key>_...", matching name_dict.
                    key = file.name.split('_', maxsplit=1)[0]
                    title = titles[key]
                    with open(root / file, encoding='utf-8') as f:
                        text = f.read()
                    yield i, {
                        'region': region,
                        'district': district,
                        'title': title,
                        'text': text
                    }
                    i += 1
        else:
            # ent_types / rel_types: one label per line.
            with open(filepath, encoding='utf-8') as f:
                for i, line in enumerate(f):
                    yield i, {'type': line.strip()}