|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
import pandas as pd |
|
|
|
import datasets |
|
|
|
|
|
|
|
|
|
_CITATION = """ """ |
|
|
|
|
|
|
|
_DESCRIPTION = """ """ |
|
|
|
|
|
_HOMEPAGE = "" |
|
|
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
|
|
_URLS = { |
|
"first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip", |
|
"second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip", |
|
} |
|
AVAILABLE_LANGUAGES=['DE'] |
|
SECTORS=['1'] |
|
AVAILABLE_FEATURES={ |
|
'1': datasets.Features({ |
|
'celex_id': datasets.Value("string"), |
|
'text_html_raw': datasets.Value("string"), |
|
'text_html_cleaned': datasets.Value("string"), |
|
'text_cleaned': datasets.Value("string"), |
|
'form': datasets.Sequence(datasets.Value("string")), |
|
'subject_matter': datasets.Sequence(datasets.Value("string")), |
|
'current_consolidated_version': datasets.Sequence(datasets.Value("string")), |
|
'harmonisation_of_customs_law_community_transit': datasets.Sequence(datasets.Value("string")), |
|
'harmonisation_of_customs_law_customs_territory': datasets.Sequence(datasets.Value("string")), |
|
'harmonisation_of_customs_law_value_for_customs_purposes': datasets.Sequence(datasets.Value("string")), |
|
'directory_code': datasets.Sequence(datasets.Value("string")), |
|
'eurovoc': datasets.Sequence(datasets.Value("string")), |
|
'customs_duties_community_tariff_quotas': datasets.Sequence(datasets.Value("string")), |
|
'customs_duties_authorisation_to_defer_application_of_cct': datasets.Sequence(datasets.Value("string")), |
|
'harmonisation_of_customs_law_various': datasets.Sequence(datasets.Value("string")), |
|
'customs_duties_suspensions': datasets.Sequence(datasets.Value("string"))}) |
|
} |
|
SECTOR_DESCRIPTIONS={ |
|
'1':"" |
|
} |
|
|
|
|
|
class SuperEurlexConfig(datasets.BuilderConfig):
    """BuilderConfig for SuperEurlex."""

    def __init__(self, sector, language, features, citation, url, **kwargs):
        """BuilderConfig for SuperEurlex.

        Args:
            sector: sector of the wanted data (e.g. '1').
            language: the language code for the language in which the text
                shall be written in (e.g. 'DE').
            features: `datasets.Features`, feature schema of the resulting
                dataset (see ``AVAILABLE_FEATURES``).
            citation: *string*, citation for the data set.
            url: *string*, url for information about the data set.
            **kwargs: keyword arguments forwarded to super.
        """
        # The config name encodes both dimensions, e.g. "1.DE".
        super().__init__(name=f"{sector}.{language}", version=datasets.Version("0.1.0"), **kwargs)
        self.features = features
        self.language = language
        self.sector = sector
        # Repository-relative paths: text is stored per language, metadata is
        # language-independent and stored per sector only.
        self.text_data_url = f"text_data/{language}/{sector}.jsonl"
        self.meta_data_url = f"meta_data/{sector}.jsonl"
        self.citation = citation
        self.url = url
|
|
|
|
|
class SuperEurlex(datasets.GeneratorBasedBuilder):
    """Dataset builder joining per-language EUR-Lex text files with per-sector
    metadata files on the documents' CELEX identifiers."""

    VERSION = datasets.Version("1.1.0")

    # One config per (sector, language) pair, named "<sector>.<language>".
    BUILDER_CONFIGS = [
        SuperEurlexConfig(
            sector=sect,
            language=lang,
            description=SECTOR_DESCRIPTIONS[sect],
            features=AVAILABLE_FEATURES[sect],
            citation=_CITATION,
            url=_HOMEPAGE)
        for lang in AVAILABLE_LANGUAGES for sect in SECTORS
    ]

    # BUG FIX: was "3.DE", but no sector '3' config exists (SECTORS == ['1']),
    # so the default pointed at a non-existent configuration.
    DEFAULT_CONFIG_NAME = "1.DE"

    def _info(self):
        """Return dataset metadata; the feature schema depends on the
        configured sector."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=AVAILABLE_FEATURES[self.config.sector],
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the text and metadata files for this config and expose a
        single train split."""
        urls = {'text': self.config.text_data_url,
                'meta': self.config.meta_data_url}
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "text": data_dir['text'],
                    "meta": data_dir['meta'],
                    "language": self.config.language,
                    "sector": self.config.sector,
                    'split': 'train'
                },
            )
        ]

    def _generate_examples(self, text, meta, sector, language, split):
        """Yield (index, example) pairs from the JSON-lines text file joined
        with the JSON-lines metadata file on 'celex_id'."""
        print("Reading Text Data...")
        text_data = pd.read_json(text, lines=True)
        # Some celex_id values are stored as single-element lists; unwrap them
        # so the merge key is a plain scalar on both sides.
        text_data['celex_id'] = text_data['celex_id'].apply(lambda x: x[0] if isinstance(x, list) else x)
        print("Reading Meta Data...")
        meta_data = pd.read_json(meta, lines=True)
        meta_data['celex_id'] = meta_data['celex_id'].apply(lambda x: x[0] if isinstance(x, list) else x)
        print("Combining Text & Meta Data...")
        combined_data = pd.merge(text_data, meta_data, on='celex_id')
        print("Converting To final dataset...")
        dataset = datasets.Dataset.from_pandas(combined_data)
        # BUG FIX: drop the pandas index column only if from_pandas actually
        # added it — a merged frame has a fresh RangeIndex, in which case the
        # column is absent and unconditional remove_columns() would raise.
        if '__index_level_0__' in dataset.column_names:
            dataset = dataset.remove_columns('__index_level_0__')
        for i, sample in enumerate(dataset):
            yield i, sample
|
|
|
|
|
|
|
print("Hello World") |
|
if __name__ == '__main__': |
|
import datasets as ds |
|
import sys |
|
print(sys.argv[0]) |
|
dataset = ds.load_dataset(sys.argv[0],'1.DE') |
|
print(dataset) |
|
for sample in dataset['train']: |
|
continue |
|
|