# Config file by Simon Hengchen, https://hengchen.net
import os
import gzip
import datasets

logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """

This is a version of the Kubhist 2 dataset created, curated and made available by Språkbanken Text (SBX) at the University of Gothenburg (Sweden) under the CC BY 4.0 license. 

This is a a corpus of OCRed newspapers from Sweden spanning the 1640s to the 1900s.

The original data is available with many types of annotation in XML at https://spraakbanken.gu.se/en/resources/kubhist2. 

A good description of the data is available in this blog entry by Dana Dannélls: https://spraakbanken.gu.se/blogg/index.php/2019/09/15/the-kubhist-corpus-of-swedish-newspapers/



In a nutshell, this hugginface dataset version offers:

- only the OCRed text

- available in decadal subsets



License is CC BY 4.0 with attribution.

"""

_HOMEPAGE = "https://github.com/iguanodon-ai/kubhist2"
_LICENSE = "CC BY 4.0"

_BASE_DIR = "text"

# Use relative paths; the Hugging Face viewer runs in a sandbox.
# One .txt.gz file per decade (1640s through 1900s), plus "all" for the full corpus.
_PERIODS = [str(year) for year in range(1640, 1910, 10)] + ["all"]
_URLS = {period: os.path.join(_BASE_DIR, period, f"{period}.txt.gz") for period in _PERIODS}


class Kubhist2Config(datasets.BuilderConfig):
    """BuilderConfig for Kubhist2, parameterised by decade (or "all")."""

    def __init__(self, period="all", **kwargs):
        super().__init__(**kwargs)
        if str(period) not in _URLS:
            logger.warning("Unknown period %r; falling back to 'all'.", period)
            self.period = "all"
        else:
            self.period = str(period)


class Kubhist2(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = Kubhist2Config

    BUILDER_CONFIGS = [
        Kubhist2Config(
            name=key,
            version=datasets.Version("1.0.0"),
            description=f"Kubhist2: {key}",
            period=key
        )
        for key in _URLS
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "text": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # `manual_dir` is populated from `data_dir`, so users can run
        # `load_dataset(<this script>, <period>, data_dir="<path containing the text/ tree>")`.
        base_dir = dl_manager.manual_dir
        if base_dir is None:
            # No data_dir was given: fall back to the current directory so the script
            # still works when run next to the bundled `text/` folder.
            base_dir = "."
        data_path = os.path.join(base_dir, _URLS[self.config.period])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_path}),
        ]

    def _generate_examples(self, filepath):
        """Yields lines from the compressed .txt.gz file as individual examples."""
        with gzip.open(filepath, "rt", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, {"text": line.strip()}