import datasets as ds
import pandas as pd

_CITATION = """\
@InProceedings{yanaka-EtAl:2021:blackbox,
    author    = {Yanaka, Hitomi and Mineshima, Koji},
    title     = {Assessing the Generalization Capacity of Pre-trained Language Models through Japanese Adversarial Natural Language Inference},
    booktitle = {Proceedings of the 2021 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP (BlackboxNLP2021)},
    year      = {2021},
}
"""

_DESCRIPTION = "The JaNLI (Japanese Adversarial NLI) dataset, inspired by the English HANS dataset, is designed to require an understanding of Japanese linguistic phenomena and to expose the weaknesses of models."

_HOMEPAGE = "https://github.com/verypluming/JaNLI"

_LICENSE = "CC BY-SA 4.0"

_DOWNLOAD_URL = "https://raw.githubusercontent.com/verypluming/JaNLI/main/janli.tsv"


class JaNLIDataset(ds.GeneratorBasedBuilder):
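    """Dataset loading script for JaNLI (Japanese Adversarial Natural Language Inference)."""
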
    VERSION = ds.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "base"

    BUILDER_CONFIGS = [
        ds.BuilderConfig(
            name="base",
            version=VERSION,
            description="A version adopting the column names of a typical NLI dataset.",
        ),
        ds.BuilderConfig(
            name="original",
            version=VERSION,
            description="The original version retaining the unaltered column names.",
        ),
    ]

    def _info(self) -> ds.DatasetInfo:
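        # The two configurations share the same schema and differ only in column names.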
        if self.config.name == "base":
            features = ds.Features(
                {
                    "id": ds.Value("int64"),
                    "premise": ds.Value("string"),
                    "hypothesis": ds.Value("string"),
                    "label": ds.ClassLabel(names=["entailment", "non-entailment"]),
                    "heuristics": ds.Value("string"),
                    "number_of_NPs": ds.Value("int32"),
                    "semtag": ds.Value("string"),
                }
            )
        elif self.config.name == "original":
            features = ds.Features(
                {
                    "id": ds.Value("int64"),
                    "sentence_A_Ja": ds.Value("string"),
                    "sentence_B_Ja": ds.Value("string"),
                    "entailment_label_Ja": ds.ClassLabel(names=["entailment", "non-entailment"]),
                    "heuristics": ds.Value("string"),
                    "number_of_NPs": ds.Value("int32"),
                    "semtag": ds.Value("string"),
                }
            )
        else:
            raise ValueError(f"Invalid config name: {self.config.name}")

        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )

    def _split_generators(self, dl_manager: ds.DownloadManager):
        data_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
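        # The dataset ships as a single TSV file; keep the original row index as an explicit "id" column.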
        df: pd.DataFrame = pd.read_csv(data_path, sep="\t", index_col=0)
        df["id"] = df.index

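        # For the "base" config, rename the original columns to conventional NLI names.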
        if self.config.name == "base":
            df = df.rename(
                columns={
                    "sentence_A_Ja": "premise",
                    "sentence_B_Ja": "hypothesis",
                    "entailment_label_Ja": "label",
                }
            )

        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={"df": df[df["split"] == "train"]},
            ),
            ds.SplitGenerator(
                name=ds.Split.TEST,
                gen_kwargs={"df": df[df["split"] == "test"]},
            ),
        ]

    def _generate_examples(self, df: pd.DataFrame):
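        # The "split" column is only used to partition rows and is not part of the released features.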
        df = df.drop("split", axis=1)
        for i, row in enumerate(df.to_dict("records")):
            yield i, row
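
# Usage sketch (assumptions: this script is saved locally as "janli.py" and the
# Hugging Face `datasets` library is installed; the local path and config name
# below are illustrative, and recent `datasets` versions may additionally require
# `trust_remote_code=True` when loading a dataset script):
#
#     import datasets
#
#     janli = datasets.load_dataset("janli.py", name="base")
#     print(janli["train"][0])       # single example with premise/hypothesis/label fields
#     print(janli["test"].features)  # schema as defined in JaNLIDataset._info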