holylovenia committed
Commit: dd2a507
1 parent: 6db5610

Upload indommlu.py with huggingface_hub
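For reference, a commit like this is usually produced with the huggingface_hub client. A minimal sketch follows; the repository id and local path are assumptions for illustration, not taken from the commit, and authentication (e.g. `huggingface-cli login`) is required beforehand.

from huggingface_hub import HfApi

api = HfApi()
# Upload the local dataloader script into a dataset repository on the Hub.
# repo_id is an assumed placeholder, not confirmed by this commit page.
api.upload_file(
    path_or_fileobj="indommlu.py",
    path_in_repo="indommlu.py",
    repo_id="SEACrowd/indommlu",
    repo_type="dataset",
    commit_message="Upload indommlu.py with huggingface_hub",
)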

Files changed (1):
  1. indommlu.py +291 -0
indommlu.py ADDED
@@ -0,0 +1,291 @@
import csv
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """
@inproceedings{koto-etal-2023-large,
    title = "Large Language Models Only Pass Primary School Exams in {I}ndonesia: A Comprehensive Test on {I}ndo{MMLU}",
    author = "Koto, Fajri and
      Aisyah, Nurul and
      Li, Haonan and
      Baldwin, Timothy",
    editor = "Bouamor, Houda and
      Pino, Juan and
      Bali, Kalika",
    booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
    month = dec,
    year = "2023",
    address = "Singapore",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.emnlp-main.760",
    doi = "10.18653/v1/2023.emnlp-main.760",
    pages = "12359--12374",
}
"""

_DATASETNAME = "indommlu"

_DESCRIPTION = """
IndoMMLU is the first multi-task language understanding benchmark for Indonesian culture and languages, which consists
of questions from primary school to university entrance exams in Indonesia. By employing professional teachers, we
obtain 14,906 questions across 63 tasks and education levels, with 46% of the questions focusing on assessing
proficiency in the Indonesian language and knowledge of nine local languages and cultures in Indonesia.
"""

_HOMEPAGE = "https://huggingface.co/datasets/indolem/IndoMMLU"

_LANGUAGES = ["ind", "ban", "mad", "nij", "sun", "jav", "mak", "bjn", "abl"]

_LICENSE = Licenses.CC_BY_NC_SA_4_0.value

_LOCAL = False

_URLS = {_DATASETNAME: {"test": "https://huggingface.co/datasets/indolem/IndoMMLU/resolve/main/IndoMMLU.csv"}}

_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

lang2subject = {"ind": "Bahasa Indonesia", "ban": "Bahasa Bali", "mad": "Bahasa Madura", "nij": "Bahasa Dayak Ngaju", "sun": "Bahasa Sunda", "jav": "Bahasa Jawa", "mak": "Bahasa Makassar", "bjn": "Bahasa Banjar", "abl": "Bahasa Lampung"}

subject2english = {
    "Sejarah": "History",
    "Geografi": "Geography",
    "Bahasa Lampung": "Lampungic",
    "IPS": "Social science",
    "Bahasa Bali": "Balinese",
    "Bahasa Makassar": "Makassarese",
    "Bahasa Banjar": "Banjarese",
    "Kimia": "Chemistry",
    "Biologi": "Biology",
    "IPA": "Science",
    "Agama Kristen": "Christian religion",
    "Kesenian": "Art",
    "Agama Islam": "Islam religion",
    "Agama Hindu": "Hindu religion",
    "Bahasa Madura": "Madurese",
    "Penjaskes": "Sport",
    "Bahasa Indonesia": "Indonesian language",
    "Fisika": "Physics",
    "Budaya Alam Minangkabau": "Minangkabau culture",
    "Bahasa Dayak Ngaju": "Dayak language",
    "Sosiologi": "Sociology",
    "Ekonomi": "Economy",
    "Bahasa Sunda": "Sundanese",
    "Bahasa Jawa": "Javanese",
    "PPKN": "Civic education",
}

subject2group = {
    "Sejarah": "Humanities",
    "Geografi": "Social science",
    "Bahasa Lampung": "Local languages and cultures",
    "IPS": "Social science",
    "Bahasa Bali": "Local languages and cultures",
    "Bahasa Makassar": "Local languages and cultures",
    "Bahasa Banjar": "Local languages and cultures",
    "Kimia": "STEM",
    "Biologi": "STEM",
    "IPA": "STEM",
    "Agama Kristen": "Humanities",
    "Kesenian": "Humanities",
    "Agama Islam": "Humanities",
    "Agama Hindu": "Humanities",
    "Bahasa Madura": "Local languages and cultures",
    "Penjaskes": "Humanities",
    "Bahasa Indonesia": "Indonesian language",
    "Fisika": "STEM",
    "Budaya Alam Minangkabau": "Local languages and cultures",
    "Bahasa Dayak Ngaju": "Local languages and cultures",
    "Sosiologi": "Social science",
    "Ekonomi": "Social science",
    "Bahasa Sunda": "Local languages and cultures",
    "Bahasa Jawa": "Local languages and cultures",
    "PPKN": "Social science",
}

special_case = ["SD-SMP-SMA", "SD-SMP"]
level_mapper = {
    "SMA": "SMA",  # SMA --> high school level
    "Seleksi PTN": "University entrance test",
    "SD": "SD",  # SD --> elementary school level
    "SMP": "SMP",  # SMP --> junior high school level
    "Kelas I SD": "SD",
    "Kelas X SMA": "SMA",
    "Kelas XI SMA": "SMA",
    "Kelas XII SMA": "SMA",
    "V SD": "SD",
    "VI SD": "SD",
    "VII SMP": "SMP",
    "VIII SMP ": "SMP",
    "IX SMP": "SMP",
    "Kelas III SD": "SD",
    "Kelas IV SD": "SD",
    "Kelas II SD": "SD",
}


def fix_level(level, kelas):
    # Fixing level
    if level in special_case:
        kelas = float(kelas)
        if 1 <= kelas <= 6:
            level = "SD"
        elif 7 <= kelas <= 9:
            level = "SMP"
        elif kelas >= 10:
            level = "SMA"
        else:
            print(level)
    fixed_level = level_mapper[level]

    # Fixing class
    kelas = str(kelas)
    if kelas.strip() in ["PTN", "2023-10-12 00:00:00"]:
        fixed_kelas = 13
    elif kelas == "4,5,6":
        fixed_kelas = 6
    else:
        fixed_kelas = int(float(kelas.strip()))

    # sanity check over the level and kelas
    return fixed_level, fixed_kelas


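# The fix_level helper above, illustrated (both results follow directly from the code):
#   fix_level("Kelas IV SD", "4")  returns ("SD", 4)   # level normalized via level_mapper, class via int(float(...))
#   fix_level("SD-SMP-SMA", "8")   returns ("SMP", 8)  # special-case levels are resolved from the class number
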
def pass_schema_filter(schema, row):
    if schema == "source":
        return True
    lang = schema.split("_")[1]
    if lang not in _LANGUAGES:  # plain seacrowd_qa, no language code in the schema string
        return True
    if lang == "ind":  # "ind" covers "Bahasa Indonesia" and all other non-language subjects
        return (lang2subject[lang] == row["subject"]) or (row["subject"] not in lang2subject.values())
    return lang2subject[lang] == row["subject"]

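# pass_schema_filter above, in short: every row passes for the "source" schema and for schema strings
# whose second "_"-separated token is not a language code; when that token is a language code, only rows
# whose subject matches the corresponding language pass, with "ind" additionally keeping every subject
# that is not a language subject at all.
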

class IndoMMLUDataset(datasets.GeneratorBasedBuilder):
    """IndoMMLU is the first multitask language understanding benchmark for Indonesian culture and languages."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_qa",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_qa",
            subset_id=_DATASETNAME,
        ),
    ]
    for lang in _LANGUAGES:
        lang_config = SEACrowdConfig(
            name=f"{_DATASETNAME}_{lang}_seacrowd_qa",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} {lang} SEACrowd schema",
            schema="seacrowd_qa",
            subset_id=_DATASETNAME,
        )
        BUILDER_CONFIGS.append(lang_config)

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "subject": datasets.Value("string"),
                    "group": datasets.Value("string"),
                    "level": datasets.Value("string"),
                    "class": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "options": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "is_for_fewshot": datasets.Value("string"),
                }
            )

        else:
            features = schemas.qa_features
            features["meta"] = {
                "subject": datasets.Value("string"),
                "group": datasets.Value("string"),
                "level": datasets.Value("string"),
                "class": datasets.Value("string"),
                "is_for_fewshot": datasets.Value("string"),
            }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        with open(filepath[split], newline="", encoding="utf-8") as csv_file:
            data = csv.DictReader(csv_file)
            for i, row in enumerate(data):
                if pass_schema_filter(self.config.schema, row):
                    fixed_level, fixed_kelas = fix_level(row["level"], row["kelas"])
                    # The choices come as ["A. xxx", "B. xxx", ...] while the answer key is just "A",
                    # so both are replaced with the answer content only.
                    choices = row["jawaban"].split("\n")
                    answer_choice = row["kunci"]
                    # Find the corresponding choice in the choices.
                    # Skip the 2 data points (i = 4223, 14150) with an invalid answer_choice.
                    corresponding_choice = next((choice for choice in choices if choice.startswith(answer_choice)), None)
                    if corresponding_choice is None:
                        continue
                    else:
                        if self.config.schema == "source":
                            yield i, {
                                "subject": subject2english[row["subject"]],
                                "group": subject2group[row["subject"]],
                                "level": fixed_level,
                                "class": fixed_kelas,
                                "question": row["soal"],
                                "options": [opt[2:].strip() for opt in choices],  # remove "A.", "B.", ... from the options
                                "answer": corresponding_choice[2:].strip(),  # remove "A.", "B.", ... from the answer
                                "is_for_fewshot": row["is_for_fewshot"],
                            }
                        else:
                            yield i, {
                                "id": str(i),
                                "question_id": str(i),
                                "document_id": str(i),
                                "question": row["soal"],
                                "type": "multiple_choice",
                                "choices": [opt[2:].strip() for opt in choices],  # remove "A.", "B.", ... from the options
                                "context": "",
                                "answer": [corresponding_choice[2:].strip()],  # remove "A.", "B.", ... from the answer
                                "meta": {
                                    "subject": subject2english[row["subject"]],
                                    "group": subject2group[row["subject"]],
                                    "level": fixed_level,
                                    "class": fixed_kelas,
                                    "is_for_fewshot": row["is_for_fewshot"],
                                },
                            }
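
Once the script is on the Hub (or available locally), the configs declared in BUILDER_CONFIGS can be loaded with the datasets library. The following is a minimal sketch, assuming the seacrowd package is installed so the schema imports resolve, and a datasets version that still supports script-based loaders; whether trust_remote_code is required depends on that version.

from datasets import load_dataset

# Default source schema and the full SEACrowd QA schema; config names follow BUILDER_CONFIGS above.
indommlu_source = load_dataset("indommlu.py", name="indommlu_source", trust_remote_code=True)
indommlu_qa = load_dataset("indommlu.py", name="indommlu_seacrowd_qa", trust_remote_code=True)

# Per-language subset, e.g. Sundanese.
indommlu_sun = load_dataset("indommlu.py", name="indommlu_sun_seacrowd_qa", trust_remote_code=True)

# Only a "test" split is defined in _split_generators.
print(indommlu_qa["test"][0])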