conceptofmind committed on
Commit bcd8cea · verified · 1 Parent(s): 8738b19

Update megawika.py

Files changed (1)
  1. megawika.py +95 -141
megawika.py CHANGED
@@ -1,31 +1,4 @@
- # Copyright 2020 The HuggingFace Datasets Authors and
- # the Johns Hopkins University (JHU) Human Language Technology
- # Center of Excellence.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """
- This file provides a HuggingFace dataset loader implementation for
- the JHU/HLTCOE MegaWika dataset.
- MegaWika is a multi- and crosslingual text dataset containing 30 million
- Wikipedia passages with their scraped and cleaned web citations. The
- passages span 50 Wikipedias in 50 languages, and the articles in which
- the passages were originally embedded are included for convenience. Where
- a Wikipedia passage is in a non-English language, an automated English
- translation is provided. Furthermore, nearly 130 million English
- question/answer pairs were extracted from the passages, and FrameNet events
- occurring in the passages are detected using the LOME FrameNet parser.
- """
-

  import csv
  import json
@@ -35,18 +8,9 @@ import pathlib
  from pathlib import Path
  import yaml
  from ast import literal_eval
-
  import datasets

- # import gzip
- # try:
- # import lzma as xz
- # except ImportError:
- # import pylzma as xz
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
  _CITATION = """\
  @article{barham2023megawika,
  title={MegaWika: Millions of reports and their sources across 50 diverse languages},
@@ -62,44 +26,66 @@ _CITATION = """\
  }
  """

- # TODO: Add description of the dataset here
- # You can copy an official description
  _DESCRIPTION = """\
  MegaWika is a multi- and crosslingual text dataset containing 30 million
- Wikipedia passages with their scraped and cleaned web citations. The
- passages span 50 Wikipedias in 50 languages, and the articles in which
- the passages were originally embedded are included for convenience. Where
- a Wikipedia passage is in a non-English language, an automated English
- translation is provided. Furthermore, nearly 130 million English
- question/answer pairs were extracted from the passages, and FrameNet events
- occurring in the passages are detected using the LOME FrameNet parser.
  """

  _HOMEPAGE = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset"
-
  _LICENSE = "cc-by-sa-4.0"
-
  _URL = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset"

- # Load the file paths for all the splits (per language currently)
-
- file_list_url = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset/raw/main/files.yml"
-
- import urllib.request
- with urllib.request.urlopen(file_list_url) as f:
  try:
- fnames = yaml.safe_load(f)
- except yaml.YAMLError as exc:
- print("Error loading the file paths for the dataset splits. Aborting.")
- exit(1)
-
- _DATA_URL = fnames['fnames']
-
- _VARIANTS = ["all"] + list(_DATA_URL.keys())
-

  class MegaWika(datasets.GeneratorBasedBuilder):
- BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]

  def _info(self):
  return datasets.DatasetInfo(
@@ -111,17 +97,13 @@ class MegaWika(datasets.GeneratorBasedBuilder):
  "entries": datasets.features.Sequence(
  {
  "id": datasets.Value("string"),
-
- # Wiki passage
  "passage": {
  "text": [datasets.Value("string")],
  "parse": datasets.Value("string"),
  "en_tokens": [datasets.Value("string")],
  "lang_tokens": [datasets.Value("string")],
- "en_lang_token_map": [[datasets.Value("int32")]] # list of pairs
  },
-
- # MT
  "mt": {
  "original": datasets.Value("string"),
  "original_sents": [datasets.Value("string")],
@@ -130,13 +112,9 @@ class MegaWika(datasets.GeneratorBasedBuilder):
  "translation_probs": [[datasets.Value("float32")]],
  "repetitious_translation": datasets.Value("bool")
  },
-
- # Source document
  "source_lang": datasets.Value("string"),
  "source_url": datasets.Value("string"),
  "source_text": datasets.Value("string"),
-
- # Question/answer pairs
  "qa_pairs": datasets.Sequence(
  {
  "question": datasets.Value("string"),
@@ -148,10 +126,10 @@ class MegaWika(datasets.GeneratorBasedBuilder):
  "argument": datasets.Value("string")
  }
  ),
- "en_matches_in_source": [[datasets.Value("int32")]], # list of pair of int indices
- "en_match_in_passage": [datasets.Value("int32")], # pair of int indices
- "lang_matches_in_source": [[datasets.Value("int32")]], # list of pair of int indices
- "lang_match_in_passage": [datasets.Value("int32")], # pair of int indices
  "passage": [datasets.Value("string")],
  "en_answer_tokens": [datasets.Value("string")],
  "match_disambiguated_question": datasets.Value("string"),
@@ -162,52 +140,49 @@ class MegaWika(datasets.GeneratorBasedBuilder):
  }
  ),
  supervised_keys=None,
- homepage=_URL,
  citation=_CITATION,
  )

  def _split_generators(self, dl_manager):
  if self.config.name == "all":
- data_sources = _DATA_URL
  else:
- data_sources = {self.config.name: _DATA_URL[self.config.name]}

  return [
  datasets.SplitGenerator(
- name=lang,
- gen_kwargs={
- "filepaths": dl_manager.download(data_sources[lang])
- }
  )
- for lang
- in data_sources
  ]

  def _get_qa_pair_list_features(self, qa_pair, feature_name):
- res = []
-
- if feature_name in qa_pair:
- if qa_pair[feature_name]:
- return qa_pair[feature_name]
- else:
- if feature_name.startswith('en'):
- feature_name = '_'.join(feature_name.split('_')[1:])
- return self._get_qa_pair_list_features(qa_pair, feature_name)
-
- return res
-
- def _generate_examples(self, filepaths):
- """This function returns the examples in the raw (text) form by iterating on all the files."""
- id_ = 0
  for filepath in filepaths:
- # logger.info("Generating examples from = %s", filepath)
  try:
  with open(filepath, "r", encoding="utf-8") as f:
  for line in f:
- if line:
  example = json.loads(line)
- if example is not None and isinstance(example, dict):
- yield id_, {
  "article_title": example.get("article_title", ""),
  "article_text": example.get("article_text", ""),
  "entries": [
@@ -216,19 +191,11 @@ class MegaWika(datasets.GeneratorBasedBuilder):
  "passage": {
  "text": entry['passage'].get("text", []),
  "parse": json.dumps(entry['passage'].get("parse", [{}])),
- "en_tokens": list(entry['passage'].get(
- "en_tokens",
- {
- token: token
- for tokens in entry['passage'].get("tokens", {})
- for token in tokens
- }
- ).values()),
  "lang_tokens": list(entry['passage'].get("lang_tokens", {}).values()),
  "en_lang_token_map": [
- (int(item[0]), int(item[1]))
- for item
- in entry['passage'].get("en_lang_token_map", {}).items()
  ]
  },
  "mt": {
@@ -237,7 +204,7 @@ class MegaWika(datasets.GeneratorBasedBuilder):
  "translation": entry.get("translation", ""),
  "translation_sents": entry.get("translation_sents", []),
  "translation_probs": entry.get("translation_probs", [[]]),
- "repetitious_translation": entry.get("repetitious_translation", None)
  },
  "source_lang": entry.get("source_lang", ""),
  "source_url": entry.get("source_url", ""),
@@ -248,34 +215,21 @@ class MegaWika(datasets.GeneratorBasedBuilder):
  "en_answer": qa_pair.get('en_answer', qa_pair.get('answer', "")),
  'lang_answer': qa_pair.get('lang_answer', ''),
  'frames': qa_pair.get('frames', []),
- "en_matches_in_source": self._get_qa_pair_list_features(qa_pair, "en_matches_in_source"),
- "en_match_in_passage": self._get_qa_pair_list_features(qa_pair, "en_match_in_passage"),
- "lang_matches_in_source": self._get_qa_pair_list_features(qa_pair, "lang_matches_in_source"),
- "lang_match_in_passage": self._get_qa_pair_list_features(qa_pair, "lang_match_in_passage"),
  "passage": qa_pair.get('passage', []),
  "en_answer_tokens": qa_pair.get('en_answer_tokens', qa_pair.get('answer_tokens', [])),
  "match_disambiguated_question": qa_pair.get('match_disambiguated_question', ""),
  }
- for qa_pair
- in entry.get('qa_pairs', [])
  ]
  }
- for entry
- in example.get("entries", [])
  ]
  }
- id_ += 1
- except:
- print("Error reading file:", filepath)
-
-
-
- # "entries": datasets.features.Sequence(
- # {
- # "qa_pairs": datasets.Sequence(
- # {
- # "question": datasets.Value("string"),
- # "answer": datasets.Value("string"),
- # }
- # )
- # }
+ """MegaWika dataset loading script for HuggingFace Datasets."""

  import csv
  import json

  from pathlib import Path
  import yaml
  from ast import literal_eval
+ import urllib.request
  import datasets

  _CITATION = """\
  @article{barham2023megawika,
  title={MegaWika: Millions of reports and their sources across 50 diverse languages},

  }
  """

  _DESCRIPTION = """\
  MegaWika is a multi- and crosslingual text dataset containing 30 million
+ Wikipedia passages with their scraped and cleaned web citations across 50 languages.
  """

  _HOMEPAGE = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset"
  _LICENSE = "cc-by-sa-4.0"
  _URL = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset"

+ # Load language-specific file paths
+ def load_file_paths():
+ file_list_url = "https://huggingface.co/datasets/DataProvenanceInitiative/Megawika_subset/raw/main/files.yml"
  try:
+ with urllib.request.urlopen(file_list_url) as f:
+ return yaml.safe_load(f)['fnames']
+ except (yaml.YAMLError, urllib.error.URLError) as exc:
+ print(f"Error loading dataset file paths: {exc}")
+ return {}
+
+ class MegaWikaConfig(datasets.BuilderConfig):
+ """BuilderConfig for MegaWika."""
+
+ def __init__(self, language=None, **kwargs):
+ """BuilderConfig for MegaWika.
+
+ Args:
+ language: Language identifier for the dataset split
+ **kwargs: Keyword arguments forwarded to super.
+ """
+ super().__init__(**kwargs)
+ self.language = language

  class MegaWika(datasets.GeneratorBasedBuilder):
+ """MegaWika dataset."""
+
+ VERSION = datasets.Version("1.0.0")
+
+ # Load available languages
+ _DATA_URL = load_file_paths()
+ LANGUAGES = list(_DATA_URL.keys())
+
+ # Create configs for each language and an 'all' config
+ BUILDER_CONFIGS = ([
+ MegaWikaConfig(
+ name="all",
+ language=None,
+ version=VERSION,
+ description="Complete MegaWika dataset across all languages",
+ )
+ ] + [
+ MegaWikaConfig(
+ name=lang,
+ language=lang,
+ version=VERSION,
+ description=f"MegaWika dataset for {lang} language",
+ )
+ for lang in LANGUAGES
+ ])
+
+ DEFAULT_CONFIG_NAME = "all"

  def _info(self):
  return datasets.DatasetInfo(

  "entries": datasets.features.Sequence(
  {
  "id": datasets.Value("string"),
  "passage": {
  "text": [datasets.Value("string")],
  "parse": datasets.Value("string"),
  "en_tokens": [datasets.Value("string")],
  "lang_tokens": [datasets.Value("string")],
+ "en_lang_token_map": [[datasets.Value("int32")]]
  },
  "mt": {
  "original": datasets.Value("string"),
  "original_sents": [datasets.Value("string")],

  "translation_probs": [[datasets.Value("float32")]],
  "repetitious_translation": datasets.Value("bool")
  },
  "source_lang": datasets.Value("string"),
  "source_url": datasets.Value("string"),
  "source_text": datasets.Value("string"),
  "qa_pairs": datasets.Sequence(
  {
  "question": datasets.Value("string"),

  "argument": datasets.Value("string")
  }
  ),
+ "en_matches_in_source": [[datasets.Value("int32")]],
+ "en_match_in_passage": [datasets.Value("int32")],
+ "lang_matches_in_source": [[datasets.Value("int32")]],
+ "lang_match_in_passage": [datasets.Value("int32")],
  "passage": [datasets.Value("string")],
  "en_answer_tokens": [datasets.Value("string")],
  "match_disambiguated_question": datasets.Value("string"),

  }
  ),
  supervised_keys=None,
+ homepage=_HOMEPAGE,
+ license=_LICENSE,
  citation=_CITATION,
  )

  def _split_generators(self, dl_manager):
+ """Returns SplitGenerators."""
  if self.config.name == "all":
+ data_sources = self._DATA_URL
  else:
+ if self.config.name not in self._DATA_URL:
+ raise ValueError(f"Language {self.config.name} not found in available languages: {list(self._DATA_URL.keys())}")
+ data_sources = {self.config.name: self._DATA_URL[self.config.name]}

  return [
  datasets.SplitGenerator(
+ name=datasets.Split.TRAIN,
+ gen_kwargs={"filepaths": dl_manager.download(data_sources[lang]), "language": lang}
  )
+ for lang in data_sources
  ]

  def _get_qa_pair_list_features(self, qa_pair, feature_name):
+ """Helper function to extract QA pair features."""
+ if feature_name in qa_pair and qa_pair[feature_name]:
+ return qa_pair[feature_name]
+ elif feature_name.startswith('en'):
+ base_feature = '_'.join(feature_name.split('_')[1:])
+ if base_feature in qa_pair and qa_pair[base_feature]:
+ return qa_pair[base_feature]
+ return []
+
+ def _generate_examples(self, filepaths, language):
+ """Yields examples."""
+ _id = 0
  for filepath in filepaths:
  try:
  with open(filepath, "r", encoding="utf-8") as f:
  for line in f:
+ if line.strip():
  example = json.loads(line)
+ if isinstance(example, dict):
+ yield _id, {
  "article_title": example.get("article_title", ""),
  "article_text": example.get("article_text", ""),
  "entries": [

  "passage": {
  "text": entry['passage'].get("text", []),
  "parse": json.dumps(entry['passage'].get("parse", [{}])),
+ "en_tokens": list(entry['passage'].get("en_tokens", {}).values()),
  "lang_tokens": list(entry['passage'].get("lang_tokens", {}).values()),
  "en_lang_token_map": [
+ [int(k), int(v)] for k, v in
+ entry['passage'].get("en_lang_token_map", {}).items()
  ]
  },
  "mt": {

  "translation": entry.get("translation", ""),
  "translation_sents": entry.get("translation_sents", []),
  "translation_probs": entry.get("translation_probs", [[]]),
+ "repetitious_translation": entry.get("repetitious_translation", False)
  },
  "source_lang": entry.get("source_lang", ""),
  "source_url": entry.get("source_url", ""),

  "en_answer": qa_pair.get('en_answer', qa_pair.get('answer', "")),
  'lang_answer': qa_pair.get('lang_answer', ''),
  'frames': qa_pair.get('frames', []),
+ "en_matches_in_source": self._get_qa_pair_list_features(qa_pair, "en_matches_in_source"),
+ "en_match_in_passage": self._get_qa_pair_list_features(qa_pair, "en_match_in_passage"),
+ "lang_matches_in_source": self._get_qa_pair_list_features(qa_pair, "lang_matches_in_source"),
+ "lang_match_in_passage": self._get_qa_pair_list_features(qa_pair, "lang_match_in_passage"),
  "passage": qa_pair.get('passage', []),
  "en_answer_tokens": qa_pair.get('en_answer_tokens', qa_pair.get('answer_tokens', [])),
  "match_disambiguated_question": qa_pair.get('match_disambiguated_question', ""),
  }
+ for qa_pair in entry.get('qa_pairs', [])
  ]
  }
+ for entry in example.get("entries", [])
  ]
  }
+ _id += 1
+ except Exception as e:
+ print(f"Error reading file {filepath}: {str(e)}")
+ continue
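
For context, a minimal usage sketch of the loader after this change. This is not part of the commit: it assumes the script is consumed as the loading script of the repository named in _URL, that the per-language config names come from the language keys listed in files.yml, and "en" below is purely an illustrative config name.

  # Minimal usage sketch (illustrative repo id and config name; per-language
  # configs expose a single "train" split built in _split_generators).
  from datasets import load_dataset

  ds = load_dataset(
      "DataProvenanceInitiative/Megawika_subset",  # assumed hosting repo for this script
      name="en",                                   # any language key from files.yml, or "all" (the default config)
      split="train",
      trust_remote_code=True,                      # required to run a dataset script with recent `datasets` releases
  )

  # Features mirror _info(): top-level article fields plus a nested "entries" sequence,
  # which datasets decodes as a dict of parallel lists.
  example = ds[0]
  print(example["article_title"])
  print(example["entries"]["source_url"][:3])

The exact set of language configs is whatever files.yml lists at load time, since BUILDER_CONFIGS is built from load_file_paths().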