Crystina committed
Commit 791ced0 · 1 Parent(s): b2fca1e

init; add data for arabic, id, ja, ru

.gitattributes CHANGED
@@ -35,3 +35,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.mp3 filter=lfs diff=lfs merge=lfs -text
 *.ogg filter=lfs diff=lfs merge=lfs -text
 *.wav filter=lfs diff=lfs merge=lfs -text
+arabic-train.jsonl.gz filter=lfs diff=lfs merge=lfs -text
+indonesian-train.jsonl.gz filter=lfs diff=lfs merge=lfs -text
+japanese-train.jsonl.gz filter=lfs diff=lfs merge=lfs -text
+russian-train.jsonl.gz filter=lfs diff=lfs merge=lfs -text
arabic-train.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ceeaaff8d6af90fe0aa891c3413fca586b999ddf758eb47ffeaa8fa4da1e4ce
+size 1861320847
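
Each of the added *-train.jsonl.gz files is stored as a Git LFS pointer like the one above; the actual payload is gzipped JSON Lines. Below is a minimal sketch (illustration only, not part of the commit) for inspecting a locally downloaded copy, assuming the records follow the schema declared in mmarco-passage.py later in this diff; the filename and field names are taken from the commit, not verified against the data.

import gzip
import json

# Illustration only: peek at the first few training records of a downloaded copy.
# Field names (query_id, query, positive_passages, negative_passages) are assumed
# from the features defined in mmarco-passage.py.
with gzip.open("arabic-train.jsonl.gz", "rt", encoding="utf-8") as f:
    for i, line in enumerate(f):
        example = json.loads(line)
        print(example["query_id"], example["query"], len(example.get("positive_passages", [])))
        if i >= 2:  # only inspect the first few lines
            break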
indonesian-train.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b499df6a2b579c30f6e5b14e75e44ec96bd8e3d8f365c6db72ece7f416fd5b3
+size 1583314419
japanese-train.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7121322a6b0d477b939b5987c4473ae374cdd7ce46eabbc7d90e29988858e5fd
+size 1781935060
mmarco-passage.py ADDED
@@ -0,0 +1,101 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""mMARCO Passage dataset."""
+
+import json
+
+import datasets
+
+_CITATION = """
+"""
+
+_DESCRIPTION = "dataset load script for mMARCO Passage"
+
+languages = [
+    "arabic",
+    "indonesian",
+    "japanese",
+    "russian",
+]
+# Each language maps directly to its training file; split names follow these keys.
+_DATASET_URLS = {
+    lang: f"https://huggingface.co/datasets/crystina-z/mmarco-passage/resolve/main/{lang}-train.jsonl.gz"
+    for lang in languages
+}
+_DATASET_URLS["english"] = "https://huggingface.co/datasets/Tevatron/msmarco-passage/resolve/main/train.jsonl.gz"
+
+
+class MMarcoPassage(datasets.GeneratorBasedBuilder):
+    VERSION = datasets.Version("0.0.1")
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(version=VERSION,
+                               description="mMARCO passage train datasets"),
+    ]
+
+    def _info(self):
+        features = datasets.Features({
+            'query_id': datasets.Value('string'),
+            'query': datasets.Value('string'),
+            'positive_passages': [
+                {'docid': datasets.Value('string'), 'title': datasets.Value('string'), 'text': datasets.Value('string')}
+            ],
+            'negative_passages': [
+                {'docid': datasets.Value('string'), 'title': datasets.Value('string'), 'text': datasets.Value('string')}
+            ],
+        })
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types
+            features=features,
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage="",
+            # License for the dataset if available
+            license="",
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        if self.config.data_files:
+            downloaded_files = self.config.data_files
+        else:
+            downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
+        splits = [
+            datasets.SplitGenerator(
+                name=split,
+                gen_kwargs={
+                    "files": [downloaded_files[split]] if isinstance(downloaded_files[split], str) else downloaded_files[split],
+                },
+            ) for split in downloaded_files
+        ]
+        return splits
+
+    def _generate_examples(self, files):
+        """Yields examples."""
+        for filepath in files:
+            with open(filepath, encoding="utf-8") as f:
+                for line in f:
+                    data = json.loads(line)
+                    if data.get('negative_passages') is None:
+                        data['negative_passages'] = []
+                    if data.get('positive_passages') is None:
+                        data['positive_passages'] = []
+                    yield data['query_id'], data
+
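
For context, a minimal usage sketch for the loading script above, assuming the script is resolved from this repository and that split names follow the language keys in _DATASET_URLS (illustration only, not part of the commit):

from datasets import load_dataset

# Hypothetical example: load the Arabic training data via the script above.
# Depending on the installed datasets version, trust_remote_code=True may be
# required for script-based datasets.
arabic_train = load_dataset("crystina-z/mmarco-passage", split="arabic")
print(arabic_train[0]["query"])
print(len(arabic_train[0]["positive_passages"]), "positive passages")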
russian-train.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53d1c01a867bae6a488e81d9f0f7c0336cde0a245b52cbadd0c8acc5f97d336b
+size 2140260703