mpkato committed on
Commit
0183274
·
verified ·
1 Parent(s): 18ba755

Delete loading script

Browse files
Files changed (1) hide show
  1. miracl-japanese-small.py +0 -95
miracl-japanese-small.py DELETED
@@ -1,95 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
-
18
- import json
19
- import datasets
20
-
21
- _CITATION = """
22
- """
23
-
24
- _DESCRIPTION = "dataset load script for MIRACL Japanese Small"
25
-
26
- _DATASET_URLS = {
27
- 'train': 'https://huggingface.co/datasets/mpkato/miracl-japanese-small/resolve/main/topics.miracl-v1.0-ja-train.jsonl.gz',
28
- 'dev': 'https://huggingface.co/datasets/mpkato/miracl-japanese-small/resolve/main/topics.miracl-v1.0-ja-dev.jsonl.gz',
29
- }
30
-
31
-
32
class MIRACLJapaneseSmall(datasets.GeneratorBasedBuilder):
    """Loading script for the MIRACL Japanese Small retrieval dataset.

    Downloads gzipped JSONL topic files (one per split, see ``_DATASET_URLS``)
    and yields one example per query with its positive and negative passages.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=datasets.Version('1.0.0'),
            # Fixed: was an f-string with no placeholders (lint F541).
            description='MIRACL Japanese Small dataset.'
        )
    ]

    def _info(self):
        """Return the ``DatasetInfo`` describing the example schema."""
        features = datasets.Features({
            'query_id': datasets.Value('string'),
            'query': datasets.Value('string'),
            # Each passage is a dict of docid/text/title strings; positive
            # and negative passage lists share the same element schema.
            'positive_passages': [
                {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
                 'title': datasets.Value('string')}
            ],
            'negative_passages': [
                {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
                 'title': datasets.Value('string')}
            ],
        })

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage="",
            # License for the dataset if available.
            license="",
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download both split files and return one ``SplitGenerator`` each.

        The per-split construction is identical, so build the generators in a
        single loop instead of duplicating the block per split.
        """
        downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": downloaded_files[split]},
            )
            for split in ("train", "dev")
        ]

    def _generate_examples(self, filepath):
        """Yield ``(query_id, example)`` pairs from a JSONL topics file.

        Missing or explicit-null passage lists are normalized to ``[]`` so the
        example always matches the declared features.
        """
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                data = json.loads(line)
                # Normalize both passage fields the same way; `is None` also
                # covers an explicit JSON null, which .setdefault would not.
                for field in ('negative_passages', 'positive_passages'):
                    if data.get(field) is None:
                        data[field] = []
                yield data['query_id'], data