theblackcat102 committed on
Commit
71ea52d
·
1 Parent(s): 34eb100

Delete codex-math-qa.py

Browse files
Files changed (1) hide show
  1. codex-math-qa.py +0 -156
codex-math-qa.py DELETED
@@ -1,156 +0,0 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """TODO: Add a description here."""
15
-
16
- import json
17
- import datasets
18
-
19
-
20
- # TODO: Add BibTeX citation
21
- # Find for instance the citation on arxiv or on the dataset repo/website
22
- _CITATION = """\
23
- """
24
-
25
- # TODO: Add description of the dataset here
26
- # You can copy an official description
27
- _DESCRIPTION = """\
28
- Solution by codex-davinci-002 for math_qa
29
- """
30
-
31
- # TODO: Add a link to an official homepage for the dataset here
32
- _HOMEPAGE = ""
33
-
34
- # TODO: Add the licence for the dataset here if you can find it
35
- _LICENSE = ""
36
-
37
- # TODO: Add link to the official dataset URLs here
38
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
39
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
40
- _BASE_URL = 'https://huggingface.co/datasets/theblackcat102/codex-math-qa/resolve/main/'
41
-
42
-
43
-
44
- class CodexMathQA(datasets.GeneratorBasedBuilder):
45
-
46
- VERSION = datasets.Version("1.0.0")
47
-
48
- # This is an example of a dataset with multiple configurations.
49
- # If you don't want/need to define several sub-sets in your dataset,
50
- # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
51
-
52
- # If you need to make complex sub-parts in the datasets with configurable options
53
- # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
54
- # BUILDER_CONFIG_CLASS = MyBuilderConfig
55
-
56
- BUILDER_CONFIGS = [
57
- datasets.BuilderConfig(name="main", version=VERSION, description="training sets from math qa"),
58
- datasets.BuilderConfig(name="rational", version=VERSION, description="split with rationale text in prompt")
59
- ]
60
-
61
-
62
- DEFAULT_CONFIG_NAME = "main" # It's not mandatory to have a default configuration. Just use one if it make sense.
63
-
64
- def _info(self):
65
- features = datasets.Features(
66
- {
67
- "model_name":datasets.Value("string"),
68
- "question":datasets.Value("string"),
69
- "reply":datasets.Value("string"),
70
- "ping_date":datasets.Value("string"),
71
- "elapsed": datasets.Value("float64")
72
- }
73
- )
74
- return datasets.DatasetInfo(
75
- # This is the description that will appear on the datasets page.
76
- description=_DESCRIPTION,
77
- # This defines the different columns of the dataset and their types
78
- features=features, # Here we define them above because they are different between the two configurations
79
- # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
80
- # specify them. They'll be used if as_supervised=True in builder.as_dataset.
81
- # supervised_keys=("sentence", "label"),
82
- # Homepage of the dataset for documentation
83
- homepage=_HOMEPAGE,
84
- # License for the dataset if available
85
- license=_LICENSE,
86
- # Citation for the dataset
87
- citation=_CITATION,
88
- )
89
-
90
- def _split_generators(self, dl_manager):
91
- if "main" == self.config.name:
92
- return [
93
- datasets.SplitGenerator(
94
- name=datasets.Split.TRAIN,
95
- gen_kwargs={
96
- "filepath": dl_manager.download_and_extract(
97
- _BASE_URL+'train.jsonl'
98
- ),
99
- "split": "train",
100
- },
101
- ),
102
- datasets.SplitGenerator(
103
- name=datasets.Split.TEST,
104
- gen_kwargs={
105
- "filepath": dl_manager.download_and_extract(
106
- _BASE_URL+'test.jsonl'
107
- ),
108
- "split": "test",
109
- },
110
- ),
111
- datasets.SplitGenerator(
112
- name=datasets.Split.VALIDATION,
113
- gen_kwargs={
114
- "filepath": dl_manager.download_and_extract(
115
- _BASE_URL+'validation.jsonl'
116
- ),
117
- "split": "validation",
118
- },
119
- ),
120
- ]
121
- elif "rational" == self.config.name:
122
- return [
123
- datasets.SplitGenerator(
124
- name=datasets.Split.TEST,
125
- gen_kwargs={
126
- "filepath": dl_manager.download_and_extract(
127
- _BASE_URL+'rational-test.jsonl'
128
- ),
129
- "split": "test",
130
- },
131
- ),
132
- datasets.SplitGenerator(
133
- name=datasets.Split.VALIDATION,
134
- gen_kwargs={
135
- "filepath": dl_manager.download_and_extract(
136
- _BASE_URL+'rational-val.jsonl'
137
- ),
138
- "split": "validation",
139
- },
140
- ),
141
- ]
142
-
143
- # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
144
- def _generate_examples(self, filepath, split):
145
- # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
146
- # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
147
- with open(filepath, encoding="utf-8") as f:
148
- for key, row in enumerate(f):
149
- data = json.loads(row)
150
- yield key, {
151
- "model_name": data["model_name"],
152
- "question": data["question"],
153
- "reply": data["reply"],
154
- "ping_date": data["ping_date"],
155
- "elapsed": data["elapsed"],
156
- }