Convert dataset to Parquet #5
opened by SaylorTwift (HF Staff)
README.md CHANGED
@@ -21,6 +21,85 @@ task_categories:
 task_ids:
 - closed-domain-qa
 paperswithcode_id: qasper
+configs:
+- config_name: qasper
+  data_files:
+  - split: train
+    path: qasper/train-*
+  - split: validation
+    path: qasper/validation-*
+  - split: test
+    path: qasper/test-*
+  default: true
+dataset_info:
+  config_name: qasper
+  features:
+  - name: id
+    dtype: string
+  - name: title
+    dtype: string
+  - name: abstract
+    dtype: string
+  - name: full_text
+    sequence:
+    - name: section_name
+      dtype: string
+    - name: paragraphs
+      list: string
+  - name: qas
+    sequence:
+    - name: question
+      dtype: string
+    - name: question_id
+      dtype: string
+    - name: nlp_background
+      dtype: string
+    - name: topic_background
+      dtype: string
+    - name: paper_read
+      dtype: string
+    - name: search_query
+      dtype: string
+    - name: question_writer
+      dtype: string
+    - name: answers
+      sequence:
+      - name: answer
+        struct:
+        - name: unanswerable
+          dtype: bool
+        - name: extractive_spans
+          sequence: string
+        - name: yes_no
+          dtype: bool
+        - name: free_form_answer
+          dtype: string
+        - name: evidence
+          sequence: string
+        - name: highlighted_evidence
+          sequence: string
+      - name: annotation_id
+        dtype: string
+      - name: worker_id
+        dtype: string
+  - name: figures_and_tables
+    sequence:
+    - name: caption
+      dtype: string
+    - name: file
+      dtype: string
+  splits:
+  - name: train
+    num_bytes: 28466446
+    num_examples: 888
+  - name: validation
+    num_bytes: 9900193
+    num_examples: 281
+  - name: test
+    num_bytes: 15488891
+    num_examples: 416
+  download_size: 26179660
+  dataset_size: 53855530
 ---
 
 # Dataset Card for Qasper
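With this `configs` block in the README YAML, the `datasets` library resolves each split directly from the Parquet files added in this PR, so no loading script has to run. A minimal sketch, assuming the repo id is `allenai/qasper`:

```python
from datasets import load_dataset

# Splits are resolved from the `configs` entry in the README YAML,
# which maps train/validation/test to the qasper/*.parquet files.
dataset = load_dataset("allenai/qasper")

print(dataset)                       # DatasetDict with train/validation/test splits
print(dataset["train"][0]["title"])  # title of the first paper
```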
dataset_infos.json DELETED
@@ -1,212 +0,0 @@
-{
-  "qasper": {
-    "description": "A dataset containing 1585 papers with 5049 information-seeking questions asked by regular readers of NLP papers, and answered by a separate set of NLP practitioners.\n",
-    "citation": "@inproceedings{Dasigi2021ADO,\n title={A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers},\n author={Pradeep Dasigi and Kyle Lo and Iz Beltagy and Arman Cohan and Noah A. Smith and Matt Gardner},\n year={2021}\n}\n",
-    "homepage": "https://allenai.org/data/qasper",
-    "license": "CC BY 4.0",
-    "features": {
-      "id": {
-        "dtype": "string",
-        "id": null,
-        "_type": "Value"
-      },
-      "title": {
-        "dtype": "string",
-        "id": null,
-        "_type": "Value"
-      },
-      "abstract": {
-        "dtype": "string",
-        "id": null,
-        "_type": "Value"
-      },
-      "full_text": {
-        "feature": {
-          "section_name": {
-            "dtype": "string",
-            "id": null,
-            "_type": "Value"
-          },
-          "paragraphs": [
-            {
-              "dtype": "string",
-              "id": null,
-              "_type": "Value"
-            }
-          ]
-        },
-        "length": -1,
-        "id": null,
-        "_type": "Sequence"
-      },
-      "qas": {
-        "feature": {
-          "question": {
-            "dtype": "string",
-            "id": null,
-            "_type": "Value"
-          },
-          "question_id": {
-            "dtype": "string",
-            "id": null,
-            "_type": "Value"
-          },
-          "nlp_background": {
-            "dtype": "string",
-            "id": null,
-            "_type": "Value"
-          },
-          "topic_background": {
-            "dtype": "string",
-            "id": null,
-            "_type": "Value"
-          },
-          "paper_read": {
-            "dtype": "string",
-            "id": null,
-            "_type": "Value"
-          },
-          "search_query": {
-            "dtype": "string",
-            "id": null,
-            "_type": "Value"
-          },
-          "question_writer": {
-            "dtype": "string",
-            "id": null,
-            "_type": "Value"
-          },
-          "answers": {
-            "feature": {
-              "answer": {
-                "unanswerable": {
-                  "dtype": "bool",
-                  "id": null,
-                  "_type": "Value"
-                },
-                "extractive_spans": {
-                  "feature": {
-                    "dtype": "string",
-                    "id": null,
-                    "_type": "Value"
-                  },
-                  "length": -1,
-                  "id": null,
-                  "_type": "Sequence"
-                },
-                "yes_no": {
-                  "dtype": "bool",
-                  "id": null,
-                  "_type": "Value"
-                },
-                "free_form_answer": {
-                  "dtype": "string",
-                  "id": null,
-                  "_type": "Value"
-                },
-                "evidence": {
-                  "feature": {
-                    "dtype": "string",
-                    "id": null,
-                    "_type": "Value"
-                  },
-                  "length": -1,
-                  "id": null,
-                  "_type": "Sequence"
-                },
-                "highlighted_evidence": {
-                  "feature": {
-                    "dtype": "string",
-                    "id": null,
-                    "_type": "Value"
-                  },
-                  "length": -1,
-                  "id": null,
-                  "_type": "Sequence"
-                }
-              },
-              "annotation_id": {
-                "dtype": "string",
-                "id": null,
-                "_type": "Value"
-              },
-              "worker_id": {
-                "dtype": "string",
-                "id": null,
-                "_type": "Value"
-              }
-            },
-            "length": -1,
-            "id": null,
-            "_type": "Sequence"
-          }
-        },
-        "length": -1,
-        "id": null,
-        "_type": "Sequence"
-      },
-      "figures_and_tables": {
-        "feature": {
-          "caption": {
-            "dtype": "string",
-            "id": null,
-            "_type": "Value"
-          },
-          "file": {
-            "dtype": "string",
-            "id": null,
-            "_type": "Value"
-          }
-        },
-        "length": -1,
-        "id": null,
-        "_type": "Sequence"
-      }
-    },
-    "post_processed": null,
-    "supervised_keys": null,
-    "builder_name": "qasper",
-    "config_name": "qasper",
-    "version": {
-      "version_str": "0.3.0",
-      "description": null,
-      "major": 0,
-      "minor": 3,
-      "patch": 0
-    },
-    "splits": {
-      "train": {
-        "name": "train",
-        "num_bytes": 27277970,
-        "num_examples": 888,
-        "dataset_name": "qasper"
-      },
-      "validation": {
-        "name": "validation",
-        "num_bytes": 9535330,
-        "num_examples": 281,
-        "dataset_name": "qasper"
-      },
-      "test": {
-        "name": "test",
-        "num_bytes": 9535330,
-        "num_examples": 416,
-        "dataset_name": "qasper"
-      }
-    },
-    "download_checksums": {
-      "https://qasper-dataset.s3.us-west-2.amazonaws.com/qasper-train-dev-v0.3.tgz": {
-        "num_bytes": 10835856,
-        "checksum": "a28fdf966db827bcee3d873107d6b6669864fb7ca8fbf73a192f5e39191bdb5a"
-      },
-      "https://qasper-dataset.s3.us-west-2.amazonaws.com/qasper-test-and-evaluator-v0.3.tgz": {
-        "num_bytes": 3865061,
-        "checksum": "72a52a41193e2838b8074f80ac074b94f956b84886c36a61c58a7df4171bdd72"
-      }
-    },
-    "download_size": 14700917,
-    "post_processing_size": null,
-    "dataset_size": 36813300,
-    "size_in_bytes": 68556447
-  }
-}
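Deleting `dataset_infos.json` does not lose this metadata: the feature schema and split sizes now live in the `dataset_info` block of the README YAML above and can still be inspected programmatically. A minimal sketch, again assuming the repo id `allenai/qasper`:

```python
from datasets import load_dataset_builder

# Reads the dataset_info block from the README YAML instead of dataset_infos.json.
builder = load_dataset_builder("allenai/qasper")
print(builder.info.features)  # schema: id, title, abstract, full_text, qas, figures_and_tables
print(builder.info.splits)    # split names with num_examples / num_bytes
```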
qasper.py DELETED
@@ -1,152 +0,0 @@
-# coding=utf-8
-# Copyright 2022 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Qasper: A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers."""
-
-
-import json
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@inproceedings{Dasigi2021ADO,
-    title={A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers},
-    author={Pradeep Dasigi and Kyle Lo and Iz Beltagy and Arman Cohan and Noah A. Smith and Matt Gardner},
-    year={2021}
-}
-"""
-_LICENSE = "CC BY 4.0"
-_DESCRIPTION = """\
-A dataset containing 1585 papers with 5049 information-seeking questions asked by regular readers of NLP papers, and answered by a separate set of NLP practitioners.
-"""
-
-_HOMEPAGE = "https://allenai.org/data/qasper"
-_URL_TRAIN_DEV = "https://qasper-dataset.s3.us-west-2.amazonaws.com/qasper-train-dev-v0.3.tgz"
-_URL_TEST = "https://qasper-dataset.s3.us-west-2.amazonaws.com/qasper-test-and-evaluator-v0.3.tgz"
-_DATA_FILES = {"train": "qasper-train-v0.3.json",
-               "dev": "qasper-dev-v0.3.json",
-               "test": "qasper-test-v0.3.json"}
-
-_VERSION = "0.3.0"
-
-
-class Qasper(datasets.GeneratorBasedBuilder):
-    """Qasper: A Dataset of Information-Seeking Q&A Anchored in Research Papers."""
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="qasper",
-            version=datasets.Version(_VERSION),
-            description=_DESCRIPTION,
-        )
-    ]
-
-    def _info(self):
-
-        features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "title": datasets.Value("string"),
-                "abstract": datasets.Value("string"),
-                "full_text": datasets.features.Sequence(
-                    {
-                        "section_name": datasets.Value("string"),
-                        "paragraphs": [datasets.Value("string")],
-                    }
-                ),
-                "qas": datasets.features.Sequence(
-                    {
-                        "question": datasets.Value("string"),
-                        "question_id": datasets.Value("string"),
-                        "nlp_background": datasets.Value("string"),
-                        "topic_background": datasets.Value("string"),
-                        "paper_read": datasets.Value("string"),
-                        "search_query": datasets.Value("string"),
-                        "question_writer": datasets.Value("string"),
-                        "answers": datasets.features.Sequence(
-                            {
-                                "answer": {
-                                    "unanswerable": datasets.Value("bool"),
-                                    "extractive_spans": datasets.features.Sequence(datasets.Value("string")),
-                                    "yes_no": datasets.Value("bool"),
-                                    "free_form_answer": datasets.Value("string"),
-                                    "evidence": datasets.features.Sequence(datasets.Value("string")),
-                                    "highlighted_evidence": datasets.features.Sequence(datasets.Value("string")),
-                                },
-                                "annotation_id": datasets.Value("string"),
-                                "worker_id": datasets.Value("string"),
-                            }
-                        ),
-                    }
-                ),
-                "figures_and_tables": datasets.features.Sequence(
-                    {
-                        "caption": datasets.Value("string"),
-                        "file": datasets.Value("string"),
-                    }
-                ),
-            }
-        )
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        archive_train_dev, archive_test = dl_manager.download((
-            _URL_TRAIN_DEV, _URL_TEST)
-        )
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": _DATA_FILES["train"],
-                    "files": dl_manager.iter_archive(archive_train_dev)},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": _DATA_FILES["dev"],
-                    "files": dl_manager.iter_archive(archive_train_dev)},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": _DATA_FILES["test"],
-                    "files": dl_manager.iter_archive(archive_test)},
-            ),
-        ]
-
-    def _generate_examples(self, filepath, files):
-        """This function returns the examples in the raw (text) form."""
-        logger.info("generating examples from = %s", filepath)
-        for path, f in files:
-            if path == filepath:
-                qasper = json.loads(f.read().decode("utf-8"))
-                for id_ in qasper:
-                    qasper[id_]["id"] = id_
-                    yield id_, qasper[id_]
-                break
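The deleted script downloaded the original tgz archives and, in `_generate_examples`, turned each top-level key of the raw JSON into one example. For reference, a rough equivalent of that step, assuming a local copy of `qasper-train-v0.3.json` extracted from the `qasper-train-dev-v0.3.tgz` archive:

```python
import json

# Sketch of what the removed _generate_examples did with the raw file.
with open("qasper-train-v0.3.json", encoding="utf-8") as f:
    qasper = json.load(f)

for paper_id, paper in qasper.items():
    paper["id"] = paper_id  # the script copied each JSON key into an "id" field
    # each `paper` dict (title, abstract, full_text, qas, figures_and_tables)
    # became one dataset example
```

With the Parquet conversion, none of this download-and-parse logic runs at load time.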
dummy/qasper/0.1.0/dummy_data.zip → qasper/test-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b220315af309990e51221f07dfac6fad8b43b00b7d8f267b1f555c797bb5c2e
-size 15066
+oid sha256:d910a7671f01c99ee7d9c95f764433ed8ec50b3c81d764e025c2cc22b1681da7
+size 7068905
qasper/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68a20057dcccc1ddd42e26910ff87bef276c912808c0426a7d884a0873963e09
+size 14368661
qasper/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a1105448de2c14528f4e5781198cbc82d8f3b29fe9c5c34b452f7739731ada3
+size 4742094
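The new split files are plain Parquet, so they can also be read without the `datasets` library, for example through pandas' fsspec integration. A minimal sketch, assuming the repo id `allenai/qasper` and that `huggingface_hub` (which provides the `hf://` filesystem) is installed:

```python
import pandas as pd

# hf:// paths are resolved by huggingface_hub's fsspec filesystem.
df = pd.read_parquet("hf://datasets/allenai/qasper/qasper/train-00000-of-00001.parquet")
print(len(df))              # 888 rows, matching num_examples for the train split
print(df.columns.tolist())  # id, title, abstract, full_text, qas, figures_and_tables
```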