Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
License: other (bigbio_license_shortname: PUBLIC_DOMAIN_MARK_1p0)
jfries committed (verified) · Commit c292b3c · 1 parent: de5030c

Convert dataset to Parquet (#4)

- Convert dataset to Parquet (29180d9025a08ff69fd08a4fcdddb5b079c740a1)
- Add 'chemprot_shared_task_eval_source' config data files (c5057eca651c41d17959613cb15620a1417ddcde)
- Add 'chemprot_bigbio_kb' config data files (53e5a3d7a9cc2ebe56d982a4c10116c6e7df39a1)
- Delete loading script auxiliary file (2495bdeac0c2f93da48952892d06147dad9ce2b3)
- Delete loading script (3c448eb0fb55220ab2e804c0e681050175ad7a53)
- Delete data file (fe210651ce27c52293682501bcc0423e8dd03143)

README.md CHANGED
@@ -1,19 +1,219 @@
-
 ---
 language:
 - en
 bigbio_language:
 - English
 license: other
 multilinguality: monolingual
 bigbio_license_shortname: PUBLIC_DOMAIN_MARK_1p0
 pretty_name: ChemProt
 homepage: https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/
-bigbio_pubmed: True
-bigbio_public: True
+bigbio_pubmed: true
+bigbio_public: true
 bigbio_tasks:
 - RELATION_EXTRACTION
 - NAMED_ENTITY_RECOGNITION
+dataset_info:
+- config_name: chemprot_bigbio_kb
+  features:
+  - name: id
+    dtype: string
+  - name: document_id
+    dtype: string
+  - name: passages
+    list:
+    - name: id
+      dtype: string
+    - name: type
+      dtype: string
+    - name: text
+      sequence: string
+    - name: offsets
+      sequence:
+        list: int32
+  - name: entities
+    list:
+    - name: id
+      dtype: string
+    - name: type
+      dtype: string
+    - name: text
+      sequence: string
+    - name: offsets
+      sequence:
+        list: int32
+    - name: normalized
+      list:
+      - name: db_name
+        dtype: string
+      - name: db_id
+        dtype: string
+  - name: events
+    list:
+    - name: id
+      dtype: string
+    - name: type
+      dtype: string
+    - name: trigger
+      struct:
+      - name: text
+        sequence: string
+      - name: offsets
+        sequence:
+          list: int32
+    - name: arguments
+      list:
+      - name: role
+        dtype: string
+      - name: ref_id
+        dtype: string
+  - name: coreferences
+    list:
+    - name: id
+      dtype: string
+    - name: entity_ids
+      sequence: string
+  - name: relations
+    list:
+    - name: id
+      dtype: string
+    - name: type
+      dtype: string
+    - name: arg1_id
+      dtype: string
+    - name: arg2_id
+      dtype: string
+    - name: normalized
+      list:
+      - name: db_name
+        dtype: string
+      - name: db_id
+        dtype: string
+  splits:
+  - name: sample
+    num_bytes: 174378
+    num_examples: 50
+  - name: train
+    num_bytes: 3509825
+    num_examples: 1020
+  - name: test
+    num_bytes: 2838045
+    num_examples: 800
+  - name: validation
+    num_bytes: 2098255
+    num_examples: 612
+  download_size: 3644874
+  dataset_size: 8620503
+- config_name: chemprot_full_source
+  features:
+  - name: pmid
+    dtype: string
+  - name: text
+    dtype: string
+  - name: entities
+    sequence:
+    - name: id
+      dtype: string
+    - name: type
+      dtype: string
+    - name: text
+      dtype: string
+    - name: offsets
+      sequence: int64
+  - name: relations
+    sequence:
+    - name: type
+      dtype: string
+    - name: arg1
+      dtype: string
+    - name: arg2
+      dtype: string
+  splits:
+  - name: sample
+    num_bytes: 159878
+    num_examples: 50
+  - name: train
+    num_bytes: 3161241
+    num_examples: 1020
+  - name: test
+    num_bytes: 2550891
+    num_examples: 800
+  - name: validation
+    num_bytes: 1902042
+    num_examples: 612
+  download_size: 2938603
+  dataset_size: 7774052
+- config_name: chemprot_shared_task_eval_source
+  features:
+  - name: pmid
+    dtype: string
+  - name: text
+    dtype: string
+  - name: entities
+    sequence:
+    - name: id
+      dtype: string
+    - name: type
+      dtype: string
+    - name: text
+      dtype: string
+    - name: offsets
+      sequence: int64
+  - name: relations
+    sequence:
+    - name: type
+      dtype: string
+    - name: arg1
+      dtype: string
+    - name: arg2
+      dtype: string
+  splits:
+  - name: sample
+    num_bytes: 157609
+    num_examples: 50
+  - name: train
+    num_bytes: 3109953
+    num_examples: 1020
+  - name: test
+    num_bytes: 2499388
+    num_examples: 800
+  - name: validation
+    num_bytes: 1876378
+    num_examples: 612
+  download_size: 2924370
+  dataset_size: 7643328
+configs:
+- config_name: chemprot_bigbio_kb
+  data_files:
+  - split: sample
+    path: chemprot_bigbio_kb/sample-*
+  - split: train
+    path: chemprot_bigbio_kb/train-*
+  - split: test
+    path: chemprot_bigbio_kb/test-*
+  - split: validation
+    path: chemprot_bigbio_kb/validation-*
+- config_name: chemprot_full_source
+  data_files:
+  - split: sample
+    path: chemprot_full_source/sample-*
+  - split: train
+    path: chemprot_full_source/train-*
+  - split: test
+    path: chemprot_full_source/test-*
+  - split: validation
+    path: chemprot_full_source/validation-*
+  default: true
+- config_name: chemprot_shared_task_eval_source
+  data_files:
+  - split: sample
+    path: chemprot_shared_task_eval_source/sample-*
+  - split: train
+    path: chemprot_shared_task_eval_source/train-*
+  - split: test
+    path: chemprot_shared_task_eval_source/test-*
+  - split: validation
+    path: chemprot_shared_task_eval_source/validation-*
 ---
bigbiohub.py DELETED
@@ -1,592 +0,0 @@
-from collections import defaultdict
-from dataclasses import dataclass
-from enum import Enum
-import logging
-from pathlib import Path
-from types import SimpleNamespace
-from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
-
-import datasets
-
-if TYPE_CHECKING:
-    import bioc
-
-logger = logging.getLogger(__name__)
-
-
-BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
-
-
-@dataclass
-class BigBioConfig(datasets.BuilderConfig):
-    """BuilderConfig for BigBio."""
-
-    name: str = None
-    version: datasets.Version = None
-    description: str = None
-    schema: str = None
-    subset_id: str = None
-
-
-class Tasks(Enum):
-    NAMED_ENTITY_RECOGNITION = "NER"
-    NAMED_ENTITY_DISAMBIGUATION = "NED"
-    EVENT_EXTRACTION = "EE"
-    RELATION_EXTRACTION = "RE"
-    COREFERENCE_RESOLUTION = "COREF"
-    QUESTION_ANSWERING = "QA"
-    TEXTUAL_ENTAILMENT = "TE"
-    SEMANTIC_SIMILARITY = "STS"
-    TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
-    PARAPHRASING = "PARA"
-    TRANSLATION = "TRANSL"
-    SUMMARIZATION = "SUM"
-    TEXT_CLASSIFICATION = "TXTCLASS"
-
-
-entailment_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "premise": datasets.Value("string"),
-        "hypothesis": datasets.Value("string"),
-        "label": datasets.Value("string"),
-    }
-)
-
-pairs_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "document_id": datasets.Value("string"),
-        "text_1": datasets.Value("string"),
-        "text_2": datasets.Value("string"),
-        "label": datasets.Value("string"),
-    }
-)
-
-qa_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "question_id": datasets.Value("string"),
-        "document_id": datasets.Value("string"),
-        "question": datasets.Value("string"),
-        "type": datasets.Value("string"),
-        "choices": [datasets.Value("string")],
-        "context": datasets.Value("string"),
-        "answer": datasets.Sequence(datasets.Value("string")),
-    }
-)
-
-text_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "document_id": datasets.Value("string"),
-        "text": datasets.Value("string"),
-        "labels": [datasets.Value("string")],
-    }
-)
-
-text2text_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "document_id": datasets.Value("string"),
-        "text_1": datasets.Value("string"),
-        "text_2": datasets.Value("string"),
-        "text_1_name": datasets.Value("string"),
-        "text_2_name": datasets.Value("string"),
-    }
-)
-
-kb_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "document_id": datasets.Value("string"),
-        "passages": [
-            {
-                "id": datasets.Value("string"),
-                "type": datasets.Value("string"),
-                "text": datasets.Sequence(datasets.Value("string")),
-                "offsets": datasets.Sequence([datasets.Value("int32")]),
-            }
-        ],
-        "entities": [
-            {
-                "id": datasets.Value("string"),
-                "type": datasets.Value("string"),
-                "text": datasets.Sequence(datasets.Value("string")),
-                "offsets": datasets.Sequence([datasets.Value("int32")]),
-                "normalized": [
-                    {
-                        "db_name": datasets.Value("string"),
-                        "db_id": datasets.Value("string"),
-                    }
-                ],
-            }
-        ],
-        "events": [
-            {
-                "id": datasets.Value("string"),
-                "type": datasets.Value("string"),
-                # refers to the text_bound_annotation of the trigger
-                "trigger": {
-                    "text": datasets.Sequence(datasets.Value("string")),
-                    "offsets": datasets.Sequence([datasets.Value("int32")]),
-                },
-                "arguments": [
-                    {
-                        "role": datasets.Value("string"),
-                        "ref_id": datasets.Value("string"),
-                    }
-                ],
-            }
-        ],
-        "coreferences": [
-            {
-                "id": datasets.Value("string"),
-                "entity_ids": datasets.Sequence(datasets.Value("string")),
-            }
-        ],
-        "relations": [
-            {
-                "id": datasets.Value("string"),
-                "type": datasets.Value("string"),
-                "arg1_id": datasets.Value("string"),
-                "arg2_id": datasets.Value("string"),
-                "normalized": [
-                    {
-                        "db_name": datasets.Value("string"),
-                        "db_id": datasets.Value("string"),
-                    }
-                ],
-            }
-        ],
-    }
-)
-
-
-TASK_TO_SCHEMA = {
-    Tasks.NAMED_ENTITY_RECOGNITION.name: "KB",
-    Tasks.NAMED_ENTITY_DISAMBIGUATION.name: "KB",
-    Tasks.EVENT_EXTRACTION.name: "KB",
-    Tasks.RELATION_EXTRACTION.name: "KB",
-    Tasks.COREFERENCE_RESOLUTION.name: "KB",
-    Tasks.QUESTION_ANSWERING.name: "QA",
-    Tasks.TEXTUAL_ENTAILMENT.name: "TE",
-    Tasks.SEMANTIC_SIMILARITY.name: "PAIRS",
-    Tasks.TEXT_PAIRS_CLASSIFICATION.name: "PAIRS",
-    Tasks.PARAPHRASING.name: "T2T",
-    Tasks.TRANSLATION.name: "T2T",
-    Tasks.SUMMARIZATION.name: "T2T",
-    Tasks.TEXT_CLASSIFICATION.name: "TEXT",
-}
-
-SCHEMA_TO_TASKS = defaultdict(set)
-for task, schema in TASK_TO_SCHEMA.items():
-    SCHEMA_TO_TASKS[schema].add(task)
-SCHEMA_TO_TASKS = dict(SCHEMA_TO_TASKS)
-
-VALID_TASKS = set(TASK_TO_SCHEMA.keys())
-VALID_SCHEMAS = set(TASK_TO_SCHEMA.values())
-
-SCHEMA_TO_FEATURES = {
-    "KB": kb_features,
-    "QA": qa_features,
-    "TE": entailment_features,
-    "T2T": text2text_features,
-    "TEXT": text_features,
-    "PAIRS": pairs_features,
-}
-
-
-def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
-
-    offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
-
-    text = ann.text
-
-    if len(offsets) > 1:
-        i = 0
-        texts = []
-        for start, end in offsets:
-            chunk_len = end - start
-            texts.append(text[i : chunk_len + i])
-            i += chunk_len
-            while i < len(text) and text[i] == " ":
-                i += 1
-    else:
-        texts = [text]
-
-    return offsets, texts
-
-
-def remove_prefix(a: str, prefix: str) -> str:
-    if a.startswith(prefix):
-        a = a[len(prefix) :]
-    return a
-
-
-def parse_brat_file(
-    txt_file: Path,
-    annotation_file_suffixes: List[str] = None,
-    parse_notes: bool = False,
-) -> Dict:
-    """
-    Parse a brat file into the schema defined below.
-    `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
-    Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
-    e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
-    Will include annotator notes, when `parse_notes == True`.
-    brat_features = datasets.Features(
-        {
-            "id": datasets.Value("string"),
-            "document_id": datasets.Value("string"),
-            "text": datasets.Value("string"),
-            "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
-                {
-                    "offsets": datasets.Sequence([datasets.Value("int32")]),
-                    "text": datasets.Sequence(datasets.Value("string")),
-                    "type": datasets.Value("string"),
-                    "id": datasets.Value("string"),
-                }
-            ],
-            "events": [  # E line in brat
-                {
-                    "trigger": datasets.Value(
-                        "string"
-                    ),  # refers to the text_bound_annotation of the trigger,
-                    "id": datasets.Value("string"),
-                    "type": datasets.Value("string"),
-                    "arguments": datasets.Sequence(
-                        {
-                            "role": datasets.Value("string"),
-                            "ref_id": datasets.Value("string"),
-                        }
-                    ),
-                }
-            ],
-            "relations": [  # R line in brat
-                {
-                    "id": datasets.Value("string"),
-                    "head": {
-                        "ref_id": datasets.Value("string"),
-                        "role": datasets.Value("string"),
-                    },
-                    "tail": {
-                        "ref_id": datasets.Value("string"),
-                        "role": datasets.Value("string"),
-                    },
-                    "type": datasets.Value("string"),
-                }
-            ],
-            "equivalences": [  # Equiv line in brat
-                {
-                    "id": datasets.Value("string"),
-                    "ref_ids": datasets.Sequence(datasets.Value("string")),
-                }
-            ],
-            "attributes": [  # M or A lines in brat
-                {
-                    "id": datasets.Value("string"),
-                    "type": datasets.Value("string"),
-                    "ref_id": datasets.Value("string"),
-                    "value": datasets.Value("string"),
-                }
-            ],
-            "normalizations": [  # N lines in brat
-                {
-                    "id": datasets.Value("string"),
-                    "type": datasets.Value("string"),
-                    "ref_id": datasets.Value("string"),
-                    "resource_name": datasets.Value(
-                        "string"
-                    ),  # Name of the resource, e.g. "Wikipedia"
-                    "cuid": datasets.Value(
-                        "string"
-                    ),  # ID in the resource, e.g. 534366
-                    "text": datasets.Value(
-                        "string"
-                    ),  # Human readable description/name of the entity, e.g. "Barack Obama"
-                }
-            ],
-            ### OPTIONAL: Only included when `parse_notes == True`
-            "notes": [  # # lines in brat
-                {
-                    "id": datasets.Value("string"),
-                    "type": datasets.Value("string"),
-                    "ref_id": datasets.Value("string"),
-                    "text": datasets.Value("string"),
-                }
-            ],
-        },
-    )
-    """
-
-    example = {}
-    example["document_id"] = txt_file.with_suffix("").name
-    with txt_file.open() as f:
-        example["text"] = f.read()
-
-    # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
-    # for event extraction
-    if annotation_file_suffixes is None:
-        annotation_file_suffixes = [".a1", ".a2", ".ann"]
-
-    if len(annotation_file_suffixes) == 0:
-        raise AssertionError(
-            "At least one suffix for the to-be-read annotation files should be given!"
-        )
-
-    ann_lines = []
-    for suffix in annotation_file_suffixes:
-        annotation_file = txt_file.with_suffix(suffix)
-        try:
-            with annotation_file.open() as f:
-                ann_lines.extend(f.readlines())
-        except Exception:
-            continue
-
-    example["text_bound_annotations"] = []
-    example["events"] = []
-    example["relations"] = []
-    example["equivalences"] = []
-    example["attributes"] = []
-    example["normalizations"] = []
-
-    if parse_notes:
-        example["notes"] = []
-
-    for line in ann_lines:
-        line = line.strip()
-        if not line:
-            continue
-
-        if line.startswith("T"):  # Text bound
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-            ann["type"] = fields[1].split()[0]
-            ann["offsets"] = []
-            span_str = remove_prefix(fields[1], (ann["type"] + " "))
-            text = fields[2]
-            for span in span_str.split(";"):
-                start, end = span.split()
-                ann["offsets"].append([int(start), int(end)])
-
-            # Heuristically split text of discontiguous entities into chunks
-            ann["text"] = []
-            if len(ann["offsets"]) > 1:
-                i = 0
-                for start, end in ann["offsets"]:
-                    chunk_len = end - start
-                    ann["text"].append(text[i : chunk_len + i])
-                    i += chunk_len
-                    while i < len(text) and text[i] == " ":
-                        i += 1
-            else:
-                ann["text"] = [text]
-
-            example["text_bound_annotations"].append(ann)
-
-        elif line.startswith("E"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-
-            ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
-
-            ann["arguments"] = []
-            for role_ref_id in fields[1].split()[1:]:
-                argument = {
-                    "role": (role_ref_id.split(":"))[0],
-                    "ref_id": (role_ref_id.split(":"))[1],
-                }
-                ann["arguments"].append(argument)
-
-            example["events"].append(ann)
-
-        elif line.startswith("R"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-            ann["type"] = fields[1].split()[0]
-
-            ann["head"] = {
-                "role": fields[1].split()[1].split(":")[0],
-                "ref_id": fields[1].split()[1].split(":")[1],
-            }
-            ann["tail"] = {
-                "role": fields[1].split()[2].split(":")[0],
-                "ref_id": fields[1].split()[2].split(":")[1],
-            }
-
-            example["relations"].append(ann)
-
-        # '*' seems to be the legacy way to mark equivalences,
-        # but I couldn't find any info on the current way
-        # this might have to be adapted dependent on the brat version
-        # of the annotation
-        elif line.startswith("*"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-            ann["ref_ids"] = fields[1].split()[1:]
-
-            example["equivalences"].append(ann)
-
-        elif line.startswith("A") or line.startswith("M"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-
-            info = fields[1].split()
-            ann["type"] = info[0]
-            ann["ref_id"] = info[1]
-
-            if len(info) > 2:
-                ann["value"] = info[2]
-            else:
-                ann["value"] = ""
-
-            example["attributes"].append(ann)
-
-        elif line.startswith("N"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-            ann["text"] = fields[2]
-
-            info = fields[1].split()
-
-            ann["type"] = info[0]
-            ann["ref_id"] = info[1]
-            ann["resource_name"] = info[2].split(":")[0]
-            ann["cuid"] = info[2].split(":")[1]
-            example["normalizations"].append(ann)
-
-        elif parse_notes and line.startswith("#"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-            ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
-
-            info = fields[1].split()
-
-            ann["type"] = info[0]
-            ann["ref_id"] = info[1]
-            example["notes"].append(ann)
-
-    return example
-
-
-def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
-    """
-    Transform a brat parse (conforming to the standard brat schema) obtained with
-    `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
-    :param brat_parse:
-    """
-
-    unified_example = {}
-
-    # Prefix all ids with document id to ensure global uniqueness,
-    # because brat ids are only unique within their document
-    id_prefix = brat_parse["document_id"] + "_"
-
-    # identical
-    unified_example["document_id"] = brat_parse["document_id"]
-    unified_example["passages"] = [
-        {
-            "id": id_prefix + "_text",
-            "type": "abstract",
-            "text": [brat_parse["text"]],
-            "offsets": [[0, len(brat_parse["text"])]],
-        }
-    ]
-
-    # get normalizations
-    ref_id_to_normalizations = defaultdict(list)
-    for normalization in brat_parse["normalizations"]:
-        ref_id_to_normalizations[normalization["ref_id"]].append(
-            {
-                "db_name": normalization["resource_name"],
-                "db_id": normalization["cuid"],
-            }
-        )
-
-    # separate entities and event triggers
-    unified_example["events"] = []
-    non_event_ann = brat_parse["text_bound_annotations"].copy()
-    for event in brat_parse["events"]:
-        event = event.copy()
-        event["id"] = id_prefix + event["id"]
-        trigger = next(
-            tr
-            for tr in brat_parse["text_bound_annotations"]
-            if tr["id"] == event["trigger"]
-        )
-        if trigger in non_event_ann:
-            non_event_ann.remove(trigger)
-        event["trigger"] = {
-            "text": trigger["text"].copy(),
-            "offsets": trigger["offsets"].copy(),
-        }
-        for argument in event["arguments"]:
-            argument["ref_id"] = id_prefix + argument["ref_id"]
-
-        unified_example["events"].append(event)
-
-    unified_example["entities"] = []
-    anno_ids = [ref_id["id"] for ref_id in non_event_ann]
-    for ann in non_event_ann:
-        entity_ann = ann.copy()
-        entity_ann["id"] = id_prefix + entity_ann["id"]
-        entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
-        unified_example["entities"].append(entity_ann)
-
-    # massage relations
-    unified_example["relations"] = []
-    skipped_relations = set()
-    for ann in brat_parse["relations"]:
-        if (
-            ann["head"]["ref_id"] not in anno_ids
-            or ann["tail"]["ref_id"] not in anno_ids
-        ):
-            skipped_relations.add(ann["id"])
-            continue
-        unified_example["relations"].append(
-            {
-                "arg1_id": id_prefix + ann["head"]["ref_id"],
-                "arg2_id": id_prefix + ann["tail"]["ref_id"],
-                "id": id_prefix + ann["id"],
-                "type": ann["type"],
-                "normalized": [],
-            }
-        )
-    if len(skipped_relations) > 0:
-        example_id = brat_parse["document_id"]
-        logger.info(
-            f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
-            f" Skip (for now): "
-            f"{list(skipped_relations)}"
-        )
-
-    # get coreferences
-    unified_example["coreferences"] = []
-    for i, ann in enumerate(brat_parse["equivalences"], start=1):
-        is_entity_cluster = True
-        for ref_id in ann["ref_ids"]:
-            if not ref_id.startswith("T"):  # not textbound -> no entity
-                is_entity_cluster = False
-            elif ref_id not in anno_ids:  # event trigger -> no entity
-                is_entity_cluster = False
-        if is_entity_cluster:
-            entity_ids = [id_prefix + i for i in ann["ref_ids"]]
-            unified_example["coreferences"].append(
-                {"id": id_prefix + str(i), "entity_ids": entity_ids}
-            )
-    return unified_example
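Both `get_texts_and_offsets_from_bioc_ann` and the `T`-line branch of `parse_brat_file` above rely on the same heuristic for discontiguous entities: the annotation text is one space-joined string, and it is sliced chunk by chunk using the offset lengths. A self-contained sketch of that loop (the example annotation is made up):

```python
# Standalone sketch of the discontiguous-span heuristic used in the deleted
# bigbiohub.py; the example text and offsets below are fabricated.
def split_discontiguous(text: str, offsets: list) -> list:
    texts = []
    i = 0
    for start, end in offsets:
        chunk_len = end - start
        texts.append(text[i : i + chunk_len])
        i += chunk_len
        while i < len(text) and text[i] == " ":
            i += 1  # skip the separator spaces between chunks
    return texts

# Two spans of lengths 8 and 9, stored as one space-joined string:
print(split_discontiguous("dopamine receptors", [[0, 8], [20, 29]]))
# -> ['dopamine', 'receptors']
```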
chemprot.py DELETED
@@ -1,446 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-The BioCreative VI Chemical-Protein interaction dataset identifies entities of
-chemicals and proteins and their likely relation to one other. Compounds are
-generally agonists (activators) or antagonists (inhibitors) of proteins. The
-script loads dataset in bigbio schema (using knowledgebase schema: schemas/kb)
-AND/OR source (default) schema
-"""
-import os
-from typing import Dict, Tuple
-
-import datasets
-
-from .bigbiohub import kb_features
-from .bigbiohub import BigBioConfig
-from .bigbiohub import Tasks
-
-_LANGUAGES = ['English']
-_PUBMED = True
-_LOCAL = False
-_CITATION = """\
-@article{DBLP:journals/biodb/LiSJSWLDMWL16,
-  author = {Krallinger, M., Rabal, O., Lourenço, A.},
-  title = {Overview of the BioCreative VI chemical-protein interaction Track},
-  journal = {Proceedings of the BioCreative VI Workshop,},
-  volume = {141-146},
-  year = {2017},
-  url = {https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/},
-  doi = {},
-  biburl = {},
-  bibsource = {}
-}
-"""
-_DESCRIPTION = """\
-The BioCreative VI Chemical-Protein interaction dataset identifies entities of
-chemicals and proteins and their likely relation to one other. Compounds are
-generally agonists (activators) or antagonists (inhibitors) of proteins.
-"""
-
-_DATASETNAME = "chemprot"
-_DISPLAYNAME = "ChemProt"
-
-_HOMEPAGE = "https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/"
-
-_LICENSE = 'Public Domain Mark 1.0'
-
-_URLs = {
-    "source": "https://huggingface.co/datasets/bigbio/chemprot/resolve/main/ChemProt_Corpus.zip",
-    "bigbio_kb": "https://huggingface.co/datasets/bigbio/chemprot/resolve/main/ChemProt_Corpus.zip",
-}
-
-_SUPPORTED_TASKS = [Tasks.RELATION_EXTRACTION, Tasks.NAMED_ENTITY_RECOGNITION]
-_SOURCE_VERSION = "1.0.0"
-_BIGBIO_VERSION = "1.0.0"
-
-
-# Chemprot specific variables
-# NOTE: There are 3 examples (2 in dev, 1 in training) with CPR:0
-_GROUP_LABELS = {
-    "CPR:0": "Undefined",
-    "CPR:1": "Part_of",
-    "CPR:2": "Regulator",
-    "CPR:3": "Upregulator",
-    "CPR:4": "Downregulator",
-    "CPR:5": "Agonist",
-    "CPR:6": "Antagonist",
-    "CPR:7": "Modulator",
-    "CPR:8": "Cofactor",
-    "CPR:9": "Substrate",
-    "CPR:10": "Not",
-}
-
-
-class ChemprotDataset(datasets.GeneratorBasedBuilder):
-    """BioCreative VI Chemical-Protein Interaction Task."""
-
-    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
-
-    BUILDER_CONFIGS = [
-        BigBioConfig(
-            name="chemprot_full_source",
-            version=SOURCE_VERSION,
-            description="chemprot source schema",
-            schema="source",
-            subset_id="chemprot_full",
-        ),
-        BigBioConfig(
-            name="chemprot_shared_task_eval_source",
-            version=SOURCE_VERSION,
-            description="chemprot source schema with only the relation types that were used in the shared task evaluation",
-            schema="source",
-            subset_id="chemprot_shared_task_eval",
-        ),
-        BigBioConfig(
-            name="chemprot_bigbio_kb",
-            version=BIGBIO_VERSION,
-            description="chemprot BigBio schema",
-            schema="bigbio_kb",
-            subset_id="chemprot",
-        ),
-    ]
-
-    DEFAULT_CONFIG_NAME = "chemprot_full_source"
-
-    def _info(self):
-
-        if self.config.schema == "source":
-            features = datasets.Features(
-                {
-                    "pmid": datasets.Value("string"),
-                    "text": datasets.Value("string"),
-                    "entities": datasets.Sequence(
-                        {
-                            "id": datasets.Value("string"),
-                            "type": datasets.Value("string"),
-                            "text": datasets.Value("string"),
-                            "offsets": datasets.Sequence(datasets.Value("int64")),
-                        }
-                    ),
-                    "relations": datasets.Sequence(
-                        {
-                            "type": datasets.Value("string"),
-                            "arg1": datasets.Value("string"),
-                            "arg2": datasets.Value("string"),
-                        }
-                    ),
-                }
-            )
-
-        elif self.config.schema == "bigbio_kb":
-            features = kb_features
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=str(_LICENSE),
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        my_urls = _URLs[self.config.schema]
-        data_dir = dl_manager.download_and_extract(my_urls)
-
-        # Extract each of the individual folders
-        # NOTE: omitting "extract" call cause it uses a new folder
-        train_path = dl_manager.extract(
-            os.path.join(data_dir, "ChemProt_Corpus/chemprot_training.zip")
-        )
-        test_path = dl_manager.extract(
-            os.path.join(data_dir, "ChemProt_Corpus/chemprot_test_gs.zip")
-        )
-        dev_path = dl_manager.extract(
-            os.path.join(data_dir, "ChemProt_Corpus/chemprot_development.zip")
-        )
-        sample_path = dl_manager.extract(
-            os.path.join(data_dir, "ChemProt_Corpus/chemprot_sample.zip")
-        )
-
-        return [
-            datasets.SplitGenerator(
-                name="sample",  # should be a named split : /
-                gen_kwargs={
-                    "filepath": os.path.join(sample_path, "chemprot_sample"),
-                    "abstract_file": "chemprot_sample_abstracts.tsv",
-                    "entity_file": "chemprot_sample_entities.tsv",
-                    "relation_file": "chemprot_sample_relations.tsv",
-                    "gold_standard_file": "chemprot_sample_gold_standard.tsv",
-                    "split": "sample",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(train_path, "chemprot_training"),
-                    "abstract_file": "chemprot_training_abstracts.tsv",
-                    "entity_file": "chemprot_training_entities.tsv",
-                    "relation_file": "chemprot_training_relations.tsv",
-                    "gold_standard_file": "chemprot_training_gold_standard.tsv",
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": os.path.join(test_path, "chemprot_test_gs"),
-                    "abstract_file": "chemprot_test_abstracts_gs.tsv",
-                    "entity_file": "chemprot_test_entities_gs.tsv",
-                    "relation_file": "chemprot_test_relations_gs.tsv",
-                    "gold_standard_file": "chemprot_test_gold_standard.tsv",
-                    "split": "test",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": os.path.join(dev_path, "chemprot_development"),
-                    "abstract_file": "chemprot_development_abstracts.tsv",
-                    "entity_file": "chemprot_development_entities.tsv",
-                    "relation_file": "chemprot_development_relations.tsv",
-                    "gold_standard_file": "chemprot_development_gold_standard.tsv",
-                    "split": "dev",
-                },
-            ),
-        ]
-
-    def _generate_examples(
-        self,
-        filepath,
-        abstract_file,
-        entity_file,
-        relation_file,
-        gold_standard_file,
-        split,
-    ):
-        """Yields examples as (key, example) tuples."""
-        if self.config.schema == "source":
-            abstracts = self._get_abstract(os.path.join(filepath, abstract_file))
-
-            entities, entity_id = self._get_entities(
-                os.path.join(filepath, entity_file)
-            )
-
-            if self.config.subset_id == "chemprot_full":
-                relations = self._get_relations(os.path.join(filepath, relation_file))
-            elif self.config.subset_id == "chemprot_shared_task_eval":
-                relations = self._get_relations_gs(
-                    os.path.join(filepath, gold_standard_file)
-                )
-            else:
-                raise ValueError(self.config)
-
-            for id_, pmid in enumerate(abstracts.keys()):
-                yield id_, {
-                    "pmid": pmid,
-                    "text": abstracts[pmid],
-                    "entities": entities[pmid],
-                    "relations": relations.get(pmid, []),
-                }
-
-        elif self.config.schema == "bigbio_kb":
-
-            abstracts = self._get_abstract(os.path.join(filepath, abstract_file))
-            entities, entity_id = self._get_entities(
-                os.path.join(filepath, entity_file)
-            )
-            relations = self._get_relations(
-                os.path.join(filepath, relation_file), is_mapped=True
-            )
-
-            uid = 0
-            for id_, pmid in enumerate(abstracts.keys()):
-                data = {
-                    "id": str(uid),
-                    "document_id": str(pmid),
-                    "passages": [],
-                    "entities": [],
-                    "relations": [],
-                    "events": [],
-                    "coreferences": [],
-                }
-                uid += 1
-
-                data["passages"] = [
-                    {
-                        "id": str(uid),
-                        "type": "title and abstract",
-                        "text": [abstracts[pmid]],
-                        "offsets": [[0, len(abstracts[pmid])]],
-                    }
-                ]
-                uid += 1
-
-                entity_to_id = {}
-                for entity in entities[pmid]:
-                    _text = entity["text"]
-                    entity.update({"text": [_text]})
-                    entity_to_id[entity["id"]] = str(uid)
-                    entity.update({"id": str(uid)})
-                    _offsets = entity["offsets"]
-                    entity.update({"offsets": [_offsets]})
-                    entity["normalized"] = []
-                    data["entities"].append(entity)
-                    uid += 1
-
-                for relation in relations.get(pmid, []):
-                    relation["arg1_id"] = entity_to_id[relation.pop("arg1")]
-                    relation["arg2_id"] = entity_to_id[relation.pop("arg2")]
-                    relation.update({"id": str(uid)})
-                    relation["normalized"] = []
-                    data["relations"].append(relation)
-                    uid += 1
-
-                yield id_, data
-
-    @staticmethod
-    def _get_abstract(abs_filename: str) -> Dict[str, str]:
-        """
-        For each document in PubMed ID (PMID) in the ChemProt abstract data file, return the abstract. Data is tab-separated.
-
-        :param filename: `*_abstracts.tsv from ChemProt
-
-        :returns Dictionary with PMID keys and abstract text as values.
-        """
-        with open(abs_filename, "r") as f:
-            contents = [i.strip() for i in f.readlines()]
-
-        # PMID is the first column, Abstract is last
-        return {
-            doc.split("\t")[0]: "\n".join(doc.split("\t")[1:]) for doc in contents
-        }  # Includes title as line 1
-
-    @staticmethod
-    def _get_entities(ents_filename: str) -> Tuple[Dict[str, str]]:
-        """
-        For each document in the corpus, return entity annotations per PMID.
-        Each column in the entity file is as follows:
-        (1) PMID
-        (2) Entity Number
-        (3) Entity Type (Chemical, Gene-Y, Gene-N)
-        (4) Start index
-        (5) End index
-        (6) Actual text of entity
-
-        :param ents_filename: `_*entities.tsv` file from ChemProt
-
-        :returns: Dictionary with PMID keys and entity annotations.
-        """
-        with open(ents_filename, "r") as f:
-            contents = [i.strip() for i in f.readlines()]
-
-        entities = {}
-        entity_id = {}
-
-        for line in contents:
-
-            pmid, idx, label, start_offset, end_offset, name = line.split("\t")
-
-            # Populate entity dictionary
-            if pmid not in entities:
-                entities[pmid] = []
-
-            ann = {
-                "offsets": [int(start_offset), int(end_offset)],
-                "text": name,
-                "type": label,
-                "id": idx,
-            }
-
-            entities[pmid].append(ann)
-
-            # Populate entity mapping
-            entity_id.update({idx: name})
-
-        return entities, entity_id
-
-    @staticmethod
-    def _get_relations(rel_filename: str, is_mapped: bool = False) -> Dict[str, str]:
-        """For each document in the ChemProt corpus, create an annotation for all relationships.
-
-        :param is_mapped: Whether to convert into NL the relation tags. Default is OFF
-        """
-        with open(rel_filename, "r") as f:
-            contents = [i.strip() for i in f.readlines()]
-
-        relations = {}
-
-        for line in contents:
-            pmid, label, _, _, arg1, arg2 = line.split("\t")
-            arg1 = arg1.split("Arg1:")[-1]
-            arg2 = arg2.split("Arg2:")[-1]
-
-            if pmid not in relations:
-                relations[pmid] = []
-
-            if is_mapped:
-                label = _GROUP_LABELS[label]
-
-            ann = {
-                "type": label,
-                "arg1": arg1,
-                "arg2": arg2,
-            }
-
-            relations[pmid].append(ann)
-
-        return relations
-
-    @staticmethod
-    def _get_relations_gs(rel_filename: str, is_mapped: bool = False) -> Dict[str, str]:
-        """
-        For each document in the ChemProt corpus, create an annotation for the gold-standard relationships.
-
-        The columns include:
-        (1) PMID
-        (2) Relationship Label (CPR)
-        (3) Used in shared task
-        (3) Interactor Argument 1 Entity Identifier
-        (4) Interactor Argument 2 Entity Identifier
-
-        Gold standard includes CPRs 3-9. Relationships are always Gene + Protein.
-        Unlike entities, there is no counter, hence once must be made
-
-        :param rel_filename: Gold standard file name
-        :param ent_dict: Entity Identifier to text
-        """
-        with open(rel_filename, "r") as f:
-            contents = [i.strip() for i in f.readlines()]
-
-        relations = {}
-
-        for line in contents:
-            pmid, label, arg1, arg2 = line.split("\t")
-            arg1 = arg1.split("Arg1:")[-1]
-            arg2 = arg2.split("Arg2:")[-1]
-
-            if pmid not in relations:
-                relations[pmid] = []
-
-            if is_mapped:
-                label = _GROUP_LABELS[label]
-
-            ann = {
-                "type": label,
-                "arg1": arg1,
-                "arg2": arg2,
-            }
-
-            relations[pmid].append(ann)
-
-        return relations
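For reference, `_get_relations` above expects six tab-separated columns per line and keeps only the CPR label and the two argument ids; with `is_mapped=True` (the `bigbio_kb` path) the CPR code is translated through `_GROUP_LABELS`. A sketch with a fabricated TSV line:

```python
# Sketch of the per-line parsing in the deleted `_get_relations`;
# the TSV line below is fabricated for illustration.
_GROUP_LABELS = {"CPR:3": "Upregulator"}  # excerpt of the full mapping above

line = "10471277\tCPR:3\tY\tACTIVATION\tArg1:T1\tArg2:T36"
pmid, label, _, _, arg1, arg2 = line.strip().split("\t")
arg1 = arg1.split("Arg1:")[-1]  # -> "T1"
arg2 = arg2.split("Arg2:")[-1]  # -> "T36"
label = _GROUP_LABELS[label]    # is_mapped=True path -> "Upregulator"

print(pmid, {"type": label, "arg1": arg1, "arg2": arg2})
```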
ChemProt_Corpus.zip → chemprot_bigbio_kb/sample-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:492e3d607f38e2727b799e9d60263b776ebd2a5e61cf0fb59bea2b3eb68e1c28
-size 4977337
+oid sha256:d9e6475f44050d56ad4bb9801c544d890fac6bdd2e2da2396b2bdfe6a4aedbb7
+size 97134
chemprot_bigbio_kb/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cb0525070e1f54da1e40ef37553792ac5fabe5fa9832ecb2bda09d591131669
+size 1181109
chemprot_bigbio_kb/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:248ff5f162e86e640334300cd45a9ee472581b9ef22bc05e553bea1f1b278dff
+size 1476494
chemprot_bigbio_kb/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc9d9301f143aabdeb32410c95bca10325f782f9ca74c516e7747c5a25cbd98e
+size 890137
chemprot_full_source/sample-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42da035f9eb47e798d594532f6837ff68710914a1b56a9ab1126e1965e06c1dd
+size 77284
chemprot_full_source/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90bc2b91e95cd9208751389ff16c3892bd113fa32c45c1188dcc16e4be7de0f3
+size 945944
chemprot_full_source/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40559ad52a20dc6b8d061f8f542daafaecf7b8fb14a8d18253bf52b4c2e8d9c6
+size 1192129
chemprot_full_source/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d07a367ae932f918310b63f267e7136f6564c6a1826cfdb9efdd9e2200999c8
+size 723246
chemprot_shared_task_eval_source/sample-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:936eba9332ffa6c086f37386fb6922879716e513afa8362c357a50171607dcbc
+size 76938
chemprot_shared_task_eval_source/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1cbe5bcd04d03fdf9b40f1cac5a5326b5e40607f1c9ef5a85378461c53045ec
+size 940460
chemprot_shared_task_eval_source/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82868b082981b7d73ae5436668e3cfdb18d147b2608c691d374f4f06fe5ed68b
+size 1186849
chemprot_shared_task_eval_source/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c558d0bd6eceae7127b104f66d7e39e41b68d9970d597aa65d4be7a5baab580
+size 720123
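The hunks above store Git LFS pointers, not the Parquet bytes themselves: `oid` is the SHA-256 of the actual file and `size` its byte count. A sketch for checking a downloaded copy against the `sample` pointer above (the local path is an assumption):

```python
# Sketch: verify a local Parquet file against its Git LFS pointer above.
# Assumes the file has already been downloaded from the repo to this path.
import hashlib
import os

path = "chemprot_bigbio_kb/sample-00000-of-00001.parquet"
expected_oid = "d9e6475f44050d56ad4bb9801c544d890fac6bdd2e2da2396b2bdfe6a4aedbb7"
expected_size = 97134

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("size ok:", os.path.getsize(path) == expected_size)
print("oid ok:", h.hexdigest() == expected_oid)
```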