Basvoju committed
Commit 1f6208e · 1 Parent(s): 194e5dd

Create SemEval2018_Task7.py

Files changed (1)
  1. SemEval2018_Task7.py +308 -0
SemEval2018_Task7.py ADDED
@@ -0,0 +1,308 @@
# I am trying to understand the following code. Do not use it for any purpose, as I do not support it.
# Use the original source from https://huggingface.co/datasets/DFKI-SLT/science_ie/raw/main/science_ie.py


# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semeval2018Task7 is a dataset for the first task on semantic relation extraction and classification in scientific paper abstracts (SemEval 2018 Task 7)."""


import xml.etree.ElementTree as ET

import datasets

# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@inproceedings{gabor-etal-2018-semeval,
    title = "{S}em{E}val-2018 Task 7: Semantic Relation Extraction and Classification in Scientific Papers",
    author = {G{\'a}bor, Kata  and
      Buscaldi, Davide  and
      Schumann, Anne-Kathrin  and
      QasemiZadeh, Behrang  and
      Zargayouna, Ha{\"\i}fa  and
      Charnois, Thierry},
    booktitle = "Proceedings of the 12th International Workshop on Semantic Evaluation",
    month = jun,
    year = "2018",
    address = "New Orleans, Louisiana",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/S18-1111",
    doi = "10.18653/v1/S18-1111",
    pages = "679--688",
    abstract = "This paper describes the first task on semantic relation extraction and classification in
    scientific paper abstracts at SemEval 2018. The challenge focuses on domain-specific semantic relations
    and includes three different subtasks. The subtasks were designed so as to compare and quantify the
    effect of different pre-processing steps on the relation classification results. We expect the task to
    be relevant for a broad range of researchers working on extracting specialized knowledge from domain
    corpora, for example but not limited to scientific or bio-medical information extraction. The task
    attracted a total of 32 participants, with 158 submissions across different scenarios.",
}
"""

# You can copy an official description
_DESCRIPTION = """\
This paper describes the first task on semantic relation extraction and classification in scientific paper
abstracts at SemEval 2018. The challenge focuses on domain-specific semantic relations and includes three
different subtasks. The subtasks were designed so as to compare and quantify the effect of different
pre-processing steps on the relation classification results. We expect the task to be relevant for a broad
range of researchers working on extracting specialized knowledge from domain corpora, for example but not
limited to scientific or bio-medical information extraction. The task attracted a total of 32 participants,
with 158 submissions across different scenarios.
"""

# Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://github.com/gkata/SemEval2018Task7/tree/testing"

# Add the licence for the dataset here if you can find it
_LICENSE = ""

# Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
    "Subtask_1_1": {
        "train": {
            "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.relations.txt",
            "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.text.xml",
        },
        "test": {
            "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.test.relations.txt",
            "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.test.text.xml",
        },
    },
    "Subtask_1_2": {
        "train": {
            "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.relations.txt",
            "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.text.xml",
        },
        "test": {
            "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.test.relations.txt",
            "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.test.text.xml",
        },
    },
}


def all_text_nodes(root):
    """Yield the text content of `root` that is not wrapped in child elements."""
    if root.text is not None:
        yield root.text
    for child in root:
        if child.tail is not None:
            yield child.tail
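
# For illustration (hypothetical input, not taken from the dataset): for an
# element parsed from "<a>Hello <b>there</b> world</a>", `all_text_nodes`
# yields "Hello " and then " world", i.e. every piece of text that belongs
# directly to the element rather than to one of its child elements.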


def reading_entity_data(ET_data_to_convert):
    """Strip the XML markup from a <title> or <abstract> element and record,
    for every <entity id="..."> tag, the character span it covers in the
    cleaned text."""
    parsed_data = ET.tostring(ET_data_to_convert, "utf-8").decode("utf-8")
    parsed_data = parsed_data.replace("<abstract>", "")
    parsed_data = parsed_data.replace("</abstract>", "")
    parsed_data = parsed_data.replace("<title>", "")
    parsed_data = parsed_data.replace("</title>", "")
    parsed_data = parsed_data.replace("\n\n\n", "")

    parsing_tag = False
    final_string = ""
    tag_string = ""
    current_tag_id = ""
    current_tag_starting_pos = 0
    current_tag_ending_pos = 0
    entity_mapping_list = []

    # Walk the serialized XML character by character: text outside tags is
    # copied to `final_string`, while the id attribute of each opening
    # <entity> tag is remembered so that its character span in the cleaned
    # text can be recorded once the closing tag is reached.
    for i in parsed_data:
        if i == '<':
            parsing_tag = True
            if current_tag_id != "":
                current_tag_ending_pos = len(final_string) - 1
                entity_mapping_list.append({"id": current_tag_id,
                                            "char_start": current_tag_starting_pos,
                                            "char_end": current_tag_ending_pos + 1})
            current_tag_id = ""
            tag_string = ""

        elif i == '>':
            parsing_tag = False
            tag_string_split = tag_string.split('"')
            if len(tag_string_split) > 1:
                current_tag_id = tag_string_split[1]
                current_tag_starting_pos = len(final_string)

        else:
            if not parsing_tag:
                final_string = final_string + i
            else:
                tag_string = tag_string + i

    return {"text_data": final_string, "entities": entity_mapping_list}
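
# Worked illustration (a hypothetical fragment, not taken from the corpus):
# for an element serialized as
#     <abstract>We present a <entity id="X.1">parsing model</entity>.</abstract>
# `reading_entity_data` returns roughly
#     {"text_data": "We present a parsing model.",
#      "entities": [{"id": "X.1", "char_start": 13, "char_end": 26}]}
# so that text_data[13:26] == "parsing model".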


class Semeval2018Task7(datasets.GeneratorBasedBuilder):
    """
    Semeval2018Task7 is a dataset for semantic relation extraction and classification in scientific paper abstracts.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="Subtask_1_1", version=VERSION,
                               description="Relation classification on clean data"),
        datasets.BuilderConfig(name="Subtask_1_2", version=VERSION,
                               description="Relation classification on noisy data"),
    ]
    DEFAULT_CONFIG_NAME = "Subtask_1_1"

    def _info(self):
        class_labels = ["", "USAGE", "RESULT", "MODEL-FEATURE", "PART_WHOLE", "TOPIC", "COMPARE"]
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "abstract": datasets.Value("string"),
                "entities": [
                    {
                        "id": datasets.Value("string"),
                        "char_start": datasets.Value("int32"),
                        "char_end": datasets.Value("int32")
                    }
                ],
                "relation": [
                    {
                        "label": datasets.ClassLabel(names=class_labels),
                        "arg1": datasets.Value("string"),
                        "arg2": datasets.Value("string"),
                        "reverse": datasets.Value("bool")
                    }
                ]
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
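
    # Illustration (a hypothetical record, not taken from the data): an example
    # matching the features above would look roughly like
    #     {"id": "X",
    #      "title": "A hypothetical paper title",
    #      "abstract": "We present a parsing model.",
    #      "entities": [{"id": "X.1", "char_start": 13, "char_end": 26}],
    #      "relation": [{"label": "USAGE", "arg1": "X.1", "arg2": "X.2",
    #                    "reverse": False}]}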

    def _split_generators(self, dl_manager):
        # If several configurations are possible (listed in BUILDER_CONFIGS),
        # the configuration selected by the user is in self.config.name.

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It can accept any type of nested list/dict and will give back the same structure with the URL replaced
        # with the path to local files. By default the archives will be extracted and a path to a cached folder
        # where they are extracted is returned instead of the archive.
        urls = _URLS[self.config.name]
        downloaded_files = dl_manager.download(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "relation_filepath": downloaded_files["train"]["relations"],
                    "text_filepath": downloaded_files["train"]["text"],
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "relation_filepath": downloaded_files["test"]["relations"],
                    "text_filepath": downloaded_files["test"]["text"],
                }
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, relation_filepath, text_filepath):
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.

        # First pass: read the relation annotations and group them by the ID of the
        # abstract they belong to (the part of the argument ID before the ".").
        with open(relation_filepath, encoding="utf-8") as f:
            relations = []
            text_id_to_relations_map = {}
            for key, row in enumerate(f):
                # Each row has the form LABEL(arg1,arg2[,REVERSE]).
                row_split = row.strip("\n").split("(")
                use_case = row_split[0]
                second_half = row_split[1].strip(")")
                second_half_splits = second_half.split(",")
                size = len(second_half_splits)

                relation = {
                    "label": use_case,
                    "arg1": second_half_splits[0],
                    "arg2": second_half_splits[1],
                    # A third element (the REVERSE flag) means the relation is reversed.
                    "reverse": size == 3
                }
                relations.append(relation)

                arg_id = second_half_splits[0].split(".")[0]
                if arg_id not in text_id_to_relations_map:
                    text_id_to_relations_map[arg_id] = [relation]
                else:
                    text_id_to_relations_map[arg_id].append(relation)
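
                # Illustration (the IDs here are hypothetical, the line format is the
                # one the parsing above expects): a row such as
                #     USAGE(X.1,X.2,REVERSE)
                # becomes
                #     {"label": "USAGE", "arg1": "X.1", "arg2": "X.2", "reverse": True}
                # and is grouped under the abstract ID "X".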

        # Second pass: parse the XML file with the titles and abstracts, strip the
        # entity markup, and join each text with the relations collected above.
        doc = ET.parse(text_filepath)
        root = doc.getroot()

        for child in root:
            if child.find("title") is None:
                continue
            text_id = child.attrib

            if child.find("abstract") is None:
                continue

            abstract_text_and_entities = reading_entity_data(child.find("abstract"))
            title_text_and_entities = reading_entity_data(child.find("title"))

            text_relations = []
            if text_id["id"] in text_id_to_relations_map:
                text_relations = text_id_to_relations_map[text_id["id"]]

            yield text_id["id"], {
                "id": text_id["id"],
                "title": title_text_and_entities["text_data"],
                "abstract": abstract_text_and_entities["text_data"],
                "entities": abstract_text_and_entities["entities"] + title_text_and_entities["entities"],
                "relation": text_relations
            }
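

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original script; an assumption about
# how the builder would typically be exercised): it requires a `datasets`
# version that still supports loading local dataset scripts, plus network
# access to the GitHub URLs in `_URLS`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dataset = datasets.load_dataset("SemEval2018_Task7.py", name="Subtask_1_1")
    example = dataset["train"][0]
    print(example["id"], example["title"])
    print(example["relation"][:2])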