phucdev committed (verified)
Commit 458cdb5 · 1 Parent(s): 71f95b3

Update DiMB-RE.py

Files changed (1):
  1. DiMB-RE.py +349 -351
DiMB-RE.py CHANGED
@@ -1,351 +1,349 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ScienceIE is a dataset for the SemEval task of extracting key phrases and relations between them from scientific documents"""
- import json
-
- import datasets
-
- from pathlib import Path
- from itertools import permutations
-
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @misc{hong2024dimbreminingscientificliterature,
- title={DiMB-RE: Mining the Scientific Literature for Diet-Microbiome Associations},
- author={Gibong Hong and Veronica Hindle and Nadine M. Veasley and Hannah D. Holscher and Halil Kilicoglu},
- year={2024},
- eprint={2409.19581},
- archivePrefix={arXiv},
- primaryClass={cs.CL},
- url={https://arxiv.org/abs/2409.19581},
- }
- """
-
- # You can copy an official description
- _DESCRIPTION = """\
- DiMB-RE is a corpus of 165 nutrition and microbiome-related publications, and we validate its usefulness with state-of-the-art pretrained language models. Specifically, we make the following contributions:
-
- 1. We annotated titles and abstracts of 165 publications with 15 entity types and 13 relation types that hold between them. To our knowledge, DiMB-RE is the largest and most diverse corpus focusing on this domain in terms of the number of entities and relations it contains.
- 2. In addition to titles and abstracts, we annotated Results sections of 30 articles (out of 165) to assess the impact of the information from full text.
- 3. To ground and contextualize relations, we annotated relation triggers and certainty information, which were previously included only in the biological event extraction corpora.
- 4. We normalized entity mentions to standard database identifiers (e.g., MeSH, ChEBI, FoodOn) to allow aggregation for further study.
- 5. We trained and evaluated NER and RE models based on the state-of-the-art pretrained language models to establish robust baselines for this corpus.
-
- Further details regarding this study are available in our paper: https://arxiv.org/pdf/2409.19581.pdf
- """
-
- _HOMEPAGE = "https://github.com/ScienceNLP-Lab/DiMB-RE"
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""
-
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLS = {
-     "train": "https://github.com/ScienceNLP-Lab/DiMB-RE/raw/refs/heads/master/data/DiMB-RE/ner_reduced_v6.1_trg_abs_result/train.json",
-     "validation": "https://github.com/ScienceNLP-Lab/DiMB-RE/raw/refs/heads/master/data/DiMB-RE/ner_reduced_v6.1_trg_abs_result/dev.json",
-     "test": "https://github.com/ScienceNLP-Lab/DiMB-RE/raw/refs/heads/master/data/DiMB-RE/ner_reduced_v6.1_trg_abs_result/test.json"
- }
-
-
- class DiMB_RE(datasets.GeneratorBasedBuilder):
-     """DiMB-RE (Diet-MicroBiome dataset for Relation Extraction) is a comprehensive corpus annotated with 15 entity
-     types (e.g., Nutrient, Microorganism) and 13 relation types (e.g., INCREASES, IMPROVES) capturing
-     diet-microbiome associations"""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="default", version=VERSION, description="Default configuration"),
-         datasets.BuilderConfig(name="ner", version=VERSION,
-                                description="Configuration for Named Entity Recognition (NER)"),
-         datasets.BuilderConfig(name="re", version=VERSION,
-                                description="Configuration for Relation Extraction (RE)"),
-         datasets.BuilderConfig(name="sentence_level", version=VERSION,
-                                description="Configuration for sentence-level processing"),
-     ]
-
-     DEFAULT_CONFIG_NAME = "default"
-
-     def _info(self):
-         ner = [
-             {
-                 "start": datasets.Value("int32"),
-                 "end": datasets.Value("int32"),
-                 "type": datasets.Value("string")
-             }
-         ]
-         triggers = [
-             {
-                 "start": datasets.Value("int32"),
-                 "end": datasets.Value("int32"),
-                 "type": datasets.Value("string")
-             }
-         ]
-         relations = [
-             {
-                 "head": datasets.Value("int32"),
-                 "head_start": datasets.Value("int32"),
-                 "head_end": datasets.Value("int32"),
-                 "head_type": datasets.Value("string"),
-                 "tail": datasets.Value("int32"),
-                 "tail_start": datasets.Value("int32"),
-                 "tail_end": datasets.Value("int32"),
-                 "tail_type": datasets.Value("string"),
-                 "type": datasets.Value("string"),
-                 "factuality": datasets.Value("string")
-             }
-         ]
-         triplets = [
-             {
-                 "head_start": datasets.Value("int32"),
-                 "head_end": datasets.Value("int32"),
-                 "tail_start": datasets.Value("int32"),
-                 "tail_end": datasets.Value("int32"),
-                 "trigger_start": datasets.Value("int32"),
-                 "trigger_end": datasets.Value("int32"),
-                 "relation": datasets.Value("string")
-             }
-         ]
-         if self.config.name == "sentence_level":
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "doc_key": datasets.Value("string"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "ner": ner,
-                     "ner_tags": datasets.Sequence(datasets.Value("string")),
-                     "triggers": triggers,
-                     "relations": relations,
-                     "triplets": triplets
-                 }
-             )
-         else:
-             features = datasets.Features(
-                 {
-                     "doc_key": datasets.Value("string"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "sentences": [
-                         {
-                             "start": datasets.Value("int32"),
-                             "end": datasets.Value("int32")
-                         }
-                     ],
-                     "ner": ner,
-                     "ner_tags": datasets.Sequence(datasets.Value("string")),
-                     "triggers": triggers,
-                     "relations": relations,
-                     "triplets": triplets
-                 }
-             )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the configurations
-             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         downloaded_files = dl_manager.download_and_extract(_URLS)
-
-         return [datasets.SplitGenerator(name=i, gen_kwargs={"file_path": downloaded_files[str(i)]})
-                 for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, file_path):
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-
-         with open(file_path, "r", encoding="utf-8") as f:
-             for line in f:
-                 loaded_doc = json.loads(line)
-                 doc_key = loaded_doc["doc_key"]
-                 tokens = []
-                 sentences = []
-                 offset = 0
-                 for sentence in loaded_doc["sentences"]:
-                     start = offset
-                     end = offset + len(sentence)
-                     sentences.append({"start": start, "end": end})
-                     offset = end
-                     tokens.extend(sentence)
-                 entities = []
-                 for sent_entities in loaded_doc["ner"]:
-                     for entity in sent_entities:
-                         # Each entity is a tuple (start, end, type)
-                         entities.append({"start": entity[0], "end": entity[1] + 1, "type": entity[2]})
-                 ner_tags = process_ner(entities, tokens)
-                 triggers = []
-                 for sent_triggers in loaded_doc["triggers"]:
-                     for trigger in sent_triggers:
-                         # Each trigger is a tuple (start, end, type)
-                         triggers.append({"start": trigger[0], "end": trigger[1] + 1, "type": trigger[2]})
-                 relations = []
-                 for sent_idx, rels in enumerate(loaded_doc["relations"]):
-                     for rel in rels:
-                         # Each relation (head_start, head_end, tail_start, tail_end, relation_type, factuality)
-                         head_start = rel[0]
-                         head_end = rel[1] + 1
-                         head_idx = -1
-                         tail_start = rel[2]
-                         tail_end = rel[3] + 1
-                         tail_idx = -1
-                         for idx, entity in enumerate(entities):
-                             if entity["start"] == head_start and entity["end"] == head_end:
-                                 head_idx = idx
-                             elif entity["start"] == tail_start and entity["end"] == tail_end:
-                                 tail_idx = idx
-                         if head_idx == -1 or tail_idx == -1:
-                             print(f"Warning: Relation {rel} in document {doc_key} has invalid entity indices.")
-                             continue
-                         relation_type = rel[4]
-                         factuality = rel[5]
-                         relations.append({
-                             "head": head_idx,
-                             "head_start": head_start,
-                             "head_end": head_end,
-                             "head_type": entities[head_idx]["type"],
-                             "tail": tail_idx,
-                             "tail_start": tail_start,
-                             "tail_end": tail_end,
-                             "tail_type": entities[tail_idx]["type"],
-                             "type": relation_type,
-                             "factuality": factuality
-                         })
-                 triplets = []
-                 for sent_triplets in loaded_doc["triplets"]:
-                     for triplet in sent_triplets:
-                         # Each triplet is a tuple (head_start, head_end, tail_start, tail_end, trigger_start, trigger_end, relation_type)
-                         head_start = triplet[0]
-                         head_end = triplet[1] + 1
-                         tail_start = triplet[2]
-                         tail_end = triplet[3] + 1
-                         trigger_start = triplet[4]
-                         trigger_end = triplet[5] + 1
-                         relation_type = triplet[6]
-                         triplets.append({
-                             "head_start": head_start,
-                             "head_end": head_end,
-                             "tail_start": tail_start,
-                             "tail_end": tail_end,
-                             "trigger_start": trigger_start,
-                             "trigger_end": trigger_end,
-                             "relation": relation_type
-                         })
-                 doc = {
-                     "doc_key": doc_key,
-                     "tokens": tokens,
-                     "sentences": sentences,
-                     "ner": entities,
-                     "triggers": triggers,
-                     "relations": relations,
-                     "triplets": triplets,
-                     "ner_tags": ner_tags
-                 }
-                 if self.config.name == "sentence_level":
-                     # Convert all document-level information to sentence-level, fix spans
-                     for sent_idx, sent in enumerate(sentences):
-                         sentence = {
-                             "id": f"{doc_key}_sent_{sent_idx}",
-                             "doc_key": doc_key,
-                             "tokens": doc["tokens"][sent["start"]:sent["end"]],
-                             "ner": [
-                                 {
-                                     "start": entity["start"] - sent["start"],
-                                     "end": entity["end"] - sent["start"],
-                                     "type": entity["type"]
-                                 } for entity in entities
-                                 if entity["start"] >= sent["start"] and entity["end"] <= sent["end"]
-                             ],
-                             "ner_tags": ner_tags[sent["start"]:sent["end"]],
-                             "triggers": [
-                                 {
-                                     "start": trigger["start"] - sent["start"],
-                                     "end": trigger["end"] - sent["start"],
-                                     "type": trigger["type"]
-                                 } for trigger in triggers
-                                 if trigger["start"] >= sent["start"] and trigger["end"] <= sent["end"]
-                             ],
-                             "relations": [
-                                 {
-                                     "head": rel["head"],
-                                     "head_start": rel["head_start"] - sent["start"],
-                                     "head_end": rel["head_end"] - sent["start"],
-                                     "head_type": rel["head_type"],
-                                     "tail": rel["tail"],
-                                     "tail_start": rel["tail_start"] - sent["start"],
-                                     "tail_end": rel["tail_end"] - sent["start"],
-                                     "tail_type": rel["tail_type"],
-                                     "type": rel["type"],
-                                     "factuality": rel["factuality"]
-                                 } for rel in relations
-                                 if (rel["head_start"] >= sent["start"] and rel["head_end"] <= sent["end"]) and
-                                 (rel["tail_start"] >= sent["start"] and rel["tail_end"] <= sent["end"])
-                             ],
-                             "triplets": [
-                                 {
-                                     "head_start": triplet["head_start"] - sent["start"],
-                                     "head_end": triplet["head_end"] - sent["start"],
-                                     "tail_start": triplet["tail_start"] - sent["start"],
-                                     "tail_end": triplet["tail_end"] - sent["start"],
-                                     "trigger_start": triplet["trigger_start"] - sent["start"],
-                                     "trigger_end": triplet["trigger_end"] - sent["start"],
-                                     "relation": triplet["relation"]
-                                 } for triplet in triplets
-                                 if (triplet["head_start"] >= sent["start"] and triplet["head_end"] <= sent["end"]) and
-                                 (triplet["tail_start"] >= sent["start"] and triplet["tail_end"] <= sent["end"])
-                             ]}
-                         yield sentence["id"], sentence
-                 else:
-                     # Yields examples as (key, example) tuples
-                     yield doc_key, doc
-
-
- def process_ner(entities, tokens):
-     """Converts entities to BIO tags for NER"""
-     bio_tags = ["O"] * len(tokens)
-
-     # Iterate through each entity to apply B- and I- tags.
-     for entity in entities:
-         start_index = entity["start"]
-         end_index = entity["end"]
-         entity_type = entity["type"]
-
-         # Ensure the entity indices are within the bounds of the token list.
-         if start_index >= len(tokens) or end_index > len(tokens):
-             print(f"Warning: Entity {entity} is out of bounds. Skipping.")
-             continue
-
-         # Mark the beginning of the entity.
-         if start_index < len(tokens):
-             bio_tags[start_index] = f"B-{entity_type}"
-
-         # Mark the inside of the entity for all subsequent tokens.
-         for i in range(start_index + 1, end_index):
-             if i < len(tokens):
-                 bio_tags[i] = f"I-{entity_type}"
-     return bio_tags
 
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """DiMB-RE (Diet-MicroBiome dataset for Relation Extraction) is a corpus of 165 nutrition and microbiome-related publications"""
+ import json
+ import datasets
+
+ from pathlib import Path
+
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @misc{hong2024dimbreminingscientificliterature,
+ title={DiMB-RE: Mining the Scientific Literature for Diet-Microbiome Associations},
+ author={Gibong Hong and Veronica Hindle and Nadine M. Veasley and Hannah D. Holscher and Halil Kilicoglu},
+ year={2024},
+ eprint={2409.19581},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL},
+ url={https://arxiv.org/abs/2409.19581},
+ }
+ """
+
+ # You can copy an official description
+ _DESCRIPTION = """\
+ DiMB-RE is a corpus of 165 nutrition and microbiome-related publications, and we validate its usefulness with state-of-the-art pretrained language models. Specifically, we make the following contributions:
+
+ 1. We annotated titles and abstracts of 165 publications with 15 entity types and 13 relation types that hold between them. To our knowledge, DiMB-RE is the largest and most diverse corpus focusing on this domain in terms of the number of entities and relations it contains.
+ 2. In addition to titles and abstracts, we annotated Results sections of 30 articles (out of 165) to assess the impact of the information from full text.
+ 3. To ground and contextualize relations, we annotated relation triggers and certainty information, which were previously included only in the biological event extraction corpora.
+ 4. We normalized entity mentions to standard database identifiers (e.g., MeSH, ChEBI, FoodOn) to allow aggregation for further study.
+ 5. We trained and evaluated NER and RE models based on the state-of-the-art pretrained language models to establish robust baselines for this corpus.
+
+ Further details regarding this study are available in our paper: https://arxiv.org/pdf/2409.19581.pdf
+ """
+
+ _HOMEPAGE = "https://github.com/ScienceNLP-Lab/DiMB-RE"
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "train": "https://github.com/ScienceNLP-Lab/DiMB-RE/raw/refs/heads/master/data/DiMB-RE/ner_reduced_v6.1_trg_abs_result/train.json",
+     "validation": "https://github.com/ScienceNLP-Lab/DiMB-RE/raw/refs/heads/master/data/DiMB-RE/ner_reduced_v6.1_trg_abs_result/dev.json",
+     "test": "https://github.com/ScienceNLP-Lab/DiMB-RE/raw/refs/heads/master/data/DiMB-RE/ner_reduced_v6.1_trg_abs_result/test.json"
+ }
+
+
+ class DiMB_RE(datasets.GeneratorBasedBuilder):
+     """DiMB-RE (Diet-MicroBiome dataset for Relation Extraction) is a comprehensive corpus annotated with 15 entity
+     types (e.g., Nutrient, Microorganism) and 13 relation types (e.g., INCREASES, IMPROVES) capturing
+     diet-microbiome associations"""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="default", version=VERSION, description="Default configuration"),
+         datasets.BuilderConfig(name="ner", version=VERSION,
+                                description="Configuration for Named Entity Recognition (NER)"),
+         datasets.BuilderConfig(name="re", version=VERSION,
+                                description="Configuration for Relation Extraction (RE)"),
+         datasets.BuilderConfig(name="sentence_level", version=VERSION,
+                                description="Configuration for sentence-level processing"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "default"
+
+     def _info(self):
+         ner = [
+             {
+                 "start": datasets.Value("int32"),
+                 "end": datasets.Value("int32"),
+                 "type": datasets.Value("string")
+             }
+         ]
+         triggers = [
+             {
+                 "start": datasets.Value("int32"),
+                 "end": datasets.Value("int32"),
+                 "type": datasets.Value("string")
+             }
+         ]
+         relations = [
+             {
+                 "head": datasets.Value("int32"),
+                 "head_start": datasets.Value("int32"),
+                 "head_end": datasets.Value("int32"),
+                 "head_type": datasets.Value("string"),
+                 "tail": datasets.Value("int32"),
+                 "tail_start": datasets.Value("int32"),
+                 "tail_end": datasets.Value("int32"),
+                 "tail_type": datasets.Value("string"),
+                 "type": datasets.Value("string"),
+                 "factuality": datasets.Value("string")
+             }
+         ]
+         triplets = [
+             {
+                 "head_start": datasets.Value("int32"),
+                 "head_end": datasets.Value("int32"),
+                 "tail_start": datasets.Value("int32"),
+                 "tail_end": datasets.Value("int32"),
+                 "trigger_start": datasets.Value("int32"),
+                 "trigger_end": datasets.Value("int32"),
+                 "relation": datasets.Value("string")
+             }
+         ]
+         if self.config.name == "sentence_level":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "doc_key": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner": ner,
+                     "ner_tags": datasets.Sequence(datasets.Value("string")),
+                     "triggers": triggers,
+                     "relations": relations,
+                     "triplets": triplets
+                 }
+             )
+         else:
+             features = datasets.Features(
+                 {
+                     "doc_key": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "sentences": [
+                         {
+                             "start": datasets.Value("int32"),
+                             "end": datasets.Value("int32")
+                         }
+                     ],
+                     "ner": ner,
+                     "ner_tags": datasets.Sequence(datasets.Value("string")),
+                     "triggers": triggers,
+                     "relations": relations,
+                     "triplets": triplets
+                 }
+             )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the configurations
+             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+
+         return [datasets.SplitGenerator(name=i, gen_kwargs={"file_path": downloaded_files[str(i)]})
+                 for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, file_path):
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+
+         with open(file_path, "r", encoding="utf-8") as f:
+             for line in f:
+                 loaded_doc = json.loads(line)
+                 doc_key = loaded_doc["doc_key"]
+                 tokens = []
+                 sentences = []
+                 offset = 0
+                 for sentence in loaded_doc["sentences"]:
+                     start = offset
+                     end = offset + len(sentence)
+                     sentences.append({"start": start, "end": end})
+                     offset = end
+                     tokens.extend(sentence)
+                 entities = []
+                 for sent_entities in loaded_doc["ner"]:
+                     for entity in sent_entities:
+                         # Each entity is a tuple (start, end, type)
+                         entities.append({"start": entity[0], "end": entity[1] + 1, "type": entity[2]})
+                 ner_tags = process_ner(entities, tokens)
+                 triggers = []
+                 for sent_triggers in loaded_doc["triggers"]:
+                     for trigger in sent_triggers:
+                         # Each trigger is a tuple (start, end, type)
+                         triggers.append({"start": trigger[0], "end": trigger[1] + 1, "type": trigger[2]})
+                 relations = []
+                 for sent_idx, rels in enumerate(loaded_doc["relations"]):
+                     for rel in rels:
+                         # Each relation (head_start, head_end, tail_start, tail_end, relation_type, factuality)
+                         head_start = rel[0]
+                         head_end = rel[1] + 1
+                         head_idx = -1
+                         tail_start = rel[2]
+                         tail_end = rel[3] + 1
+                         tail_idx = -1
+                         for idx, entity in enumerate(entities):
+                             if entity["start"] == head_start and entity["end"] == head_end:
+                                 head_idx = idx
+                             elif entity["start"] == tail_start and entity["end"] == tail_end:
+                                 tail_idx = idx
+                         if head_idx == -1 or tail_idx == -1:
+                             print(f"Warning: Relation {rel} in document {doc_key} has invalid entity indices.")
+                             continue
+                         relation_type = rel[4]
+                         factuality = rel[5]
+                         relations.append({
+                             "head": head_idx,
+                             "head_start": head_start,
+                             "head_end": head_end,
+                             "head_type": entities[head_idx]["type"],
+                             "tail": tail_idx,
+                             "tail_start": tail_start,
+                             "tail_end": tail_end,
+                             "tail_type": entities[tail_idx]["type"],
+                             "type": relation_type,
+                             "factuality": factuality
+                         })
+                 triplets = []
+                 for sent_triplets in loaded_doc["triplets"]:
+                     for triplet in sent_triplets:
+                         # Each triplet is a tuple (head_start, head_end, tail_start, tail_end, trigger_start, trigger_end, relation_type)
+                         head_start = triplet[0]
+                         head_end = triplet[1] + 1
+                         tail_start = triplet[2]
+                         tail_end = triplet[3] + 1
+                         trigger_start = triplet[4]
+                         trigger_end = triplet[5] + 1
+                         relation_type = triplet[6]
+                         triplets.append({
+                             "head_start": head_start,
+                             "head_end": head_end,
+                             "tail_start": tail_start,
+                             "tail_end": tail_end,
+                             "trigger_start": trigger_start,
+                             "trigger_end": trigger_end,
+                             "relation": relation_type
+                         })
+                 doc = {
+                     "doc_key": doc_key,
+                     "tokens": tokens,
+                     "sentences": sentences,
+                     "ner": entities,
+                     "triggers": triggers,
+                     "relations": relations,
+                     "triplets": triplets,
+                     "ner_tags": ner_tags
+                 }
+                 if self.config.name == "sentence_level":
+                     # Convert all document-level information to sentence-level, fix spans
+                     for sent_idx, sent in enumerate(sentences):
+                         sentence = {
+                             "id": f"{doc_key}_sent_{sent_idx}",
+                             "doc_key": doc_key,
+                             "tokens": doc["tokens"][sent["start"]:sent["end"]],
+                             "ner": [
+                                 {
+                                     "start": entity["start"] - sent["start"],
+                                     "end": entity["end"] - sent["start"],
+                                     "type": entity["type"]
+                                 } for entity in entities
+                                 if entity["start"] >= sent["start"] and entity["end"] <= sent["end"]
+                             ],
+                             "ner_tags": ner_tags[sent["start"]:sent["end"]],
+                             "triggers": [
+                                 {
+                                     "start": trigger["start"] - sent["start"],
+                                     "end": trigger["end"] - sent["start"],
+                                     "type": trigger["type"]
+                                 } for trigger in triggers
+                                 if trigger["start"] >= sent["start"] and trigger["end"] <= sent["end"]
+                             ],
+                             "relations": [
+                                 {
+                                     "head": rel["head"],
+                                     "head_start": rel["head_start"] - sent["start"],
+                                     "head_end": rel["head_end"] - sent["start"],
+                                     "head_type": rel["head_type"],
+                                     "tail": rel["tail"],
+                                     "tail_start": rel["tail_start"] - sent["start"],
+                                     "tail_end": rel["tail_end"] - sent["start"],
+                                     "tail_type": rel["tail_type"],
+                                     "type": rel["type"],
+                                     "factuality": rel["factuality"]
+                                 } for rel in relations
+                                 if (rel["head_start"] >= sent["start"] and rel["head_end"] <= sent["end"]) and
+                                 (rel["tail_start"] >= sent["start"] and rel["tail_end"] <= sent["end"])
+                             ],
+                             "triplets": [
+                                 {
+                                     "head_start": triplet["head_start"] - sent["start"],
+                                     "head_end": triplet["head_end"] - sent["start"],
+                                     "tail_start": triplet["tail_start"] - sent["start"],
+                                     "tail_end": triplet["tail_end"] - sent["start"],
+                                     "trigger_start": triplet["trigger_start"] - sent["start"],
+                                     "trigger_end": triplet["trigger_end"] - sent["start"],
+                                     "relation": triplet["relation"]
+                                 } for triplet in triplets
+                                 if (triplet["head_start"] >= sent["start"] and triplet["head_end"] <= sent["end"]) and
+                                 (triplet["tail_start"] >= sent["start"] and triplet["tail_end"] <= sent["end"])
+                             ]}
+                         yield sentence["id"], sentence
+                 else:
+                     # Yields examples as (key, example) tuples
+                     yield doc_key, doc
+
+
+ def process_ner(entities, tokens):
+     """Converts entities to BIO tags for NER"""
+     bio_tags = ["O"] * len(tokens)
+
+     # Iterate through each entity to apply B- and I- tags.
+     for entity in entities:
+         start_index = entity["start"]
+         end_index = entity["end"]
+         entity_type = entity["type"]
+
+         # Ensure the entity indices are within the bounds of the token list.
+         if start_index >= len(tokens) or end_index > len(tokens):
+             print(f"Warning: Entity {entity} is out of bounds. Skipping.")
+             continue
+
+         # Mark the beginning of the entity.
+         if start_index < len(tokens):
+             bio_tags[start_index] = f"B-{entity_type}"
+
+         # Mark the inside of the entity for all subsequent tokens.
+         for i in range(start_index + 1, end_index):
+             if i < len(tokens):
+                 bio_tags[i] = f"I-{entity_type}"
+     return bio_tags
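
For reference, a minimal usage sketch of the updated loading script. The repository id "phucdev/DiMB-RE" is an assumption based on the committer and file name (substitute the actual repo id, or a local path to DiMB-RE.py), and running a script-based dataset requires a `datasets` version that still supports loading scripts with trust_remote_code.

# Minimal usage sketch (assumptions: repo id "phucdev/DiMB-RE"; a `datasets`
# version that still executes dataset loading scripts via trust_remote_code).
from datasets import load_dataset

# Document-level records (default config): one example per annotated article,
# with document-wide token offsets for entities, triggers, relations and triplets.
docs = load_dataset("phucdev/DiMB-RE", name="default", trust_remote_code=True)
print(docs["train"][0]["doc_key"])
print(docs["train"][0]["ner"][:3])

# Sentence-level records: spans are re-indexed relative to each sentence,
# and BIO tags in "ner_tags" align one-to-one with "tokens".
sents = load_dataset("phucdev/DiMB-RE", name="sentence_level", trust_remote_code=True)
print(sents["validation"][0]["tokens"])
print(sents["validation"][0]["ner_tags"])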