"""DiMB-RE (Diet-MicroBiome dataset for Relation Extraction) is a corpus of 165 nutrition and microbiome-related publications""" |
|
|
import json |
|
|
import datasets |
|
|
|
|
|
from pathlib import Path |
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
|
@misc{hong2024dimbreminingscientificliterature, |
|
|
title={DiMB-RE: Mining the Scientific Literature for Diet-Microbiome Associations}, |
|
|
author={Gibong Hong and Veronica Hindle and Nadine M. Veasley and Hannah D. Holscher and Halil Kilicoglu}, |
|
|
year={2024}, |
|
|
eprint={2409.19581}, |
|
|
archivePrefix={arXiv}, |
|
|
primaryClass={cs.CL}, |
|
|
url={https://arxiv.org/abs/2409.19581}, |
|
|
} |
|
|
""" |
|
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
|
DiMB-RE is a corpus of 165 nutrition and microbiome-related publications, and we validate its usefulness with state-of-the-art pretrained language models. Specifically, we make the following contributions: |
|
|
|
|
|
1. We annotated titles and abstracts of 165 publications with 15 entity types and 13 relation types that hold between them. To our knowledge, DiMB-RE is the largest and most diverse corpus focusing on this domain in terms of the number of entities and relations it contains. |
|
|
2. In addition to titles and abstracts, we annotated Results sections of 30 articles (out of 165) to assess the impact of the information from full text. |
|
|
3. To ground and contextualize relations, we annotated relation triggers and certainty information, which were previously included only in the biological event extraction corpora. |
|
|
4. We normalized entity mentions to standard database identifiers (e.g., MeSH, CheBI, FoodOn) to allow aggregation for further study. |
|
|
5. We trained and evaluated NER and RE models based on the state-of-the-art pretrained language models to establish robust baselines for this corpus. |
|
|
|
|
|
Further details regarding this study are available in our paper: https://arxiv.org/pdf/2409.19581.pdf |
|
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://github.com/ScienceNLP-Lab/DiMB-RE" |
|
|
|
|
|
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
|
|
|
|
_URLS = { |
|
|
"train": "https://github.com/ScienceNLP-Lab/DiMB-RE/raw/refs/heads/master/data/DiMB-RE/ner_reduced_v6.1_trg_abs_result/train.json", |
|
|
"validation": "https://github.com/ScienceNLP-Lab/DiMB-RE/raw/refs/heads/master/data/DiMB-RE/ner_reduced_v6.1_trg_abs_result/dev.json", |
|
|
"test": "https://github.com/ScienceNLP-Lab/DiMB-RE/raw/refs/heads/master/data/DiMB-RE/ner_reduced_v6.1_trg_abs_result/test.json" |
|
|
} |
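
# Each split file above is JSON Lines: one document per line, parsed in
# _generate_examples below.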


class DiMB_RE(datasets.GeneratorBasedBuilder):
    """DiMB-RE (Diet-MicroBiome dataset for Relation Extraction) is a comprehensive corpus annotated with 15 entity
    types (e.g., Nutrient, Microorganism) and 13 relation types (e.g., INCREASES, IMPROVES) capturing
    diet-microbiome associations."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION, description="Default configuration"),
        datasets.BuilderConfig(name="ner", version=VERSION,
                               description="Configuration for Named Entity Recognition (NER)"),
        datasets.BuilderConfig(name="re", version=VERSION,
                               description="Configuration for Relation Extraction (RE)"),
        datasets.BuilderConfig(name="sentence_level", version=VERSION,
                               description="Configuration for sentence-level processing"),
    ]

    DEFAULT_CONFIG_NAME = "default"
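
    # Note: only "sentence_level" changes the schema and example granularity;
    # "default", "ner", and "re" currently share the document-level features
    # defined in _info below.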

    def _info(self):
        # Entity and trigger spans are token offsets into `tokens`, with
        # exclusive `end` (the raw files use inclusive end offsets, which
        # _generate_examples converts).
        ner = [
            {
                "start": datasets.Value("int32"),
                "end": datasets.Value("int32"),
                "type": datasets.Value("string")
            }
        ]
        triggers = [
            {
                "start": datasets.Value("int32"),
                "end": datasets.Value("int32"),
                "type": datasets.Value("string")
            }
        ]
        relations = [
            {
                "head": datasets.Value("int32"),
                "head_start": datasets.Value("int32"),
                "head_end": datasets.Value("int32"),
                "head_type": datasets.Value("string"),
                "tail": datasets.Value("int32"),
                "tail_start": datasets.Value("int32"),
                "tail_end": datasets.Value("int32"),
                "tail_type": datasets.Value("string"),
                "type": datasets.Value("string"),
                "factuality": datasets.Value("string")
            }
        ]
        triplets = [
            {
                "head_start": datasets.Value("int32"),
                "head_end": datasets.Value("int32"),
                "tail_start": datasets.Value("int32"),
                "tail_end": datasets.Value("int32"),
                "trigger_start": datasets.Value("int32"),
                "trigger_end": datasets.Value("int32"),
                "relation": datasets.Value("string")
            }
        ]
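
        # In `datasets.Features`, a Python list wrapping a feature dict (as
        # above) declares a variable-length sequence of structs.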

        if self.config.name == "sentence_level":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "doc_key": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner": ner,
                    "ner_tags": datasets.Sequence(datasets.Value("string")),
                    "triggers": triggers,
                    "relations": relations,
                    "triplets": triplets
                }
            )
        else:
            features = datasets.Features(
                {
                    "doc_key": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "sentences": [
                        {
                            "start": datasets.Value("int32"),
                            "end": datasets.Value("int32")
                        }
                    ],
                    "ner": ner,
                    "ner_tags": datasets.Sequence(datasets.Value("string")),
                    "triggers": triggers,
                    "relations": relations,
                    "triplets": triplets
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
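
    # Once built, the loaded dataset's `features` attribute mirrors the schema
    # above, e.g. (hypothetical local script path):
    #   ds = datasets.load_dataset("dimb_re.py", name="default")
    #   ds["train"].features["relations"]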

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLS)
        # datasets.Split instances stringify to "train"/"validation"/"test",
        # matching the keys of _URLS.
        return [datasets.SplitGenerator(name=split, gen_kwargs={"file_path": downloaded_files[str(split)]})
                for split in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
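
    # Each JSONL record is expected to carry parallel per-sentence lists, with
    # token-level, *inclusive* end offsets (converted to exclusive below):
    #   {"doc_key": ..., "sentences": [[token, ...], ...],
    #    "ner": [[[start, end, type], ...], ...],
    #    "triggers": [[[start, end, type], ...], ...],
    #    "relations": [[[h_start, h_end, t_start, t_end, type, factuality], ...], ...],
    #    "triplets": [[[h_start, h_end, t_start, t_end, trg_start, trg_end, relation], ...], ...]}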

    def _generate_examples(self, file_path):
        with open(file_path, "r", encoding="utf-8") as f:
            for line in f:
                loaded_doc = json.loads(line)
                doc_key = loaded_doc["doc_key"]

                # Flatten sentences into a single token list, recording each
                # sentence's token span.
                tokens = []
                sentences = []
                offset = 0
                for sentence in loaded_doc["sentences"]:
                    start = offset
                    end = offset + len(sentence)
                    sentences.append({"start": start, "end": end})
                    offset = end
                    tokens.extend(sentence)

                # Raw end offsets are inclusive; convert to exclusive.
                entities = []
                for sent_entities in loaded_doc["ner"]:
                    for entity in sent_entities:
                        entities.append({"start": entity[0], "end": entity[1] + 1, "type": entity[2]})
                ner_tags = process_ner(entities, tokens)

                triggers = []
                for sent_triggers in loaded_doc["triggers"]:
                    for trigger in sent_triggers:
                        triggers.append({"start": trigger[0], "end": trigger[1] + 1, "type": trigger[2]})

                relations = []
                for rels in loaded_doc["relations"]:
                    for rel in rels:
                        head_start = rel[0]
                        head_end = rel[1] + 1
                        head_idx = -1
                        tail_start = rel[2]
                        tail_end = rel[3] + 1
                        tail_idx = -1
                        # Resolve head/tail spans to entity indices
                        # (assumes head and tail are distinct spans).
                        for idx, entity in enumerate(entities):
                            if entity["start"] == head_start and entity["end"] == head_end:
                                head_idx = idx
                            elif entity["start"] == tail_start and entity["end"] == tail_end:
                                tail_idx = idx
                        if head_idx == -1 or tail_idx == -1:
                            print(f"Warning: Relation {rel} in document {doc_key} does not match any annotated entity span; skipping.")
                            continue
                        relation_type = rel[4]
                        factuality = rel[5]
                        relations.append({
                            "head": head_idx,
                            "head_start": head_start,
                            "head_end": head_end,
                            "head_type": entities[head_idx]["type"],
                            "tail": tail_idx,
                            "tail_start": tail_start,
                            "tail_end": tail_end,
                            "tail_type": entities[tail_idx]["type"],
                            "type": relation_type,
                            "factuality": factuality
                        })

                triplets = []
                for sent_triplets in loaded_doc["triplets"]:
                    for triplet in sent_triplets:
                        triplets.append({
                            "head_start": triplet[0],
                            "head_end": triplet[1] + 1,
                            "tail_start": triplet[2],
                            "tail_end": triplet[3] + 1,
                            "trigger_start": triplet[4],
                            "trigger_end": triplet[5] + 1,
                            # The relation label follows the three offset pairs
                            # (the original code reused triplet[4] here, which
                            # is the trigger start, not the label).
                            "relation": triplet[6]
                        })

                doc = {
                    "doc_key": doc_key,
                    "tokens": tokens,
                    "sentences": sentences,
                    "ner": entities,
                    "triggers": triggers,
                    "relations": relations,
                    "triplets": triplets,
                    "ner_tags": ner_tags
                }

                if self.config.name == "sentence_level":
                    # Re-emit each sentence as its own example, shifting all
                    # offsets to be sentence-relative and keeping only
                    # annotations fully contained within the sentence.
                    for sent_idx, sent in enumerate(sentences):
                        sentence = {
                            "id": f"{doc_key}_sent_{sent_idx}",
                            "doc_key": doc_key,
                            "tokens": doc["tokens"][sent["start"]:sent["end"]],
                            "ner": [
                                {
                                    "start": entity["start"] - sent["start"],
                                    "end": entity["end"] - sent["start"],
                                    "type": entity["type"]
                                } for entity in entities
                                if entity["start"] >= sent["start"] and entity["end"] <= sent["end"]
                            ],
                            "ner_tags": ner_tags[sent["start"]:sent["end"]],
                            "triggers": [
                                {
                                    "start": trigger["start"] - sent["start"],
                                    "end": trigger["end"] - sent["start"],
                                    "type": trigger["type"]
                                } for trigger in triggers
                                if trigger["start"] >= sent["start"] and trigger["end"] <= sent["end"]
                            ],
                            "relations": [
                                {
                                    "head": rel["head"],
                                    "head_start": rel["head_start"] - sent["start"],
                                    "head_end": rel["head_end"] - sent["start"],
                                    "head_type": rel["head_type"],
                                    "tail": rel["tail"],
                                    "tail_start": rel["tail_start"] - sent["start"],
                                    "tail_end": rel["tail_end"] - sent["start"],
                                    "tail_type": rel["tail_type"],
                                    "type": rel["type"],
                                    "factuality": rel["factuality"]
                                } for rel in relations
                                if (rel["head_start"] >= sent["start"] and rel["head_end"] <= sent["end"]) and
                                   (rel["tail_start"] >= sent["start"] and rel["tail_end"] <= sent["end"])
                            ],
                            "triplets": [
                                {
                                    "head_start": triplet["head_start"] - sent["start"],
                                    "head_end": triplet["head_end"] - sent["start"],
                                    "tail_start": triplet["tail_start"] - sent["start"],
                                    "tail_end": triplet["tail_end"] - sent["start"],
                                    "trigger_start": triplet["trigger_start"] - sent["start"],
                                    "trigger_end": triplet["trigger_end"] - sent["start"],
                                    "relation": triplet["relation"]
                                } for triplet in triplets
                                if (triplet["head_start"] >= sent["start"] and triplet["head_end"] <= sent["end"]) and
                                   (triplet["tail_start"] >= sent["start"] and triplet["tail_end"] <= sent["end"])
                            ]
                        }
                        yield sentence["id"], sentence
                else:
                    yield doc_key, doc
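

# A worked example of the BIO conversion performed by process_ner below
# (hypothetical spans; offsets are token-level with exclusive ends):
#   tokens   = ["Inulin", "increased", "Bifidobacterium", "abundance"]
#   entities = [{"start": 0, "end": 1, "type": "Nutrient"},
#               {"start": 2, "end": 3, "type": "Microorganism"}]
#   process_ner(entities, tokens)
#   -> ["B-Nutrient", "O", "B-Microorganism", "O"]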

def process_ner(entities, tokens):
    """Converts entity spans to BIO tags for NER."""
    bio_tags = ["O"] * len(tokens)
    for entity in entities:
        start_index = entity["start"]
        end_index = entity["end"]
        entity_type = entity["type"]
        # Spans are exclusive-end token offsets; skip anything out of bounds
        # (this check makes further per-index bounds checks unnecessary).
        if start_index >= len(tokens) or end_index > len(tokens):
            print(f"Warning: Entity {entity} is out of bounds. Skipping.")
            continue
        bio_tags[start_index] = f"B-{entity_type}"
        for i in range(start_index + 1, end_index):
            bio_tags[i] = f"I-{entity_type}"
    return bio_tags
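

if __name__ == "__main__":
    # Minimal smoke test (an assumption, not part of the dataset script proper):
    # loading this script directly requires network access to the GitHub raw
    # URLs above and, on recent `datasets` versions, trust_remote_code=True.
    ds = datasets.load_dataset(__file__, name="default", trust_remote_code=True)
    print(ds["train"][0]["doc_key"])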