# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| """TODO: Add a description here.""" | |
| import evaluate | |
| import datasets | |
| import numpy as np | |

# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""

_DESCRIPTION = """\
Relation extraction metric. Computes per-type and overall (micro- and macro-averaged)
precision, recall and F1 over predicted relation triplets, in either "strict" mode
(the relation type, the head and tail arguments and their entity types must all match)
or "boundaries" mode (only the relation type and the head and tail arguments must match).
"""

_KWARGS_DESCRIPTION = """
Computes precision, recall and F1 scores for relation extraction, per relation type and overall.
Args:
    predictions: list of lists of predicted relation triplets, one inner list per sentence.
        Each triplet is a dict with the keys "head", "head_type", "type", "tail" and "tail_type".
    references: list of lists of reference relation triplets, one inner list per sentence,
        in the same format as `predictions`.
    mode: "strict" requires the head, head type, tail and tail type to match;
        "boundaries" only requires the head and tail to match. Defaults to "strict".
    relation_types: optional list of relation types to evaluate; inferred from the
        references when not provided.
Returns:
    A dict mapping each relation type (plus "ALL") to its "tp", "fp" and "fn" counts and its
    precision ("p"), recall ("r") and F1 ("f1") scores in percent; "ALL" also contains the
    macro-averaged scores ("Macro_p", "Macro_r", "Macro_f1").
Examples:
    >>> metric = evaluate.load("relation_extraction")  # or the local path to this script
    >>> references = [[{"head": "Alice", "head_type": "PER", "type": "works_for", "tail": "Acme", "tail_type": "ORG"}]]
    >>> predictions = [[{"head": "Alice", "head_type": "PER", "type": "works_for", "tail": "Acme", "tail_type": "ORG"}]]
    >>> results = metric.compute(predictions=predictions, references=references)
    >>> print(results["ALL"]["f1"])
    100.0
"""

# TODO: Define external resources urls if needed
BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"


class relation_extraction(evaluate.Metric):
    """Precision, recall and F1 for relation extraction, per relation type and overall."""

    def _info(self):
        # TODO: Specify the evaluate.EvaluationModuleInfo object
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference
            features=datasets.Features({
                'predictions': datasets.Sequence(datasets.Sequence(datasets.Features({
                    "head": datasets.Value("string"),
                    "head_type": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "tail": datasets.Value("string"),
                    "tail_type": datasets.Value("string"),
                }))),
                'references': datasets.Sequence(datasets.Sequence(datasets.Features({
                    "head": datasets.Value("string"),
                    "head_type": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "tail": datasets.Value("string"),
                    "tail_type": datasets.Value("string"),
                }))),
            }),
            # Homepage of the module for documentation
            homepage="http://module.homepage",
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"]
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores"""
        # TODO: Download external resources if needed
        pass

    def _compute(self, predictions, references, mode="strict", relation_types=None):
        """Returns the scores"""
        assert mode in ["strict", "boundaries"]
        # Construct relation_types from the ground truth if not given
        # (a None default avoids the mutable-default-argument pitfall).
        if not relation_types:
            relation_types = []
            for triplets in references:
                for triplet in triplets:
                    relation = triplet["type"]
                    if relation not in relation_types:
                        relation_types.append(relation)
        scores = {rel: {"tp": 0, "fp": 0, "fn": 0} for rel in relation_types + ["ALL"]}
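        # Each entry accumulates true positives, false positives and false negatives;
        # precision ("p"), recall ("r") and F1 ("f1") are filled in below, and the
        # "ALL" bucket additionally receives the micro- and macro-averaged scores.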
        # Count ground-truth and predicted relations (informational; not returned)
        n_sents = len(references)
        n_rels = sum(len(sent) for sent in references)
        n_found = sum(len(sent) for sent in predictions)
        # Count TP, FP and FN per relation type
        for pred_sent, gt_sent in zip(predictions, references):
            for rel_type in relation_types:
                # strict mode takes argument types into account
                if mode == "strict":
                    pred_rels = {(rel["head"], rel["head_type"], rel["tail"], rel["tail_type"])
                                 for rel in pred_sent if rel["type"] == rel_type}
                    gt_rels = {(rel["head"], rel["head_type"], rel["tail"], rel["tail_type"])
                               for rel in gt_sent if rel["type"] == rel_type}
                # boundaries mode only takes argument spans into account
                elif mode == "boundaries":
                    pred_rels = {(rel["head"], rel["tail"]) for rel in pred_sent if rel["type"] == rel_type}
                    gt_rels = {(rel["head"], rel["tail"]) for rel in gt_sent if rel["type"] == rel_type}
                scores[rel_type]["tp"] += len(pred_rels & gt_rels)
                scores[rel_type]["fp"] += len(pred_rels - gt_rels)
                scores[rel_type]["fn"] += len(gt_rels - pred_rels)
        # Compute per relation type Precision / Recall / F1
        for rel_type in scores.keys():
            if scores[rel_type]["tp"]:
                scores[rel_type]["p"] = 100 * scores[rel_type]["tp"] / (scores[rel_type]["fp"] + scores[rel_type]["tp"])
                scores[rel_type]["r"] = 100 * scores[rel_type]["tp"] / (scores[rel_type]["fn"] + scores[rel_type]["tp"])
            else:
                scores[rel_type]["p"], scores[rel_type]["r"] = 0, 0
            if scores[rel_type]["p"] + scores[rel_type]["r"] != 0:
                scores[rel_type]["f1"] = (2 * scores[rel_type]["p"] * scores[rel_type]["r"]
                                          / (scores[rel_type]["p"] + scores[rel_type]["r"]))
            else:
                scores[rel_type]["f1"] = 0
        # Compute micro F1 scores
        tp = sum(scores[rel_type]["tp"] for rel_type in relation_types)
        fp = sum(scores[rel_type]["fp"] for rel_type in relation_types)
        fn = sum(scores[rel_type]["fn"] for rel_type in relation_types)
        if tp:
            precision = 100 * tp / (tp + fp)
            recall = 100 * tp / (tp + fn)
            f1 = 2 * precision * recall / (precision + recall)
        else:
            precision, recall, f1 = 0, 0, 0
        scores["ALL"]["p"] = precision
        scores["ALL"]["r"] = recall
        scores["ALL"]["f1"] = f1
        scores["ALL"]["tp"] = tp
        scores["ALL"]["fp"] = fp
        scores["ALL"]["fn"] = fn
        # Compute macro F1 scores
        scores["ALL"]["Macro_f1"] = np.mean([scores[rel_type]["f1"] for rel_type in relation_types])
        scores["ALL"]["Macro_p"] = np.mean([scores[rel_type]["p"] for rel_type in relation_types])
        scores["ALL"]["Macro_r"] = np.mean([scores[rel_type]["r"] for rel_type in relation_types])
        return scores
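
# A minimal usage sketch, assuming this script is saved locally as relation_extraction.py
# (the triplets below are illustrative only):
#
#     import evaluate
#     metric = evaluate.load("relation_extraction.py")
#     references = [[{"head": "Alice", "head_type": "PER", "type": "works_for",
#                     "tail": "Acme", "tail_type": "ORG"}]]
#     predictions = [[{"head": "Alice", "head_type": "PER", "type": "works_for",
#                      "tail": "Acme", "tail_type": "ORG"}]]
#     results = metric.compute(predictions=predictions, references=references)
#     print(results["ALL"]["f1"])       # 100.0 for this exact match
#     print(results["ALL"]["Macro_f1"])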