From a3a3043bbb5d19b9c2c7c8211c92d4e55b4f8b12 Mon Sep 17 00:00:00 2001
From: Isaac Riley
Date: Mon, 4 May 2020 01:10:20 +0200
Subject: [PATCH] added micro-f1 code and assert statement

---
 eval/metrics.py         | 6 ++----
 testing/eval_testing.py | 2 +-
 2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/eval/metrics.py b/eval/metrics.py
index feacd0a..1e74fb2 100644
--- a/eval/metrics.py
+++ b/eval/metrics.py
@@ -17,7 +17,7 @@ def f1_score(y_true, y_pred, labels, average):
 
     :return: returns a list of Result class objects. Use :func:`~eval.metrics.Result.print_result` to print F1 Score on the Console
     """
-
+    assert len(list(y_true)) == len(list(y_pred))
     if average is None or average == const.AVG_MACRO:
         pr_list = get_precision_recall(y_true, y_pred, labels)
         f1_score_list = []
@@ -36,9 +36,7 @@ def f1_score(y_true, y_pred, labels, average):
         return [Result(None, None, average, None, f1_sum / len(pr_list))]
 
     elif average == const.AVG_MICRO:
-        print('test test test')
-        print("another test comment")
-        pass
+        return [Result(None, None, average, None, sum(a == b for a, b in zip(y_true, y_pred)) / len(list(y_true)))]
 
     return None
 
diff --git a/testing/eval_testing.py b/testing/eval_testing.py
index 89782fa..42bb5e1 100644
--- a/testing/eval_testing.py
+++ b/testing/eval_testing.py
@@ -4,7 +4,7 @@ import utils.constants as const
 y_true = ['positive', 'positive', 'negative', 'negative', 'positive', 'positive', 'negative', 'negative']
 y_pred = ['positive', 'negative', 'negative', 'positive', 'positive', 'negative', 'negative', 'negative']
 
-result_list = f1_score(y_true, y_pred, ['positive', 'negative'], const.AVG_MICRO)
+result_list = f1_score(y_true, y_pred, ['positive', 'negative'], None)
 
 for result in result_list:
     result.print_result()
\ No newline at end of file
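
Note: when every label is included, micro-averaged F1 pools true positives, false positives and false negatives across all classes, and each misclassification counts once as a false positive and once as a false negative, so the score reduces to plain accuracy; that is what the new AVG_MICRO branch computes once the match count is divided by the number of samples. As a sanity check outside the patch, a minimal pure-Python sketch on the toy data from testing/eval_testing.py (standalone, not using the repository's Result API):

    # Toy data copied from testing/eval_testing.py
    y_true = ['positive', 'positive', 'negative', 'negative', 'positive', 'positive', 'negative', 'negative']
    y_pred = ['positive', 'negative', 'negative', 'positive', 'positive', 'negative', 'negative', 'negative']

    matches = sum(a == b for a, b in zip(y_true, y_pred))  # 5 of the 8 predictions agree
    print(matches / len(y_true))                           # 0.625, the micro-averaged F1 over the full label set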