From 9d6ffe8205fa29bb896764cdb32b103fda31b8b0 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Sat, 4 May 2024 15:54:55 +0200 Subject: [PATCH] IUM_06 --- metrics.py | 17 +++++++---------- predict.py | 1 + 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/metrics.py b/metrics.py index 9f5a041..50149f5 100644 --- a/metrics.py +++ b/metrics.py @@ -1,8 +1,4 @@ -from sklearn.metrics import ( - accuracy_score, - precision_score, - recall_score, -) +from sklearn.metrics import confusion_matrix import pandas as pd @@ -10,14 +6,15 @@ def main(): y_test = pd.read_csv("data/y_test.csv") y_pred = pd.read_csv("evaluation/y_pred.csv", header=None) - accuracy = accuracy_score(y_test, y_pred) - precision_micro = precision_score(y_test, y_pred, average="micro") - recall_micro = recall_score(y_test, y_pred, average="micro") + cm = confusion_matrix(y_test, y_pred) + print( + "Recall metric in the testing dataset: ", + cm[1, 1] / (cm[1, 0] + cm[1, 1]), + ) + accuracy = (cm[0, 0] + cm[1, 1]) / cm.sum() with open(r"evaluation/metrics.txt", "a") as f: f.write(f"Accuracy: {accuracy}\n") - f.write(f"Micro-average Precision: {precision_micro}\n") - f.write(f"Micro-average Recall: {recall_micro}\n") f.write(f"\n") diff --git a/predict.py b/predict.py index c1f476b..0f83334 100644 --- a/predict.py +++ b/predict.py @@ -11,6 +11,7 @@ import numpy as np def main(): model = load_model("model/model.keras") X_test = pd.read_csv("data/X_test.csv") + y_test = pd.read_csv("data/y_test.csv") y_pred = model.predict(X_test) y_pred = y_pred >= 0.5