From de0cce83f9134b5e11f066bb0767eb2a939cc2d7 Mon Sep 17 00:00:00 2001
From: PawelDopierala
Date: Tue, 14 May 2024 16:21:16 +0200
Subject: [PATCH] Do evaluation

---
 JenkinsfileEvaluation | 46 +++++++++++++++++++++++++++++++++++++++++++
 evaluate.py           | 39 ++++++++++++++++++++++++++++++++++++
 2 files changed, 85 insertions(+)
 create mode 100644 JenkinsfileEvaluation
 create mode 100644 evaluate.py

diff --git a/JenkinsfileEvaluation b/JenkinsfileEvaluation
new file mode 100644
index 0000000..60c97a4
--- /dev/null
+++ b/JenkinsfileEvaluation
@@ -0,0 +1,46 @@
+pipeline {
+    agent {
+        dockerfile true
+    }
+
+    parameters{
+        buildSelector(
+            defaultSelector: lastSuccessful(),
+            description: 'Which build to use for copying artifacts',
+            name: 'BUILD_SELECTOR'
+        )
+    }
+
+    triggers {
+        upstream(upstreamProjects: 's495719-training', threshold: hudson.model.Result.SUCCESS)
+    }
+
+    stages {
+        stage('Git') {
+            steps {
+                git(
+                    url: "https://git.wmi.amu.edu.pl/s495719/ium_495719.git",
+                    branch: "main"
+                )
+            }
+        }
+        stage('CopyArtifacts') {
+            steps {
+                copyArtifacts fingerprintArtifacts: true, projectName: 's495719-create-dataset', selector: buildParameter('BUILD_SELECTOR')
+                copyArtifacts fingerprintArtifacts: true, projectName: 's495719-training', selector: buildParameter('BUILD_SELECTOR')
+                copyArtifacts fingerprintArtifacts: true, projectName: 's495719-evaluation', selector: buildParameter('BUILD_SELECTOR'), optional: true
+            }
+        }
+        stage('Script') {
+            steps {
+                sh 'chmod 777 ./evaluate.py'
+                sh "python3 ./evaluate.py ${currentBuild.number}"
+            }
+        }
+        stage('CreateArtifacts') {
+            steps {
+                archiveArtifacts artifacts: 'hp_test_predictions.csv,hp_test_metrics.csv'
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/evaluate.py b/evaluate.py
new file mode 100644
index 0000000..bc03a8c
--- /dev/null
+++ b/evaluate.py
@@ -0,0 +1,39 @@
+import pandas as pd
+import numpy as np
+import sys
+import os
+from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
+from keras.models import load_model
+from helper import prepare_tensors
+
+build_number = int(sys.argv[1])
+
+hp_test = pd.read_csv('hp_test.csv')
+X_test, Y_test = prepare_tensors(hp_test)
+
+model = load_model('hp_model.h5')
+
+test_predictions = model.predict(X_test)
+
+predictions_df = pd.DataFrame(test_predictions, columns=["Predicted_Price"])
+predictions_df.to_csv('hp_test_predictions.csv', index=False)
+
+rmse = np.sqrt(mean_squared_error(Y_test, test_predictions))
+mae = mean_absolute_error(Y_test, test_predictions)
+r2 = r2_score(Y_test, test_predictions)
+
+metrics_df = pd.DataFrame({
+    'Build_Number': [build_number],
+    'RMSE': [rmse],
+    'MAE': [mae],
+    'R2': [r2]
+})
+
+metrics_file = 'hp_test_metrics.csv'
+if os.path.isfile(metrics_file):
+    existing_metrics_df = pd.read_csv(metrics_file)
+    updated_metrics_df = pd.concat([existing_metrics_df, metrics_df], ignore_index=True)
+else:
+    updated_metrics_df = metrics_df
+
+updated_metrics_df.to_csv(metrics_file, index=False)
\ No newline at end of file
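
Note: evaluate.py imports prepare_tensors from a helper module and reads hp_test.csv and hp_model.h5, none of which are added by this patch; they are expected to land in the workspace via the CopyArtifacts stage from the s495719-create-dataset and s495719-training jobs. A minimal sketch of what such a helper could look like, assuming hp_test.csv holds only numeric features plus a 'Price' target column (both assumptions, not confirmed by this patch):

    # helper.py -- hypothetical sketch, not part of this patch.
    # Assumes hp_test.csv contains numeric feature columns and a 'Price' target;
    # the real helper used by evaluate.py may differ.
    import pandas as pd


    def prepare_tensors(df: pd.DataFrame):
        # Split the assumed 'Price' target off from the feature columns
        # and return float32 arrays that the Keras model can consume directly.
        Y = df['Price'].values.astype('float32')
        X = df.drop(columns=['Price']).values.astype('float32')
        return X, Y

With such a helper present in the image, the Script stage reduces to "python3 ./evaluate.py <build number>", which writes hp_test_predictions.csv and appends one metrics row per build to hp_test_metrics.csv.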