Do evaluation

commit de0cce83f9 (parent 8b1a58f696)
JenkinsfileEvaluation (new file, 46 lines)
@@ -0,0 +1,46 @@
pipeline {
    agent {
        dockerfile true
    }

    parameters {
        buildSelector(
            defaultSelector: lastSuccessful(),
            description: 'Which build to use for copying artifacts',
            name: 'BUILD_SELECTOR'
        )
    }

    triggers {
        upstream(upstreamProjects: 's495719-training', threshold: hudson.model.Result.SUCCESS)
    }

    stages {
        stage('Git') {
            steps {
                git(
                    url: "https://git.wmi.amu.edu.pl/s495719/ium_495719.git",
                    branch: "main"
                )
            }
        }
        stage('CopyArtifacts') {
            steps {
                copyArtifacts fingerprintArtifacts: true, projectName: 's495719-create-dataset', selector: buildParameter('BUILD_SELECTOR')
                copyArtifacts fingerprintArtifacts: true, projectName: 's495719-training', selector: buildParameter('BUILD_SELECTOR')
                copyArtifacts fingerprintArtifacts: true, projectName: 's495719-evaluation', selector: buildParameter('BUILD_SELECTOR'), optional: true
            }
        }
        stage('Script') {
            steps {
                sh 'chmod 777 ./evaluate.py'
                sh "python3 ./evaluate.py ${currentBuild.number}"
            }
        }
        stage('CreateArtifacts') {
            steps {
                archiveArtifacts artifacts: 'hp_test_predictions.csv,hp_test_metrics.csv'
            }
        }
    }
}
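Note: the CopyArtifacts stage also copies the previous artifacts of this very job (s495719-evaluation), with optional: true so the first evaluation build does not fail when no prior hp_test_metrics.csv exists; evaluate.py then appends one metrics row per build. A minimal sketch for inspecting the accumulated artifact locally, assuming the Build_Number/RMSE/MAE/R2 layout that evaluate.py writes (the regression check is illustrative, not part of the pipeline):

import pandas as pd

# Load the metrics history archived by the pipeline.
metrics = pd.read_csv('hp_test_metrics.csv')
print(metrics.sort_values('Build_Number').to_string(index=False))

# Illustrative check: warn if the newest build's RMSE is worse than the best so far.
latest_rmse = metrics.loc[metrics['Build_Number'].idxmax(), 'RMSE']
best_rmse = metrics['RMSE'].min()
if latest_rmse > best_rmse:
    print(f'RMSE regressed: {latest_rmse:.4f} vs best {best_rmse:.4f}')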
evaluate.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import pandas as pd
import numpy as np
import sys
import os
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from keras.models import load_model
from helper import prepare_tensors

# Build number is passed in by the Jenkins pipeline (${currentBuild.number}).
build_number = int(sys.argv[1])

# Load the held-out test split and turn it into model inputs and targets.
hp_test = pd.read_csv('hp_test.csv')
X_test, Y_test = prepare_tensors(hp_test)

# Model trained and archived by the s495719-training job.
model = load_model('hp_model.h5')

test_predictions = model.predict(X_test)

predictions_df = pd.DataFrame(test_predictions, columns=["Predicted_Price"])
predictions_df.to_csv('hp_test_predictions.csv', index=False)

# Regression metrics on the test set.
rmse = np.sqrt(mean_squared_error(Y_test, test_predictions))
mae = mean_absolute_error(Y_test, test_predictions)
r2 = r2_score(Y_test, test_predictions)

metrics_df = pd.DataFrame({
    'Build_Number': [build_number],
    'RMSE': [rmse],
    'MAE': [mae],
    'R2': [r2]
})

# Append to the metrics file copied from the previous evaluation build
# (the optional copyArtifacts step), so the history accumulates across builds.
metrics_file = 'hp_test_metrics.csv'
if os.path.isfile(metrics_file):
    existing_metrics_df = pd.read_csv(metrics_file)
    updated_metrics_df = pd.concat([existing_metrics_df, metrics_df], ignore_index=True)
else:
    updated_metrics_df = metrics_df

updated_metrics_df.to_csv(metrics_file, index=False)
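helper.py is not part of this commit, so prepare_tensors is only known from its call site above. A minimal sketch of what it might do, assuming hp_test.csv holds numeric feature columns plus a price target; the column name 'Price' and the float32 conversion are guesses, not the author's code:

import numpy as np
import pandas as pd

def prepare_tensors(df: pd.DataFrame, target: str = 'Price'):
    # Hypothetical: split the frame into Keras-ready float32 arrays.
    # 'Price' is an assumed target column name; helper.py is not in this commit.
    Y = df[target].to_numpy(dtype=np.float32)
    X = df.drop(columns=[target]).to_numpy(dtype=np.float32)
    return X, Y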