Evaluation

Commit 3c88cb0131 (parent 9efe7bd128) by Kamil Guttmann, 2022-05-03 19:47:36 +02:00
3 changed files with 71 additions and 1 deletion

Dockerfile (modified)

@@ -15,6 +15,6 @@ RUN apt-get update && apt-get install -y python3-pip unzip && rm -rf /var/lib/ap
 RUN export PATH="$PATH:/root/.local/bin"
-RUN pip3 install kaggle pandas scikit-learn tensorflow keras
+RUN pip3 install kaggle pandas scikit-learn tensorflow keras matplotlib numpy
 RUN mkdir /.kaggle && chmod o+w /.kaggle

Jenkinsfile.eval (new file, 48 lines)

@@ -0,0 +1,48 @@
pipeline {
    agent {
        docker { image 'kamilguttmann/ium:eval' }
    }
    parameters {
        gitParameter branchFilter: 'origin/(.*)', defaultValue: 'master', name: 'BRANCH', type: 'PT_BRANCH'
        buildSelector(
            defaultSelector: lastSuccessful(),
            description: 'Which build to use for copying dataset artifacts',
            name: 'DATASET_BUILD_SELECTOR'
        )
        buildSelector(
            defaultSelector: lastSuccessful(),
            description: 'Which build to use for copying train artifacts',
            name: 'TRAIN_BUILD_SELECTOR'
        )
    }
    stages {
        stage('Copy artifacts') {
            steps {
                copyArtifacts fingerprintArtifacts: true, projectName: 's444380-create-dataset', selector: buildParameter('DATASET_BUILD_SELECTOR')
                copyArtifacts fingerprintArtifacts: true, projectName: 's444380-training/${BRANCH}', selector: buildParameter('TRAIN_BUILD_SELECTOR')
                copyArtifacts filter: "eval_results.csv", projectName: 's444380-evaluation/${BRANCH}', optional: true
            }
        }
        stage("Checkout git") {
            steps {
                checkout([$class: 'GitSCM', branches: [[name: '*/master']], extensions: [], userRemoteConfigs: [[credentialsId: 's444380', url: 'https://git.wmi.amu.edu.pl/s444380/ium_444380.git']]])
            }
        }
        stage("Evaluation") {
            steps {
                sh "chmod u+x ./evaluate.py"
                sh "python3 ./evaluate.py"
                archiveArtifacts artifacts: "eval_results.csv, plot.png", onlyIfSuccessful: true
            }
        }
    }
    post {
        success {
            emailext body: "SUCCESS", subject: "s444380-evaluation", to: "e19191c5.uam.onmicrosoft.com@emea.teams.ms"
        }
        failure {
            emailext body: "FAILURE", subject: "s444380-evaluation", to: "e19191c5.uam.onmicrosoft.com@emea.teams.ms"
        }
    }
}
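
The job is fully parameterized, so it can also be triggered outside the Jenkins UI. A minimal sketch, assuming a placeholder Jenkins host, job name and credentials (none of these values come from this commit); DATASET_BUILD_SELECTOR and TRAIN_BUILD_SELECTOR are left at their lastSuccessful() defaults:

# Hypothetical trigger for the evaluation job via the Jenkins REST API.
# JENKINS_URL, JOB_NAME, USER and API_TOKEN are placeholders, not values
# taken from this commit; adjust them to the actual Jenkins instance.
import requests

JENKINS_URL = "https://jenkins.example.com"   # placeholder host
JOB_NAME = "s444380-evaluation"               # assumed job name (matches the e-mail subject)
USER, API_TOKEN = "user", "token"             # placeholder credentials

# Only BRANCH is passed explicitly; the two build selector parameters
# fall back to their lastSuccessful() defaults.
resp = requests.post(
    f"{JENKINS_URL}/job/{JOB_NAME}/buildWithParameters",
    params={"BRANCH": "master"},
    auth=(USER, API_TOKEN),
    timeout=30,
)
resp.raise_for_status()
print("Triggered, queue item:", resp.headers.get("Location"))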

evaluate.py (new file, 22 lines)

@@ -0,0 +1,22 @@
from sklearn.metrics import accuracy_score
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np

# Load the test set together with the model predictions produced by the training job.
test_data = pd.read_csv("out.csv")
y_true = test_data["OFFENSE_CODE_GROUP"]
y_pred = test_data["PREDICTED"]

# Compute accuracy and append it to the running history of evaluation results.
accuracy = accuracy_score(y_true, y_pred)
with open("eval_results.csv", "a", encoding="utf-8") as f:
    f.write(f"{accuracy}\n")

# Plot accuracy across builds and save the figure as a build artifact.
eval_results = pd.read_csv("eval_results.csv", header=None).values
plt.plot(np.arange(len(eval_results)), eval_results)
plt.xlabel("Build")
plt.ylabel("Accuracy")
plt.savefig("plot.png")
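
The script can also be smoke-tested locally without the pipeline. A minimal sketch, assuming out.csv only needs the two columns evaluate.py reads (OFFENSE_CODE_GROUP and PREDICTED); the label values below are invented examples:

# Local smoke test for evaluate.py; the labels here are made up and only
# mirror the two columns the script reads.
import subprocess
import pandas as pd

pd.DataFrame({
    "OFFENSE_CODE_GROUP": ["Larceny", "Fraud", "Larceny", "Vandalism"],
    "PREDICTED":          ["Larceny", "Larceny", "Larceny", "Vandalism"],
}).to_csv("out.csv", index=False)

subprocess.run(["python3", "evaluate.py"], check=True)

# eval_results.csv now holds one accuracy value per run, plot.png the trend.
print(open("eval_results.csv").read())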