Evaluate job init

MatOgr 2022-05-02 16:45:31 +02:00
parent 16e4c0b97d
commit 2a4f1b1496
4 changed files with 99 additions and 5 deletions


@@ -0,0 +1,49 @@
pipeline {
agent {
docker { image 's478841-image:latest' }
}
parameters {
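// Branch to check out and upstream build selector used when copying artifacts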
gitParameter branchFilter: 'origin/(.*)', defaultValue: 'develop', name: 'BRANCH_NAME', type: 'PT_BRANCH'
buildSelector(
defaultSelector: upstream(),
description: 'Build used for artifacts copying',
name:'BUILD_SELECTOR')
}
stages {
stage('Copy artifacts') {
steps {
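// Check out the branch selected through the BRANCH_NAME parameter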
git branch: "${params.BRANCH_NAME}", url: 'https://git.wmi.amu.edu.pl/s478841/ium_478841'
copyArtifacts filter: 'data/*test*', fingerprintArtifacts: true, projectName: 's478841-create-dataset', selector: buildParameter('BUILD_SELECTOR')
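// Also copy this job's previous evaluation results (if present) so metrics accumulate across builds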
copyArtifacts filter: 'data/evaluation_results.csv', projectName: 's478841-evaluation/develop/', optional: true
}
}
stage('Evaluate model') {
steps {
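// Make the workspace contents executable, then run the evaluation script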
sh 'chmod -R +x $WORKSPACE'
sh 'python3 scripts/evaluate.py'
}
}
stage('Archive artifacts') {
steps {
archiveArtifacts artifacts: 'data/evaluation_results.csv', onlyIfSuccessful: true
archiveArtifacts artifacts: 'data/plots.png', onlyIfSuccessful: true
}
}
}
post {
success {
emailext body: 'SUCCESS', subject: 's478841-evaluation', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
}
failure {
emailext body: 'FAILURE', subject: 's478841-evaluation', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
}
unstable {
emailext body: 'UNSTABLE', subject: 's478841-evaluation', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
}
changed {
emailext body: 'CHANGED', subject: 's478841-evaluation', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
}
}
}


@@ -4,7 +4,7 @@ pipeline {
}
parameters {
string(
defaultValue: '5',
defaultValue: '140',
description: 'epochs number',
name: 'epochs'
)
@@ -44,6 +44,7 @@ pipeline {
post {
success {
emailext body: 'SUCCESS', subject: 's478841-training', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
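// Trigger the downstream evaluation job after a successful training build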
build job: 's478841-evaluation/develop'
}
failure {

scripts/evaluate.py Normal file

@@ -0,0 +1,41 @@
from model import AvocadoDataset, evaluate_model
from torch.utils.data import DataLoader
from torch.jit import load as load_model
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
# * Load the test data
test_data = DataLoader(AvocadoDataset(
'./data/avocado.data.test'), batch_size=1, shuffle=False)
# * Load the model
model = load_model('./data/model_scripted.pt')
model.eval()
# * Append new inference data
with open('./data/evaluation_results.csv', 'a+') as f:
f.write("{0},{1},{2}\n".format(*evaluate_model(test_data, model)))
# * Load all inference data gathered (till the current one)
results = pd.read_csv('./data/evaluation_results.csv', names=['MSE', 'RMSE', 'MAE'])  # no header row is written above, so name the columns explicitly
# * Plot the results
plt.plot(range(1, len(results)+1), results['MSE'], color='green')
plt.scatter(range(1, len(results)+1),
results['MSE'], label='MSE', color='green', marker='.')
plt.plot(range(1, len(results)+1), results['RMSE'], color='darkorange')
plt.scatter(range(1, len(results)+1),
results['RMSE'], label='RMSE', color='darkorange', marker='.')
plt.plot(range(1, len(results)+1), results['MAE'], color='blue')
plt.scatter(range(1, len(results)+1),
results['MAE'], label='MAE', color='blue', marker='.')
plt.xticks(range(1, len(results)+1))
plt.ylabel('Metric value')
plt.xlabel('Build number')
plt.legend()
# * Save figure
plt.savefig('data/plots.png')

scripts/model.py

@@ -2,7 +2,7 @@ import argparse
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, mean_absolute_error
import torch
from torch import nn
@@ -101,7 +101,10 @@ def evaluate_model(test_dl, model):
actuals.append(actual)
predictions, actuals = np.vstack(predictions), np.vstack(actuals)
# * Return MSE, RMSE and MAE values
return mean_squared_error(actuals, predictions)
mse = mean_squared_error(actuals, predictions)
rmse = mean_squared_error(actuals, predictions, squared=False)
mae = mean_absolute_error(actuals, predictions)
return mse, rmse, mae
def predict(row, model):
@@ -155,8 +158,8 @@ if __name__ == '__main__':
train_model(train_dl, model, int(epochs))
# * Evaluate model
mse = evaluate_model(validate_dl, model)
print(f"\nEvaluation\t\tMSE: {mse}, RMSE: {np.sqrt(mse)}")
mse, rmse, mae = evaluate_model(validate_dl, model)
print(f"\nEvaluation\t\tMSE: {mse}, RMSE: {rmse}, MAE: {mae}")
# * Prediction
predictions = [(predict(row, model)[0], row[1].item()) for row in test_dl]