From 54b9dab048e2a318450b0bba87d542212ab2840b Mon Sep 17 00:00:00 2001
From: s444501
Date: Sun, 1 May 2022 16:56:56 +0200
Subject: [PATCH] Add model evaluation script and Jenkins evaluation pipeline

---
 eval.py                | 60 ++++++++++++++++++++++++++++++++++++++++++
 evaluation.Jenkinsfile | 42 +++++++++++++++++++++++++++++
 2 files changed, 102 insertions(+)
 create mode 100644 eval.py
 create mode 100644 evaluation.Jenkinsfile

diff --git a/eval.py b/eval.py
new file mode 100644
index 0000000..22fd9a9
--- /dev/null
+++ b/eval.py
@@ -0,0 +1,60 @@
+import torch
+import matplotlib.pyplot as plt
+import os
+import json
+from sklearn.metrics import accuracy_score, precision_score, recall_score
+import pandas as pd
+
+# Loading
+# No need to load the model or the test set here: the training job already produced a CSV with the prediction results
+# model = torch.load('model.pkl')
+# test_set = pd.read_csv('d_test.csv', encoding='latin-1')
+test_results = pd.read_csv('neural_network_prediction_results.csv')
+
+
+# Evaluation
+y_true = test_results['Testing Y']
+y_predicted = test_results['Predicted Y']
+
+accuracy = accuracy_score(y_true, y_predicted)
+precision = precision_score(y_true, y_predicted, average='macro')
+recall = recall_score(y_true, y_predicted, average='macro')
+
+eval_results = {
+    'Accuracy': accuracy,
+    'Macro-Avg Precision': precision,
+    'Macro-Avg Recall': recall
+}
+
+
+filename = 'eval_results.json'
+if not os.path.exists(filename):
+    with open(filename, 'w') as file:
+        json.dump({'eval_results': []}, file, indent=2)
+
+with open(filename, 'r+') as file:
+    file_data = json.load(file)
+    file_data['eval_results'].append(eval_results)
+    file.seek(0)  # rewind and overwrite; the file only ever grows, so no truncate() is needed
+    json.dump(file_data, file, indent=2)
+
+with open(filename, 'r') as file:
+    results = json.load(file)['eval_results']
+    f_acc = []
+    f_prc = []
+    f_rec = []
+    for res in results:
+        f_acc.append(res['Accuracy'])
+        f_prc.append(res['Macro-Avg Precision'])
+        f_rec.append(res['Macro-Avg Recall'])
+
+build_axis = [i+1 for i in range(len(f_acc))]
+
+plt.xlabel('Build')
+plt.ylabel('Score')
+plt.plot(build_axis, f_acc, label='Accuracy')
+plt.plot(build_axis, f_prc, label='Macro-Avg Precision')
+plt.plot(build_axis, f_rec, label='Macro-Avg Recall')
+plt.legend()
+plt.savefig('metrics.png')  # save before show(), otherwise the figure is cleared and an empty image is written
+plt.show()
diff --git a/evaluation.Jenkinsfile b/evaluation.Jenkinsfile
new file mode 100644
index 0000000..70773af
--- /dev/null
+++ b/evaluation.Jenkinsfile
@@ -0,0 +1,42 @@
+pipeline {
+    agent {
+        docker {image 'zadanie'}
+    }
+    parameters {
+        buildSelector(
+            defaultSelector: lastSuccessful(),
+            description: 'Which build to use for copying artifacts',
+            name: 'BUILD_SELECTOR'
+        )
+        string(
+            defaultValue: '100',
+            description: 'number of epochs',
+            name: 'EPOCH'
+        )
+    }
+    stages {
+
+        stage('Copy artifacts') {
+            steps {
+                copyArtifacts fingerprintArtifacts: true, projectName: 's444501-create-dataset', selector: buildParameter('BUILD_SELECTOR')
+            }
+        }
+        stage('Train model') {
+            steps {
+                withEnv(["EPOCH=${params.EPOCH}"]) {
+                    sh 'python biblioteki_ml.py $EPOCH'
+                }
+            }
+        }
+        stage('Archive model') {
+            steps {
+                archiveArtifacts artifacts: 'model.pkl, neural_network_prediction_results.csv'
+            }
+        }
+    }
+    post {
+        always {
+            emailext body: "${currentBuild.currentResult}", subject: 's444501-training', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
+        }
+    }
+}