From 970f35ca359237d3ed1d3790fdeb780127f9325b Mon Sep 17 00:00:00 2001
From: patrycjalazna
Date: Thu, 13 May 2021 17:05:07 +0200
Subject: [PATCH] plot

---
 Jenkinsfile-evaluation | 23 +++++++++++++---------
 avocado-evaluation.py  | 43 +++++++++++++++++++++++-------------------
 avocado-training.py    |  8 --------
 3 files changed, 38 insertions(+), 36 deletions(-)

diff --git a/Jenkinsfile-evaluation b/Jenkinsfile-evaluation
index 2ac7a80..36784ad 100644
--- a/Jenkinsfile-evaluation
+++ b/Jenkinsfile-evaluation
@@ -28,24 +28,29 @@ pipeline {
             steps {
                 copyArtifacts(fingerprintArtifacts: true, projectName: 's434742-create-dataset', selector: buildParameter('BUILD_SELECTOR_DATASET'))
-                copyArtifacts(fingerprintArtifacts: true, projectName: 's434742-training', selector: buildParameter('BUILD_SELECTOR_TRAINING'))
-                copyArtifacts(fingerprintArtifacts: true, projectName: 's434742-evaluation', selector: buildParameter('BUILD_SELECTOR_EVALUATION'))
+                copyArtifacts(fingerprintArtifacts: true, projectName: 's434742-training/master', selector: buildParameter('BUILD_SELECTOR_TRAINING'))
+                copyArtifacts(fingerprintArtifacts: true, projectName: 's434742-evaluation/master', selector: buildParameter('BUILD_SELECTOR_EVALUATION'), optional: true)
             }
         }
 
-        stage('docker-training') {
+        stage('evaluation') {
             steps {
                 script {
-                    def img = docker.build('patlaz/ium:1.0')
-                    img.inside {
-                        sh 'chmod +x avocado-evaluation.py'
-                        sh 'python3 avocado-evaluation.py'
-
-                    }
+
+                    sh 'chmod +x avocado-evaluation.py'
+                    sh 'python3 avocado-evaluation.py'
+
                 }
             }
         }
+
+        stage('archiveArtifacts') {
+            steps {
+                archiveArtifacts 'eval_results.txt'
+                archiveArtifacts 'eval_plot.png'
+            }
+        }
 
         stage('sendMail') {
             steps {
diff --git a/avocado-evaluation.py b/avocado-evaluation.py
index a69c692..b75807d 100644
--- a/avocado-evaluation.py
+++ b/avocado-evaluation.py
@@ -3,23 +3,7 @@ import numpy as np
 from tensorflow import keras
 import matplotlib.pyplot as plt
 from keras import backend as K
-
-def recall_m(y_true, y_pred):
-    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
-    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
-    recall = true_positives / (possible_positives + K.epsilon())
-    return recall
-
-def precision_m(y_true, y_pred):
-    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
-    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
-    precision = true_positives / (predicted_positives + K.epsilon())
-    return precision
-
-def f1_m(y_true, y_pred):
-    precision = precision_m(y_true, y_pred)
-    recall = recall_m(y_true, y_pred)
-    return 2*((precision*recall)/(precision+recall+K.epsilon()))
+from sklearn.metrics import mean_squared_error
 
 
 # load the model
@@ -37,6 +21,27 @@ y_train = avocado_train[['type']]
 X_test = avocado_test[['average_price', 'total_volume', '4046', '4225', '4770', 'total_bags', 'small_bags', 'large_bags', 'xlarge_bags']]
 y_test = avocado_test[['type']]
 
+# predict on the test set
+predictions = model.predict(X_test)
+#pd.DataFrame(predictions).to_csv('prediction_results.csv')
+
 # evaluation: mean squared error between predictions and labels
-loss, accuracy, f1_score, precision, recall = model.evaluate(X_test, y_test, verbose=0)
-# with open('')
\ No newline at end of file
+error = mean_squared_error(y_test, predictions)
+print('Error: ', error)
+
+with open('eval_results.txt', 'a') as f:  # append this build's error
+    f.write(str(error) + "\n")
+
+# plot: one point per build recorded so far
+with open('eval_results.txt', 'r') as f:  # read back every build's error
+    lines = f.readlines()
+
+fig = plt.figure(figsize=(5, 5))
+chart = fig.add_subplot()
+chart.set_ylabel("MSE")
+chart.set_xlabel("Build")
+x = np.arange(0, len(lines), 1)
+y = [float(line) for line in lines]
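+# draw one red point per build and save the figure for the Jenkins archive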
+plt.plot(x, y, "ro")
+plt.savefig("eval_plot.png")
\ No newline at end of file
diff --git a/avocado-training.py b/avocado-training.py
index 9611dd1..dd2c35c 100644
--- a/avocado-training.py
+++ b/avocado-training.py
@@ -43,11 +43,3 @@ model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_dat
 
 # save the model
 model.save('avocado_model.h5')
-
-# predict
-predictions = model.predict(X_test)
-pd.DataFrame(predictions).to_csv('prediction_results.csv')
-
-# evaluation
-# error = mean_squared_error(y_test, predictions)
-# print('Error: ', error)