plot
commit 970f35ca35 (parent f8763fd661)
@@ -28,24 +28,29 @@ pipeline {
        steps
        {
            copyArtifacts(fingerprintArtifacts: true, projectName: 's434742-create-dataset', selector: buildParameter('BUILD_SELECTOR_DATASET'))
            copyArtifacts(fingerprintArtifacts: true, projectName: 's434742-training', selector: buildParameter('BUILD_SELECTOR_TRAINING'))
            copyArtifacts(fingerprintArtifacts: true, projectName: 's434742-evaluation', selector: buildParameter('BUILD_SELECTOR_EVALUATION'))
            copyArtifacts(fingerprintArtifacts: true, projectName: 's434742-training/master', selector: buildParameter('BUILD_SELECTOR_TRAINING'))
            copyArtifacts(fingerprintArtifacts: true, projectName: 's434742-evaluation/master', selector: buildParameter('BUILD_SELECTOR_EVALUATION'), optional: true)
        }
    }

    stage('docker-training') {
    stage('evaluation') {
        steps {
            script {
                def img = docker.build('patlaz/ium:1.0')
                img.inside {
                    sh 'chmod +x avocado-evaluation.py'
                    sh 'python3 avocado-evaluation.py'

                }
                sh 'chmod +x avocado-evaluation.py'
                sh 'python3 avocado-evaluation.py'

            }
        }
    }

    stage('archiveArtifacts') {
        steps{
            archiveArtifacts 'eval_results.txt'
            archiveArtifacts 'eval_plot.png'
        }
    }


    stage('sendMail') {
        steps{
@@ -3,23 +3,7 @@ import numpy as np
from tensorflow import keras
import matplotlib.pyplot as plt
from keras import backend as K

def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall

def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon()))
from sklearn.metrics import mean_squared_error


# load the model
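Side note on the custom Keras metrics this commit drops from the evaluation script: they only show up in model.evaluate() if they were passed to compile(metrics=[...]), and a model saved with them has to be reloaded with the same functions registered via custom_objects. A minimal sketch under those assumptions (the compile/load_model calls below are illustrative, not part of this commit; 'avocado_model.h5' is the file name used elsewhere in the diff):

```python
from tensorflow import keras
from keras import backend as K

def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    return true_positives / (K.sum(K.round(K.clip(y_true, 0, 1))) + K.epsilon())

def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    return true_positives / (K.sum(K.round(K.clip(y_pred, 0, 1))) + K.epsilon())

def f1_m(y_true, y_pred):
    precision, recall = precision_m(y_true, y_pred), recall_m(y_true, y_pred)
    return 2 * precision * recall / (precision + recall + K.epsilon())

# Reload a model that was saved after something like
#   model.compile(..., metrics=['accuracy', f1_m, precision_m, recall_m])
# The custom metric functions must be registered again via custom_objects.
model = keras.models.load_model(
    'avocado_model.h5',
    custom_objects={'recall_m': recall_m, 'precision_m': precision_m, 'f1_m': f1_m},
)
```

That is also why the `loss, accuracy, f1_score, precision, recall` unpacking in the next hunk expects five return values from model.evaluate().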
@@ -37,6 +21,27 @@ y_train = avocado_train[['type']]
X_test = avocado_test[['average_price', 'total_volume', '4046', '4225', '4770', 'total_bags', 'small_bags', 'large_bags', 'xlarge_bags']]
y_test = avocado_test[['type']]

# prediction
predictions = model.predict(X_test)
#pd.DataFrame(predictions).to_csv('prediction_results.csv')

# evaluation
loss, accuracy, f1_score, precision, recall = model.evaluate(X_test, y_test, verbose=0)
# with open('')
error = mean_squared_error(y_test, predictions)
print('Error: ', error)

with open('eval_results.txt', 'a') as f:
    f.write(str(error) + "\n")

# plot
with open('results.txt', 'r') as f:
    lines = f.readlines()


fig = plt.figure(figsize=(5,5))
chart = fig.add_subplot()
chart.set_ylabel("RMSE")
chart.set_xlabel("Build")
x = np.arange(0, len(lines), 1)
y = [float(x) for x in lines]
plt.plot(x, y, "ro")
plt.savefig("eval_plot.png")
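One thing the plotting code above highlights: the y-axis is labelled RMSE, while sklearn's mean_squared_error returns plain MSE by default. If the root of the error is what should be logged per build, a small variant like the following would do it (a sketch only, reusing the file and variable names from this diff; it is not part of the commit):

```python
import numpy as np
from sklearn.metrics import mean_squared_error

# y_test and predictions as computed above
mse = mean_squared_error(y_test, predictions)
rmse = np.sqrt(mse)  # or mean_squared_error(..., squared=False) on sklearn >= 0.22

with open('eval_results.txt', 'a') as f:
    f.write(str(rmse) + "\n")
```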
@@ -43,11 +43,3 @@ model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_dat

# save the model
model.save('avocado_model.h5')

# predict
predictions = model.predict(X_test)
pd.DataFrame(predictions).to_csv('prediction_results.csv')

# evaluation
# error = mean_squared_error(y_test, predictions)
# print('Error: ', error)