eval script
Some checks failed
s444417-training/pipeline/head There was a failure building this commit

s444417 2022-05-03 21:45:42 +02:00
parent 8391625688
commit 8cca2125c9
11 changed files with 33 additions and 19 deletions

View File

@@ -1,8 +1,16 @@
pipeline {
    agent none
    stages {
        stage('Copy') {
            steps {
                copyArtifacts projectName: 's444417-training'
                sh 'python3 evalScript.py'
                archiveArtifacts 'trainResults.csv'
            }
        }
        stage('Starting eval job') {
            steps {
                build job: 's444417-evaluation/master', wait: true
            }
        }
    }
}
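
The pipeline above runs evalScript.py, which is not included in this diff. Below is a minimal, hypothetical sketch of what such a script could look like, assuming it loads the SavedModel archived by the training job (saved_model/MyModel_tf appears later in this diff) and appends a metric to the trainResults.csv that the pipeline archives; the test-set filename and its column layout are placeholders, not taken from the repository:

    # Hypothetical sketch of evalScript.py (not part of this commit).
    # Assumes the SavedModel copied from the s444417-training job and a
    # numeric CSV test set; 'test_set.csv' and its layout are placeholders.
    import csv
    import numpy as np
    import tensorflow as tf

    model = tf.keras.models.load_model('saved_model/MyModel_tf')

    # Placeholder test data: last column is the expected price,
    # the remaining columns are features.
    data = np.loadtxt('test_set.csv', delimiter=',', skiprows=1)
    test_features, test_expected = data[:, :-1], data[:, -1]

    predictions = model.predict(test_features).flatten()
    mse = float(np.mean((predictions - test_expected) ** 2))

    # Append one row per build so the archived trainResults.csv grows over time.
    with open('trainResults.csv', 'a', newline='') as f:
        csv.writer(f).writerow([mse])

    print('predictions:', predictions[:10])
    print('expected:', test_expected[:10])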

View File

@@ -7,10 +7,14 @@ pipeline {
            steps {
                copyArtifacts projectName: 's444417-create-dataset'
                sh 'ls -la'
                sh 'python3 ./src/trainScript.py 6'
                archiveArtifacts 'saved_model/MyModel_tf/*'
            }
        }
    }
    options {
        copyArtifactPermission('s444417-evaluation');
    }
    post {
        always {
            emailext body: "${currentBuild.currentResult}", subject: 's444417-testing build status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'

View File

@@ -3,9 +3,11 @@ IUM_6
---
Task 1
1. Created the job [s444417-training](https://tzietkiewicz.vm.wmi.amu.edu.pl:8080/job/s444417-training/)
2. s444417-training starts automatically once the s444417-create-dataset job finishes ("Jenkinsfile", via build job). It copies the dataset with copyArtifacts in "Jenkinsfile3"
3. "Jenkinsfile3", via archiveArtifacts
4. Notifications: "Jenkinsfile3", via emailext
5. The parameter is passed in "startscript1.sh" on line 11, when the training launcher script "startscript2.sh" is invoked; the parameter is the number of epochs
2. s444417-training starts automatically once the s444417-create-dataset job finishes (Jenkinsfile, via build job). It copies the dataset with copyArtifacts in Jenkinsfile3
3. Jenkinsfile3, via archiveArtifacts
4. Notifications: Jenkinsfile3, via emailext
5. The parameter is passed in startscript1.sh on line 11, when the training launcher script startscript2.sh is invoked; the parameter is the number of epochs (see the sketch below)
Task 2
1. Created the job [s444417-evaluation](https://tzietkiewicz.vm.wmi.amu.edu.pl:8080/job/s444417-evaluation/)
2. Model evaluation; the result is saved to trainResults.tsv, archived with archiveArtifacts in Jenkinsfile.eval
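
Item 5 of Task 1 describes passing the epoch count from the shell wrapper into the training script. A minimal sketch of the Python side of that hand-off, mirroring the sys.argv usage visible later in this diff; the fallback default here is an assumption, not something the repository defines:

    # Sketch: reading the epoch-count parameter passed by startscript2.sh,
    # e.g. `python3 ./src/trainScript.py 6`. The actual trainScript.py reads
    # sys.argv[1] directly; the default below is assumed for illustration.
    import sys

    number_of_epochs = int(sys.argv[1]) if len(sys.argv) > 1 else 6
    print(f'training for {number_of_epochs} epochs')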

View File

@@ -1,2 +1,2 @@
predictions: [166.16302, 211.04045, 123.3409, 42.00785, 87.86473, 109.27005, 169.75987, 173.21875, 232.33553, 142.31973]
expected: [ 80. 42. 64. 72. 14. 97.4 75. 200. 140. 61. ]
predictions: [476.51816, 140.02351, -0.7704315, 151.024, 134.35803, 128.7151, 193.7164, -74.09639, 79.28711, 223.44864]
expected: [930. 160. 16.5 34. 80.9 49. 100. 65. 28.5 120. ]
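
The lines above are the raw output of the evaluation run: predicted versus expected prices before and after this commit. A short sketch of how the gap between the two vectors could be quantified; the arrays are copied from the new version of the file, and the choice of MAE/MSE is illustrative rather than what the course scripts compute:

    # Sketch: quantifying the prediction error for the values shown above.
    import numpy as np

    predictions = np.array([476.51816, 140.02351, -0.7704315, 151.024, 134.35803,
                            128.7151, 193.7164, -74.09639, 79.28711, 223.44864])
    expected = np.array([930., 160., 16.5, 34., 80.9, 49., 100., 65., 28.5, 120.])

    mae = np.mean(np.abs(predictions - expected))   # mean absolute error
    mse = np.mean((predictions - expected) ** 2)    # mean squared error
    print(f'MAE: {mae:.2f}  MSE: {mse:.2f}')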

View File

@@ -1,5 +0,0 @@
[Deleted file contents: serialized Keras model metadata for the Sequential model (Normalization + Dense(1, linear) layers, Adam optimizer, mean squared error loss, Keras 2.8.0 on the TensorFlow backend); binary-encoded content omitted.]

View File

@@ -5,6 +5,16 @@ import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
def plot_loss(history):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.xlabel('Epoch')
    plt.ylabel('Error [MPG]')
    plt.legend()
    plt.grid(True)
    plt.show()
#train params
numberOfEpoch = sys.argv[1]
@@ -71,11 +81,11 @@ history = linear_model.fit(
    house_price_features,
    house_price_labels,
    epochs=int(numberOfEpoch),
    validation_data = (house_price_test_features,house_price_test_expected),
    validation_split=0.33,
    verbose=1)
#callbacks=[cp_callback])
# print(history)
plot_loss(history)
# save model
linear_model.save(modelPath, save_format='tf')
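
The fit call above switched from an explicit validation_data tuple to validation_split=0.33. For context, a small self-contained sketch of the difference using synthetic data; the model here is a stand-in, not the repository's linear_model:

    # Sketch: validation_data vs. validation_split in Keras.
    import numpy as np
    import tensorflow as tf

    x = np.random.rand(100, 8).astype('float32')
    y = np.random.rand(100, 1).astype('float32')

    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    model.compile(optimizer='adam', loss='mean_squared_error')

    # Before this commit: a separate, explicit validation set.
    x_val, y_val = x[:20], y[:20]
    model.fit(x[20:], y[20:], epochs=2, validation_data=(x_val, y_val), verbose=0)

    # After this commit: Keras holds out the last 33% of the training
    # samples (taken before shuffling) as the validation set.
    model.fit(x, y, epochs=2, validation_split=0.33, verbose=0)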

View File

@@ -8,4 +8,4 @@ unzip house-price-dataset.zip
# head -n $CUTOFF ./Participants_Data_HPP/Train.csv > data.txt
# head -n $CUTOFF ./Participants_Data_HPP/Test.csv > dataTest.txt
python3 ./src/task1python.py
python3 ./src/trainScript.py 6
# python3 ./src/trainScript.py 6

View File

@@ -1,5 +0,0 @@
0,306900.46875
1,304823.75
2,298283.34375
3,303093.53125
4,304189.1875