From acd7ee81b21a682e3e83488ed5af7dd7f3108f1c Mon Sep 17 00:00:00 2001
From: Wojciech Lidwin <59863785+Halal37@users.noreply.github.com>
Date: Fri, 12 May 2023 03:37:36 +0200
Subject: [PATCH] Fix

---
 Jenkinsfile_train |  2 +-
 ium_sacred.py     | 86 ++++++++++++++++++++----------------------
 2 files changed, 37 insertions(+), 51 deletions(-)

diff --git a/Jenkinsfile_train b/Jenkinsfile_train
index f668e95..41da832 100644
--- a/Jenkinsfile_train
+++ b/Jenkinsfile_train
@@ -25,7 +25,7 @@ node {
         checkout([$class: 'GitSCM', branches: [[name: '*/master']], extensions: [], userRemoteConfigs: [[credentialsId: 's487197', url: 'https://git.wmi.amu.edu.pl/s487197/ium_487197']]])
     }
     stage('Dockerfile'){
-        def testImage = docker.image('s487197/ium:39')
+        def testImage = docker.image('s487197/ium:40')
         testImage.inside{
             copyArtifacts filter: 'baltimore_train.csv', projectName: 's487197-create-dataset'
             sh "python3 ium_sacred.py -epochs $EPOCHS -lr $LR -validation_split $VALIDATION_SPLIT"
diff --git a/ium_sacred.py b/ium_sacred.py
index 22120b9..2c74042 100644
--- a/ium_sacred.py
+++ b/ium_sacred.py
@@ -38,27 +38,44 @@ def get_x_y(data):
 
 @ex.config
 def my_config():
-    epochs = 20
-    lr = 0.01
-    validation_split = 0.2
-    #parser = argparse.ArgumentParser(description='Train')
+    parser = argparse.ArgumentParser(description='Train')
 
-    # parser.add_argument('-epochs', type=int, default=20)
-    # parser.add_argument('-lr', type=float, default=0.01)
-    #parser.add_argument('-validation_split', type=float, default=0.2)
-    #args = parser.parse_args()
-    # epochs = args.epochs
-    # lr = args.lr
-    # validation_split = args.validation_split
+    parser.add_argument('-epochs', type=int, default=20)
+    parser.add_argument('-lr', type=float, default=0.01)
+    parser.add_argument('-validation_split', type=float, default=0.2)
+    args = parser.parse_args()
+    epochs = args.epochs
+    lr = args.lr
+    validation_split = args.validation_split
 
+@ex.capture
+def prepare_message(epochs, lr, validation_split):
+    return "{0} {1} {2}!".format(epochs, lr, validation_split)
 
 @ex.main
-def predict(epochs, lr, validation_split):
-
-    print("ble")
-    model = load_model('baltimore_model')
-
+def my_main(epochs, lr, validation_split, _run):
     train = pd.read_csv('baltimore_train.csv')
+
+    data_train, x_train, y_train = get_x_y(train)
+    normalizer = tf.keras.layers.Normalization(axis=1)
+    normalizer.adapt(np.array(x_train))
+    model = Sequential(normalizer)
+    model.add(Dense(64, activation="relu"))
+    model.add(Dense(10, activation='relu'))
+    model.add(Dense(10, activation='relu'))
+    model.add(Dense(10, activation='relu'))
+    model.add(Dense(5, activation="softmax"))
+    model.compile(Adam(learning_rate=lr), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
+    model.summary()
+
+    history = model.fit(
+        x_train,
+        y_train,
+        epochs=epochs,
+        validation_split=validation_split)
+    hist = pd.DataFrame(history.history)
+    hist['epoch'] = history.epoch
+
     baltimore_data_test =pd.read_csv('baltimore_test.csv')
     baltimore_data_test.columns = train.columns
     baltimore_data_test, x_test, y_test = get_x_y(baltimore_data_test)
@@ -81,42 +98,11 @@ def predict(epochs, lr, validation_split):
         'rmse': math.sqrt(metrics.mean_squared_error(y_test, y_predicted)),
         'accuracy': scores[1] * 100
     }
-    ex.log_scalar('accuracy', data['accuracy'])
-    ex.log_scalar('rmse', data['rmse'])
-    ex.log_scalar('accuracy', data['accuracy'])
+    _run.log_scalar('accuracy', data['accuracy'])
+    _run.log_scalar('rmse', data['rmse'])
+    _run.log_scalar('accuracy', data['accuracy'])
     ex.add_artifact('baltimore_model')
-
-
-
-@ex.capture
-def train_model(epochs, lr, validation_split):
-
-
-    train = pd.read_csv('baltimore_train.csv')
-
-    data_train, x_train, y_train = get_x_y(train)
-    normalizer = tf.keras.layers.Normalization(axis=1)
-    normalizer.adapt(np.array(x_train))
-    model = Sequential(normalizer)
-    model.add(Dense(64, activation="relu"))
-    model.add(Dense(10, activation='relu'))
-    model.add(Dense(10, activation='relu'))
-    model.add(Dense(10, activation='relu'))
-    model.add(Dense(5, activation="softmax"))
-    model.compile(Adam(learning_rate=lr), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
-    model.summary()
-
-    history = model.fit(
-        x_train,
-        y_train,
-        epochs=epochs,
-        validation_split=validation_split)
-    hist = pd.DataFrame(history.history)
-    hist['epoch'] = history.epoch
-
     model.save('baltimore_model')
-
     shutil.make_archive('baltimore', 'zip', 'baltimore_model')
-
 
 ex.run()
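
For orientation, below is a minimal, self-contained sketch (not part of the patch) of the Sacred wiring the reworked ium_sacred.py follows: argparse inside the @ex.config scope turns the -epochs/-lr/-validation_split flags passed by the Jenkinsfile into config values, @ex.main receives them by name together with _run for metric logging, and ex.run() at module level starts the run. The experiment name, the 'runs' observer directory, and the metric values are illustrative placeholders, not taken from the repository.

# Sketch only -- assumes sacred is installed; names are placeholders.
import argparse

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment('train')                           # hypothetical experiment name
ex.observers.append(FileStorageObserver('runs'))   # persists config, metrics.json, artifacts


@ex.config
def my_config():
    # Runs at config-collection time, so the same flags the Jenkinsfile passes
    # (-epochs, -lr, -validation_split) become Sacred config values.
    parser = argparse.ArgumentParser(description='Train')
    parser.add_argument('-epochs', type=int, default=20)
    parser.add_argument('-lr', type=float, default=0.01)
    parser.add_argument('-validation_split', type=float, default=0.2)
    args = parser.parse_args()
    epochs = args.epochs
    lr = args.lr
    validation_split = args.validation_split


@ex.main
def my_main(epochs, lr, validation_split, _run):
    # Config values are injected by name; _run is the active Run object, so
    # these scalars land in the observer's metrics.json for this run.
    _run.log_scalar('accuracy', 0.0)   # placeholder value
    _run.log_scalar('rmse', 0.0)       # placeholder value


ex.run()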