Wojciech Lidwin 2023-05-12 03:37:36 +02:00
parent 63159ed8bf
commit acd7ee81b2
2 changed files with 37 additions and 51 deletions

@@ -25,7 +25,7 @@ node {
         checkout([$class: 'GitSCM', branches: [[name: '*/master']], extensions: [], userRemoteConfigs: [[credentialsId: 's487197', url: 'https://git.wmi.amu.edu.pl/s487197/ium_487197']]])
     }
     stage('Dockerfile'){
-        def testImage = docker.image('s487197/ium:39')
+        def testImage = docker.image('s487197/ium:40')
         testImage.inside{
             copyArtifacts filter: 'baltimore_train.csv', projectName: 's487197-create-dataset'
             sh "python3 ium_sacred.py -epochs $EPOCHS -lr $LR -validation_split $VALIDATION_SPLIT"

@@ -38,27 +38,44 @@ def get_x_y(data):
 @ex.config
 def my_config():
-    epochs = 20
-    lr = 0.01
-    validation_split = 0.2
-    #parser = argparse.ArgumentParser(description='Train')
-    # parser.add_argument('-epochs', type=int, default=20)
-    # parser.add_argument('-lr', type=float, default=0.01)
-    #parser.add_argument('-validation_split', type=float, default=0.2)
-    #args = parser.parse_args()
-    # epochs = args.epochs
-    # lr = args.lr
-    # validation_split = args.validation_split
+    parser = argparse.ArgumentParser(description='Train')
+    parser.add_argument('-epochs', type=int, default=20)
+    parser.add_argument('-lr', type=float, default=0.01)
+    parser.add_argument('-validation_split', type=float, default=0.2)
+    args = parser.parse_args()
+    epochs = args.epochs
+    lr = args.lr
+    validation_split = args.validation_split
+@ex.capture
+def prepare_message(epochs, lr, validation_split):
+    return "{0} {1} {2}!".format(epochs, lr, validation_split)
 @ex.main
-def predict(epochs, lr, validation_split):
-    print("ble")
-    model = load_model('baltimore_model')
+def my_main(epochs, lr, validation_split, _run):
     train = pd.read_csv('baltimore_train.csv')
+    data_train, x_train, y_train = get_x_y(train)
+    normalizer = tf.keras.layers.Normalization(axis=1)
+    normalizer.adapt(np.array(x_train))
+    model = Sequential(normalizer)
+    model.add(Dense(64, activation="relu"))
+    model.add(Dense(10, activation='relu'))
+    model.add(Dense(10, activation='relu'))
+    model.add(Dense(10, activation='relu'))
+    model.add(Dense(5, activation="softmax"))
+    model.compile(Adam(learning_rate=lr), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
+    model.summary()
+    history = model.fit(
+        x_train,
+        y_train,
+        epochs=epochs,
+        validation_split=validation_split)
+    hist = pd.DataFrame(history.history)
+    hist['epoch'] = history.epoch
     baltimore_data_test = pd.read_csv('baltimore_test.csv')
     baltimore_data_test.columns = train.columns
     baltimore_data_test, x_test, y_test = get_x_y(baltimore_data_test)
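
The lines added to my_main move model definition and training in-line: a Normalization layer adapted to the training features, a stack of Dense layers ending in a 5-way softmax, compiled with Adam and sparse categorical cross-entropy, then fitted with a validation split. A self-contained sketch of the same pattern on synthetic data, where the feature count, sample count and the short epoch budget are assumptions made only to keep the example runnable:

import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam

rng = np.random.default_rng(0)
x_train = rng.normal(size=(200, 8)).astype('float32')   # 8 assumed features
y_train = rng.integers(0, 5, size=(200,))                # 5 classes, matching the softmax head

# Normalization layer adapted to the training features, then a small Dense stack.
normalizer = tf.keras.layers.Normalization(axis=-1)
normalizer.adapt(x_train)

model = Sequential([normalizer,
                    Dense(64, activation='relu'),
                    Dense(10, activation='relu'),
                    Dense(5, activation='softmax')])

# Integer labels with a softmax head -> sparse categorical cross-entropy.
model.compile(optimizer=Adam(learning_rate=0.01),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(x_train, y_train, epochs=2, validation_split=0.2, verbose=0)
print(history.history['val_accuracy'][-1])
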
@@ -81,42 +98,11 @@ def predict(epochs, lr, validation_split):
         'rmse': math.sqrt(metrics.mean_squared_error(y_test, y_predicted)),
         'accuracy': scores[1] * 100
     }
-    ex.log_scalar('accuracy', data['accuracy'])
-    ex.log_scalar('rmse', data['rmse'])
-    ex.log_scalar('accuracy', data['accuracy'])
+    _run.log_scalar('accuracy', data['accuracy'])
+    _run.log_scalar('rmse', data['rmse'])
+    _run.log_scalar('accuracy', data['accuracy'])
     ex.add_artifact('baltimore_model')
-@ex.capture
-def train_model(epochs, lr, validation_split):
-    train = pd.read_csv('baltimore_train.csv')
-    data_train, x_train, y_train = get_x_y(train)
-    normalizer = tf.keras.layers.Normalization(axis=1)
-    normalizer.adapt(np.array(x_train))
-    model = Sequential(normalizer)
-    model.add(Dense(64, activation="relu"))
-    model.add(Dense(10, activation='relu'))
-    model.add(Dense(10, activation='relu'))
-    model.add(Dense(10, activation='relu'))
-    model.add(Dense(5, activation="softmax"))
-    model.compile(Adam(learning_rate=lr), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
-    model.summary()
-    history = model.fit(
-        x_train,
-        y_train,
-        epochs=epochs,
-        validation_split=validation_split)
-    hist = pd.DataFrame(history.history)
-    hist['epoch'] = history.epoch
-    model.save('baltimore_model')
-    shutil.make_archive('baltimore', 'zip', 'baltimore_model')
 ex.run()
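
The second hunk switches metric logging from the experiment object to the _run object that Sacred injects into the main function, and drops the separate train_model capture now that training lives in my_main. A minimal, hypothetical sketch of that Sacred pattern, with hard-coded config defaults and placeholder metric values standing in for the real rmse/accuracy computation:

from sacred import Experiment

ex = Experiment('baltimore_sketch')  # experiment name is an illustrative assumption

@ex.config
def my_config():
    epochs = 20
    lr = 0.01
    validation_split = 0.2

@ex.main
def my_main(epochs, lr, validation_split, _run):
    # Placeholder values standing in for the real rmse/accuracy computation.
    _run.log_scalar('rmse', 1.23)
    _run.log_scalar('accuracy', 0.87)
    return epochs * lr * validation_split

if __name__ == '__main__':
    ex.run()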