parametrized
Some checks failed: s444452-training/pipeline/head (there was a failure building this commit)

AdamOsiowy123 2022-05-02 22:51:16 +02:00
parent 02c32c3c0a
commit 2b8fba580b
2 changed files with 32 additions and 25 deletions

Jenkinsfile (s444452-training pipeline)

@@ -7,9 +7,9 @@ node {
         pipelineTriggers([upstream(threshold: hudson.model.Result.SUCCESS, upstreamProjects: "s444452-create-dataset")]),
         parameters([
             string(
-                defaultValue: ".",
-                description: 'Arguments for model training: arg1,arg2,arg3',
-                name: 'TRAIN_ARGS'
+                defaultValue: ".,14000,1,50,100",
+                description: 'Train params: data_path,num_words,epochs,batch_size,pad_length',
+                name: 'TRAIN_PARAMS'
             )
         ])
     ])
@@ -20,8 +20,8 @@ node {
         copyArtifacts filter: 'dev_data.csv', fingerprintArtifacts: true, projectName: 's444452-create-dataset'
     }
     stage('Run script') {
-        withEnv(["TRAIN_ARGS=${params.TRAIN_ARGS}"]) {
-            sh "python3 Scripts/train_neural_network.py $TRAIN_ARGS"
+        withEnv(["TRAIN_ARGS=${params.TRAIN_PARAMS}"]) {
+            sh "python3 Scripts/train_neural_network.py $TRAIN_PARAMS"
         }
     }
     stage('Archive artifacts') {
@@ -39,16 +39,8 @@ def notifyBuild(String buildStatus = 'STARTED') {
     buildStatus = buildStatus ?: 'SUCCESS'
     def subject = "Job: ${env.JOB_NAME}"
-    def details = "Build nr: ${env.BUILD_NUMBER}, status: ${buildStatus} \n url: ${env.BUILD_URL}"
-    // Override default values based on build status
-    if (buildStatus == 'SUCCESS') {
-        color = 'GREEN'
-        colorCode = '#00FF00'
-    } else {
-        color = 'RED'
-        colorCode = '#FF0000'
-    }
+    def details = "Build nr: ${env.BUILD_NUMBER}, status: ${buildStatus} \n url: ${env.BUILD_URL} \n build params: ${params.TRAIN_PARAMS}"
     emailext (
         subject: subject,
         body: details,
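With the new default value, the Run script stage should boil down to the call below. Jenkins exposes string parameters as environment variables, so the Groovy interpolation of $TRAIN_PARAMS should resolve to the TRAIN_PARAMS build parameter, while the TRAIN_ARGS variable exported by withEnv goes unused:

    python3 Scripts/train_neural_network.py .,14000,1,50,100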

Scripts/train_neural_network.py

@@ -13,16 +13,23 @@ import logging

 logging.getLogger("tensorflow").setLevel(logging.ERROR)

+data_path = ''
+num_words = 0
+epochs = 0
+batch_size = 0
+pad_length = 0

-def tokenize(x, x_train, x_test, max_len):
-    tokenizer = Tokenizer(num_words=14000)
+def tokenize(x, x_train, x_test):
+    global pad_length, num_words
+    tokenizer = Tokenizer(num_words=num_words)
     tokenizer.fit_on_texts(x)
     train_x = tokenizer.texts_to_sequences(x_train)
     test_x = tokenizer.texts_to_sequences(x_test)
     vocabulary_length = len(tokenizer.word_index) + 1
-    train_x = pad_sequences(train_x, padding='post', maxlen=max_len)
-    test_x = pad_sequences(test_x, padding='post', maxlen=max_len)
+    train_x = pad_sequences(train_x, padding='post', maxlen=pad_length)
+    test_x = pad_sequences(test_x, padding='post', maxlen=pad_length)
     return train_x, test_x, vocabulary_length
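For readers less familiar with these Keras helpers, a minimal standalone sketch of what tokenize does once num_words and pad_length are injected (toy corpus and illustrative values, not taken from this commit):

    from tensorflow.keras.preprocessing.text import Tokenizer
    from tensorflow.keras.preprocessing.sequence import pad_sequences

    texts = ["good movie", "bad movie", "good plot bad acting"]  # toy corpus

    tokenizer = Tokenizer(num_words=14000)  # keep only the 14000 most frequent tokens
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)  # words -> integer ids

    # pad (or truncate) every sequence to maxlen entries, zeros appended at the end
    padded = pad_sequences(sequences, padding='post', maxlen=100)
    print(padded.shape)  # (3, 100)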
@@ -47,14 +54,16 @@ def save_model(model):

 def train_model(model, x_train, y_train):
-    model.fit(x_train, y_train, epochs=1, verbose=False, batch_size=50)
+    global epochs, batch_size
+    model.fit(x_train, y_train, epochs=epochs, verbose=False, batch_size=batch_size)


-def get_model(output_dim, vocabulary_length):
+def get_model(vocabulary_length):
+    global pad_length, batch_size
     model = Sequential()
     model.add(layers.Embedding(input_dim=vocabulary_length,
-                               output_dim=output_dim,
-                               input_length=100))
+                               output_dim=batch_size,
+                               input_length=pad_length))
     model.add(layers.Flatten())
     model.add(layers.Dense(10, activation='relu'))
     model.add(layers.Dense(1, activation='sigmoid'))
@@ -74,15 +83,21 @@ def load_data(data_path, filename) -> pd.DataFrame:
     return pd.read_csv(os.path.join(data_path, filename))


+def read_params():
+    global data_path, num_words, epochs, batch_size, pad_length
+    data_path, num_words, epochs, batch_size, pad_length = sys.argv[1].split(',')


 def main():
-    data_path = sys.argv[1]
+    read_params()
+    global data_path
     abs_data_path = os.path.abspath(data_path)
     train_data = load_data(abs_data_path, 'train_data.csv')
     test_data = load_data(abs_data_path, 'test_data.csv')
     x_train, y_train = split_data(train_data)
     x_test, y_test = split_data(test_data)
-    x_train, x_test, vocab_size = tokenize(pd.concat([x_train, x_test]), x_train, x_test, 100)
-    model = get_model(50, vocab_size)
+    x_train, x_test, vocab_size = tokenize(pd.concat([x_train, x_test]), x_train, x_test)
+    model = get_model(vocab_size)
     train_model(model, x_train, y_train)
     save_model(model)
     evaluate_and_save(model, x_test, y_test, abs_data_path)
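One caveat for anyone reproducing this: str.split(',') yields strings, so num_words, epochs, batch_size and pad_length reach Tokenizer(num_words=...), model.fit(epochs=..., batch_size=...) and pad_sequences(maxlen=...) as str rather than the int those calls expect. A typed variant of read_params might look like this (a sketch, not part of the commit):

    import sys

    # module-level defaults, mirroring the committed script
    data_path = ''
    num_words = 0
    epochs = 0
    batch_size = 0
    pad_length = 0


    def read_params():
        # expects a single CLI argument shaped like ".,14000,1,50,100"
        global data_path, num_words, epochs, batch_size, pad_length
        data_path, num_words, epochs, batch_size, pad_length = sys.argv[1].split(',')
        # cast the numeric fields so the Keras calls receive ints, not strings
        num_words, epochs, batch_size, pad_length = (
            int(num_words), int(epochs), int(batch_size), int(pad_length))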