evaluate
All checks were successful (s444452-training/pipeline/head: "This commit looks good").

AdamOsiowy123 committed on 2022-05-03 00:00:08 +02:00
parent 81ef5f8c7d, commit 0ab6df2703
2 changed files with 6 additions and 28 deletions
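
In short: the evaluation step is pulled out of the training job. The Jenkins pipeline stops copying dev_data.csv and no longer archives neural_network_evaluation.txt, and the training script drops evaluate_and_save, load_trained_model, and the sklearn.metrics import, so tokenize now returns only the padded training sequences and the vocabulary size. Evaluation presumably moves to a separate job that consumes the archived model/** artifacts.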

Changed file 1: the Jenkins training pipeline

@@ -17,7 +17,6 @@ node {
         stage('Copy artifacts') {
             copyArtifacts filter: 'train_data.csv', fingerprintArtifacts: true, projectName: 's444452-create-dataset'
             copyArtifacts filter: 'test_data.csv', fingerprintArtifacts: true, projectName: 's444452-create-dataset'
-            copyArtifacts filter: 'dev_data.csv', fingerprintArtifacts: true, projectName: 's444452-create-dataset'
         }
         stage('Run script') {
             withEnv(["TRAIN_PARAMS=${params.TRAIN_PARAMS}"]) {
@@ -25,7 +24,7 @@ node {
             }
         }
         stage('Archive artifacts') {
-            archiveArtifacts "neural_network_evaluation.txt, model/**"
+            archiveArtifacts "model/**"
         }
     }
 } catch (e) {
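
With the report file gone from this job, a downstream evaluation pipeline would presumably fetch the archived model/** with its own copyArtifacts step before computing metrics; a Python sketch of such a script follows the second diff.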

Changed file 2: the neural-network training script

@@ -1,14 +1,12 @@
 #!/usr/bin/python
 import datetime
 import os
-import pprint
 import sys
 import pandas as pd
-from keras.models import Sequential, load_model
+from keras.models import Sequential
 from keras import layers
 from keras.preprocessing.text import Tokenizer
 from keras.preprocessing.sequence import pad_sequences
-from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
 import logging
 logging.getLogger("tensorflow").setLevel(logging.ERROR)
@@ -20,32 +18,14 @@ batch_size = 0
 pad_length = 0


-def tokenize(x, x_train, x_test):
+def tokenize(x, x_train):
     global pad_length, num_words
     tokenizer = Tokenizer(num_words=num_words)
     tokenizer.fit_on_texts(x)
     train_x = tokenizer.texts_to_sequences(x_train)
-    test_x = tokenizer.texts_to_sequences(x_test)
     vocabulary_length = len(tokenizer.word_index) + 1
     train_x = pad_sequences(train_x, padding='post', maxlen=pad_length)
-    test_x = pad_sequences(test_x, padding='post', maxlen=pad_length)
-    return train_x, test_x, vocabulary_length
-
-
-def evaluate_and_save(model, x, y, abs_path):
-    loss, accuracy = model.evaluate(x, y, verbose=False)
-    y_predicted = (model.predict(x) >= 0.5).astype(int)
-    with open(os.path.join(abs_path, 'neural_network_evaluation.txt'), "w") as log_file:
-        for obj in (
-                ('Accuracy: ', accuracy), ('Loss: ', loss), ('Precision: ', precision_score(y, y_predicted)),
-                ('Recall: ', recall_score(y, y_predicted)), ('F1: ', f1_score(y, y_predicted)),
-                ('Accuracy: ', accuracy_score(y, y_predicted))):
-            pprint.pprint(obj, log_file)
-
-
-def load_trained_model(abs_path, model_name):
-    return load_model(os.path.join(abs_path, model_name))
+    return train_x, vocabulary_length


 def save_model(model):
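
One consequence of the narrowed tokenize signature: the function no longer returns tokenized test sequences, so whichever script evaluates the model has to rebuild the same preprocessing itself. A minimal sketch, assuming the evaluation side re-fits a Tokenizer over the same combined corpus with the same num_words and pad_length values (the helper name prepare_test_sequences is hypothetical, not part of this repo):

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences


def prepare_test_sequences(x, x_test, num_words, pad_length):
    # Re-fitting on the same corpus reproduces the word index the
    # training run used, so the test sequences stay comparable.
    tokenizer = Tokenizer(num_words=num_words)
    tokenizer.fit_on_texts(x)
    test_x = tokenizer.texts_to_sequences(x_test)
    return pad_sequences(test_x, padding='post', maxlen=pad_length)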
@@ -99,12 +79,11 @@ def main():
     train_data = load_data(abs_data_path, 'train_data.csv')
     test_data = load_data(abs_data_path, 'test_data.csv')
     x_train, y_train = split_data(train_data)
-    x_test, y_test = split_data(test_data)
-    x_train, x_test, vocab_size = tokenize(pd.concat([x_train, x_test]), x_train, x_test)
+    x_test, _ = split_data(test_data)
+    x_train, vocab_size = tokenize(pd.concat([x_train, x_test]), x_train)
     model = get_model(vocab_size)
     train_model(model, x_train, y_train)
     save_model(model)
-    evaluate_and_save(model, x_test, y_test, abs_data_path)


 if __name__ == '__main__':
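
For reference, the deleted functions map almost one-to-one onto a standalone evaluation script. A sketch of where they could land, assembled from the code removed above; the script layout is an assumption, not part of this commit, and the old evaluate_and_save's duplicate 'Accuracy' entry is disambiguated here:

#!/usr/bin/python
import os
import pprint

from keras.models import load_model
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score


def load_trained_model(abs_path, model_name):
    # Restore the model the training pipeline archived under model/**.
    return load_model(os.path.join(abs_path, model_name))


def evaluate_and_save(model, x, y, abs_path):
    # Keras loss/accuracy plus sklearn metrics on thresholded predictions,
    # written to the report file the old pipeline used to archive.
    loss, accuracy = model.evaluate(x, y, verbose=False)
    y_predicted = (model.predict(x) >= 0.5).astype(int)
    with open(os.path.join(abs_path, 'neural_network_evaluation.txt'), "w") as log_file:
        for obj in (('Loss: ', loss), ('Accuracy (keras): ', accuracy),
                    ('Accuracy (sklearn): ', accuracy_score(y, y_predicted)),
                    ('Precision: ', precision_score(y, y_predicted)),
                    ('Recall: ', recall_score(y, y_predicted)),
                    ('F1: ', f1_score(y, y_predicted))):
            pprint.pprint(obj, log_file)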