evaluate
Some checks failed
s444452-evaluation/pipeline/head There was a failure building this commit
s444452-training/pipeline/head This commit looks good

AdamOsiowy123 2022-05-03 00:04:19 +02:00
parent 0ab6df2703
commit 8fa7f68080
2 changed files with 136 additions and 0 deletions

@@ -0,0 +1,48 @@
node {
    checkout scm
    try {
        docker.image('s444452/ium:1.3').inside {
            stage('Preparation') {
                properties([
                    // Re-run evaluation automatically after a successful training build.
                    pipelineTriggers([upstream(threshold: hudson.model.Result.SUCCESS, upstreamProjects: "s444452-training")]),
                    parameters([
                        string(
                            defaultValue: ".,14000,100",
                            description: 'Test params: data_path,num_words,pad_length',
                            name: 'TEST_PARAMS'
                        )
                    ])
                ])
            }
            stage('Copy artifacts') {
                copyArtifacts filter: 'train_data.csv', fingerprintArtifacts: true, projectName: 's444452-create-dataset'
                copyArtifacts filter: 'test_data.csv', fingerprintArtifacts: true, projectName: 's444452-create-dataset'
            }
            stage('Run script') {
                withEnv(["TEST_PARAMS=${params.TEST_PARAMS}"]) {
                    // Single quotes: let the shell expand $TEST_PARAMS rather than Groovy interpolation.
                    sh 'python3 Scripts/evaluate_neural_network.py $TEST_PARAMS'
                }
            }
            stage('Archive artifacts') {
                archiveArtifacts "neural_network_evaluation.txt"
            }
        }
    } catch (e) {
        currentBuild.result = "FAILED"
        throw e
    } finally {
        notifyBuild(currentBuild.result)
    }
}

def notifyBuild(String buildStatus = 'STARTED') {
    buildStatus = buildStatus ?: 'SUCCESS'
    def subject = "Job: ${env.JOB_NAME}"
    // This job passes TEST_PARAMS (not TRAIN_PARAMS), so report the right parameter.
    def details = "Build nr: ${env.BUILD_NUMBER}, status: ${buildStatus} \n url: ${env.BUILD_URL} \n build params: ${params.TEST_PARAMS}"
    emailext (
        subject: subject,
        body: details,
        to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
    )
}
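
As a note on the parameter contract: TEST_PARAMS travels from the Jenkins string parameter to the script as a single comma-separated positional argument. A minimal sketch of that parsing, assuming the default value above (parse_test_params is a hypothetical helper, not part of this commit):

# Hypothetical helper mirroring read_params() in Scripts/evaluate_neural_network.py.
# The default ".,14000,100" unpacks to data_path=".", num_words=14000, pad_length=100.
def parse_test_params(raw: str):
    data_path, num_words, pad_length = raw.split(',')
    return data_path, int(num_words), int(pad_length)

assert parse_test_params(".,14000,100") == (".", 14000, 100)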

Scripts/evaluate_neural_network.py
@@ -0,0 +1,88 @@
#!/usr/bin/python
import glob
import os
import sys

import pandas as pd
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
import logging

logging.getLogger("tensorflow").setLevel(logging.ERROR)

data_path = ''
num_words = 0
pad_length = 0

def tokenize(x, x_test):
    # Fit the tokenizer on the full corpus (train + test) so word indices match
    # training, then convert and pad only the test split.
    global pad_length, num_words
    tokenizer = Tokenizer(num_words=num_words)
    tokenizer.fit_on_texts(x)
    test_x = tokenizer.texts_to_sequences(x_test)
    vocabulary_length = len(tokenizer.word_index) + 1
    test_x = pad_sequences(test_x, padding='post', maxlen=pad_length)
    return test_x, vocabulary_length

def evaluate_and_save(model, x, y, abs_path):
    loss, accuracy = model.evaluate(x, y, verbose=False)
    y_predicted = (model.predict(x) >= 0.5).astype(int)
    # Mode "a" creates the file when it does not exist, so one branch suffices.
    with open(os.path.join(abs_path, 'neural_network_evaluation.txt'), "a") as log_file:
        for label, value in (
                ('Accuracy: ', accuracy), ('Loss: ', loss),
                ('Precision: ', precision_score(y, y_predicted)),
                ('Recall: ', recall_score(y, y_predicted)),
                ('F1: ', f1_score(y, y_predicted)),
                ('Accuracy (sklearn): ', accuracy_score(y, y_predicted))):
            log_file.write(f'{label}{value}\n')

def load_trained_model(abs_path):
    # Glob inside abs_path (not the CWD) so the script works from any directory.
    model_name = glob.glob(os.path.join(abs_path, 'neural_net_*'))[0]
    return load_model(model_name)

def split_data(data):
    x = data['tokens']
    y = data['fraudulent']
    return x, y


def load_data(data_path, filename) -> pd.DataFrame:
    return pd.read_csv(os.path.join(data_path, filename))


def read_params():
    # TEST_PARAMS arrives as one comma-separated argument: data_path,num_words,pad_length
    global data_path, num_words, pad_length
    data_path, num_words, pad_length = sys.argv[1].split(',')
    num_words = int(num_words)
    pad_length = int(pad_length)

def main():
    read_params()
    abs_data_path = os.path.abspath(data_path)
    train_data = load_data(abs_data_path, 'train_data.csv')
    test_data = load_data(abs_data_path, 'test_data.csv')
    x_train, _ = split_data(train_data)
    x_test, y_test = split_data(test_data)
    # Tokenize against train + test texts to reproduce the training vocabulary.
    x_test, _ = tokenize(pd.concat([x_train, x_test]), x_test)
    model = load_trained_model(abs_data_path)
    evaluate_and_save(model, x_test, y_test, abs_data_path)


if __name__ == '__main__':
    main()
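
To illustrate the thresholding step in evaluate_and_save (model.predict returns an (n, 1) column of sigmoid probabilities, cut at 0.5 before the sklearn metrics), here is a small self-contained sketch with made-up probabilities and labels:

import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score

# Made-up sigmoid outputs for six samples, with their true labels.
probs = np.array([[0.91], [0.08], [0.55], [0.47], [0.73], [0.12]])
y_true = np.array([1, 0, 1, 1, 1, 0])

y_pred = (probs >= 0.5).astype(int).ravel()  # same cut as evaluate_and_save, flattened to 1-D
print(precision_score(y_true, y_pred))  # 1.0  -- no false positives here
print(recall_score(y_true, y_pred))     # 0.75 -- the 0.47 positive is missed
print(f1_score(y_true, y_pred))         # ~0.857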