ium_444452/Scripts/evaluate_neural_network.py

#!/usr/bin/python
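# Evaluate the trained neural-network classifier on the held-out test set,
# append this build's metrics to a shared CSV and refresh the cross-build comparison plot.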
import glob
import logging
import os
import sys

import matplotlib.pyplot as plt
import pandas as pd
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import precision_score, recall_score, f1_score

logging.getLogger("tensorflow").setLevel(logging.ERROR)

build_number = ''
data_path = ''
num_words = 0
epochs = 0
batch_size = 0
pad_length = 0

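# Fit a Keras Tokenizer on the full corpus and turn the test texts into padded integer sequences.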
def tokenize(x, x_test):
    global pad_length, num_words
    tokenizer = Tokenizer(num_words=num_words)
    tokenizer.fit_on_texts(x)
    test_x = tokenizer.texts_to_sequences(x_test)
    vocabulary_length = len(tokenizer.word_index) + 1
    test_x = pad_sequences(test_x, padding='post', maxlen=pad_length)
    return test_x, vocabulary_length

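# Compute loss, accuracy, precision, recall and F1 on the test set and append one row for this build to neural_network_evaluation.csv.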
def evaluate_and_save(model, x, y, abs_path):
    global build_number
    loss, accuracy = model.evaluate(x, y, verbose=False)
    y_predicted = (model.predict(x) >= 0.5).astype(int)
    evaluation_file_path = os.path.join(abs_path, 'neural_network_evaluation.csv')
    with open(evaluation_file_path, 'a+') as f:
        result = f'{build_number},{accuracy},{loss},{precision_score(y, y_predicted)},{recall_score(y, y_predicted)},{f1_score(y, y_predicted)}'
        f.write(result + '\n')

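# Read the accumulated evaluation CSV and plot every metric against the build number, saving the chart as evaluation.png.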
def generate_and_save_comparison(abs_path):
    evaluation_file_path = os.path.join(abs_path, 'neural_network_evaluation.csv')
    df = pd.read_csv(evaluation_file_path, sep=',', header=None,
                     names=['build_number', 'Accuracy', 'Loss', 'Precision', 'Recall', 'F1'])
    df = df[df.build_number != 0]
    fig = plt.figure(figsize=(16 * .6, 9 * .6))
    ax = fig.add_subplot(111)
    ax.set_title('Evaluation')
    X = df['build_number']
    ax.set_xlabel('build_number')
    ax.set_xticks(df['build_number'])
    for metrics, color in zip(['Accuracy', 'Loss', 'Precision', 'Recall', 'F1'],
                              ['green', 'red', 'blue', 'brown', 'magenta']):
        ax.plot(X, df[metrics], color=color, lw=1, label=f'{metrics}')
    ax.legend()
    plt.savefig(os.path.join(abs_path, 'evaluation.png'), format='png')
    return ax

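# Load the saved Keras model from the model/ directory.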
def load_trained_model():
    # glob_pattern = os.path.join(os.getcwd(), 'model', 'neural_net_*')
    glob_pattern = os.path.join(os.getcwd(), 'model', 'neural_net')
    models = glob.glob(glob_pattern)
    models = [os.path.split(x)[1] for x in models]
    # model_name = sorted(models, key=lambda x: datetime.datetime.strptime(x[11:], '%d-%b-%Y-%H:%M:%S'),
    #                     reverse=True)[0]
    return load_model(os.path.join(os.getcwd(), 'model', models[0]))

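# Split a dataframe into features ('tokens') and labels ('fraudulent').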
def split_data(data):
    x = data['tokens']
    y = data['fraudulent']
    return x, y

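# Read one of the prepared CSV splits from the data directory.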
def load_data(data_path, filename) -> pd.DataFrame:
    return pd.read_csv(os.path.join(data_path, filename))

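# Parse CLI arguments: argv[1] is the build number, argv[2] is a comma-separated string of data path and hyperparameters.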
def read_params():
    global build_number, data_path, num_words, epochs, batch_size, pad_length
    build_number = sys.argv[1]
    data_path, num_words, epochs, batch_size, pad_length = sys.argv[2].split(',')
    num_words = int(num_words)
    epochs = int(epochs)
    batch_size = int(batch_size)
    pad_length = int(pad_length)

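# Entry point: load the data splits, rebuild the tokenizer on the full corpus, evaluate the stored model and refresh the comparison plot.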
def main():
    global data_path
    read_params()
    abs_data_path = os.path.abspath(data_path)
    train_data = load_data(abs_data_path, 'train_data.csv')
    test_data = load_data(abs_data_path, 'test_data.csv')
    x_train, _ = split_data(train_data)
    x_test, y_test = split_data(test_data)
    x_test, _ = tokenize(pd.concat([x_train, x_test]), x_test)
    model = load_trained_model()
    evaluate_and_save(model, x_test, y_test, abs_data_path)
    generate_and_save_comparison(abs_data_path)


if __name__ == '__main__':
    main()