#!/usr/bin/python
"""Evaluate the trained fake-job-posting classifier and log the metrics with Sacred."""
import glob
import os
import sys

import pandas as pd
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import precision_score, recall_score, f1_score
import matplotlib.pyplot as plt
from sacred.observers import MongoObserver
from sacred.observers import FileStorageObserver
from sacred import Experiment

ex = Experiment(name='s444452_fake_job_classification_evaluation', save_git_info=False)
ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))
ex.observers.append(FileStorageObserver('my_runs'))

# Command-line arguments (passed in by the build pipeline): build number,
# data directory and the hyperparameters used during training.
build_number = sys.argv[1]
data_path = sys.argv[2]
epochs = int(sys.argv[3])
num_words = int(sys.argv[4])
batch_size = int(sys.argv[5])
pad_length = int(sys.argv[6])


@ex.config
def config():
    # Sacred config scope: re-export the CLI arguments above as experiment
    # config entries (the right-hand names resolve to the module globals).
    build_number = build_number
    data_path = data_path
    epochs = epochs
    num_words = num_words
    batch_size = batch_size
    pad_length = pad_length


@ex.capture
def tokenize(x, x_test, pad_length, num_words):
    # Fit the tokenizer on the full corpus, then turn the test texts into
    # padded integer sequences of length pad_length.
    tokenizer = Tokenizer(num_words=num_words)
    tokenizer.fit_on_texts(x)
    test_x = tokenizer.texts_to_sequences(x_test)
    vocabulary_length = len(tokenizer.word_index) + 1
    test_x = pad_sequences(test_x, padding='post', maxlen=pad_length)
    return test_x, vocabulary_length


@ex.capture
def evaluate_and_save(model, x, y, abs_path, build_number):
    # Evaluate the model, append this build's metrics to the CSV log and
    # register the log file as a Sacred artifact.
    loss, accuracy = model.evaluate(x, y, verbose=False)
    y_predicted = (model.predict(x) >= 0.5).astype(int)
    evaluation_file_path = os.path.join(abs_path, 'neural_network_evaluation.csv')
    with open(evaluation_file_path, 'a+') as f:
        result = (f'{build_number},{accuracy},{loss},'
                  f'{precision_score(y, y_predicted)},'
                  f'{recall_score(y, y_predicted)},'
                  f'{f1_score(y, y_predicted)}')
        f.write(result + '\n')
    ex.log_scalar("loss", loss)
    ex.log_scalar("accuracy", accuracy)
    ex.add_artifact(evaluation_file_path)


def generate_and_save_comparison(abs_path):
    # Plot every logged metric against the build number and save the figure.
    evaluation_file_path = os.path.join(abs_path, 'neural_network_evaluation.csv')
    df = pd.read_csv(evaluation_file_path, sep=',', header=None,
                     names=['build_number', 'Accuracy', 'Loss', 'Precision', 'Recall', 'F1'])
    df = df[df.build_number != 0]
    fig = plt.figure(figsize=(16 * .6, 9 * .6))
    ax = fig.add_subplot(111)
    ax.set_title('Evaluation')
    X = df['build_number']
    ax.set_xlabel('build_number')
    ax.set_xticks(df['build_number'])
    for metrics, color in zip(['Accuracy', 'Loss', 'Precision', 'Recall', 'F1'],
                              ['green', 'red', 'blue', 'brown', 'magenta']):
        ax.plot(X, df[metrics], color=color, lw=1, label=f'{metrics}')
    ax.legend()
    plt.savefig(os.path.join(abs_path, 'evaluation.png'), format='png')
    ex.add_artifact(os.path.join(abs_path, 'evaluation.png'))
    return ax


def load_trained_model():
    # Load the saved Keras model from the ./model directory.
    # glob_pattern = os.path.join(os.getcwd(), 'model', 'neural_net_*')
    glob_pattern = os.path.join(os.getcwd(), 'model', 'neural_net')
    models = glob.glob(glob_pattern)
    models = [os.path.split(x)[1] for x in models]
    # model_name = sorted(models, key=lambda x: datetime.datetime.strptime(x[11:], '%d-%b-%Y-%H:%M:%S'),
    #                     reverse=True)[0]
    return load_model(os.path.join(os.getcwd(), 'model', models[0]))


def split_data(data):
    # Split a data frame into input texts and the binary target label.
    x = data['tokens']
    y = data['fraudulent']
    return x, y


def load_data(data_path, filename) -> pd.DataFrame:
    return pd.read_csv(os.path.join(data_path, filename))


@ex.main
def main(build_number, data_path, num_words, epochs, batch_size, pad_length, _run):
    abs_data_path = os.path.abspath(data_path)
    train_data = load_data(abs_data_path, 'train_data.csv')
    test_data = load_data(abs_data_path, 'test_data.csv')
    x_train, _ = split_data(train_data)
    x_test, y_test = split_data(test_data)
    x_test, _ = tokenize(pd.concat([x_train, x_test]), x_test)
    model = load_trained_model()
    evaluate_and_save(model, x_test, y_test, abs_data_path)
    generate_and_save_comparison(abs_data_path)


ex.run()
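# Example invocation (a sketch only; the script name and the concrete values are
# placeholders, assuming <data_path> contains train_data.csv and test_data.csv
# and ./model/neural_net holds the trained model):
#
#   python evaluation.py <build_number> <data_path> <epochs> <num_words> <batch_size> <pad_length>
#   python evaluation.py 42 ./data 10 1000 32 300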