#!/usr/bin/python
"""Evaluate the trained fake-job-posting classifier and report metrics to Sacred and MLflow."""
import glob
import logging
import os
import sys

import matplotlib.pyplot as plt
import mlflow
import pandas as pd
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sacred import Experiment
from sacred.observers import FileStorageObserver, MongoObserver
from sklearn.metrics import f1_score, precision_score, recall_score

logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)

# mlflow.set_tracking_uri("http://172.17.0.1:5000")
mlflow.set_experiment("s444452")

ex = Experiment(name='s444452_fake_job_classification_evaluation', save_git_info=False)
ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017',
                                  db_name='sacred'))
ex.observers.append(FileStorageObserver('my_runs'))
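
# Note: with both observers attached, each run is recorded twice: in the
# `sacred` Mongo database and on disk under my_runs/<run_id>/, where Sacred's
# FileStorageObserver stores config.json, run.json, metrics.json and the
# captured stdout.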

# All hyperparameters arrive as positional command-line arguments, typically
# passed in by the CI job that invokes this script.
build_number = sys.argv[1]
data_path = sys.argv[2]
epochs = int(sys.argv[3])
num_words = int(sys.argv[4])
batch_size = int(sys.argv[5])
pad_length = int(sys.argv[6])
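
# Example invocation (a sketch; the script name and argument values are
# placeholders for whatever the pipeline actually passes):
#   python evaluation.py 42 ./data 10 1000 32 100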


@ex.config
def config():
    # Sacred does not call this function; it re-executes the body in its own
    # scope, so each right-hand side below resolves to the module-level value
    # parsed from the command line (plain Python would raise UnboundLocalError).
    build_number = build_number
    data_path = data_path
    epochs = epochs
    num_words = num_words
    batch_size = batch_size
    pad_length = pad_length


@ex.capture
def tokenize(x, x_test, pad_length, num_words):
    """Fit a tokenizer on the full corpus and return the padded test sequences."""
    tokenizer = Tokenizer(num_words=num_words)
    tokenizer.fit_on_texts(x)
    test_x = tokenizer.texts_to_sequences(x_test)
    vocabulary_length = len(tokenizer.word_index) + 1
    test_x = pad_sequences(test_x, padding='post', maxlen=pad_length)
    return test_x, vocabulary_length
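
# For reference, pad_sequences zero-pads (or truncates) each sequence on the
# right up to maxlen, e.g.
#   pad_sequences([[3, 7]], padding='post', maxlen=4) -> [[3, 7, 0, 0]]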


@ex.capture
def evaluate_and_save(model, x, y, abs_path, build_number, _run):
    """Evaluate the model on the test set, append the metrics to a CSV and log them."""
    loss, accuracy = model.evaluate(x, y, verbose=0)
    # Binarize the sigmoid outputs at a 0.5 threshold.
    y_predicted = (model.predict(x) >= 0.5).astype(int)
    precision = precision_score(y, y_predicted)
    recall = recall_score(y, y_predicted)
    f1 = f1_score(y, y_predicted)
    evaluation_file_path = os.path.join(abs_path, 'neural_network_evaluation.csv')
    with open(evaluation_file_path, 'a+') as f:
        f.write(f'{build_number},{accuracy},{loss},{precision},{recall},{f1}\n')
    mlflow.log_metric("accuracy", accuracy)
    mlflow.log_metric("loss", loss)
    mlflow.log_metric("precision", precision)
    mlflow.log_metric("recall", recall)
    mlflow.log_metric("f1_score", f1)
    _run.log_scalar("training.loss", loss)
    _run.log_scalar("training.accuracy", accuracy)
    ex.add_artifact(evaluation_file_path)
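
# Each call above appends one row per build to neural_network_evaluation.csv,
# which generate_and_save_comparison() below reads back. An example row
# (values are illustrative only):
#   42,0.9712,0.0893,0.8541,0.7210,0.7819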


def generate_and_save_comparison(abs_path):
    """Plot every recorded metric across builds and store the figure as an artifact."""
    evaluation_file_path = os.path.join(abs_path, 'neural_network_evaluation.csv')
    df = pd.read_csv(evaluation_file_path, sep=',', header=None,
                     names=['build_number', 'Accuracy', 'Loss', 'Precision', 'Recall', 'F1'])
    df = df[df.build_number != 0]
    fig = plt.figure(figsize=(16 * .6, 9 * .6))
    ax = fig.add_subplot(111)
    ax.set_title('Evaluation')
    X = df['build_number']
    ax.set_xlabel('build_number')
    ax.set_xticks(df['build_number'])
    for metric, color in zip(['Accuracy', 'Loss', 'Precision', 'Recall', 'F1'],
                             ['green', 'red', 'blue', 'brown', 'magenta']):
        ax.plot(X, df[metric], color=color, lw=1, label=metric)
    ax.legend()
    plt.savefig(os.path.join(abs_path, 'evaluation.png'), format='png')
    ex.add_artifact(os.path.join(abs_path, 'evaluation.png'))
    return ax


def load_trained_model():
    """Load the serialized Keras model produced by the training step."""
    glob_pattern = os.path.join(os.getcwd(), 'model', 'neural_net')
    models = [os.path.split(path)[1] for path in glob.glob(glob_pattern)]
    if not models:
        # Fail with a clear message instead of an IndexError below.
        sys.exit(f'No trained model found at {glob_pattern}')
    return load_model(os.path.join(os.getcwd(), 'model', models[0]))


def split_data(data):
    x = data['tokens']
    y = data['fraudulent']
    return x, y


def load_data(data_path, filename) -> pd.DataFrame:
    return pd.read_csv(os.path.join(data_path, filename))


@ex.main
def main(build_number, data_path, num_words, epochs, batch_size, pad_length, _run):
    with mlflow.start_run() as mlflow_run:
        print(f"MLflow run experiment_id: {mlflow_run.info.experiment_id}")
        print(f"MLflow run artifact_uri: {mlflow_run.info.artifact_uri}")
        mlflow.log_param("build_number", build_number)
        mlflow.log_param("data_path", data_path)
        mlflow.log_param("num_words", num_words)
        mlflow.log_param("epochs", epochs)
        mlflow.log_param("batch_size", batch_size)
        mlflow.log_param("pad_length", pad_length)

        abs_data_path = os.path.abspath(data_path)
        train_data = load_data(abs_data_path, 'train_data.csv')
        test_data = load_data(abs_data_path, 'test_data.csv')
        x_train, _ = split_data(train_data)
        x_test, y_test = split_data(test_data)
        # Fit the tokenizer on train + test text so the whole vocabulary is
        # covered; pad_length and num_words are injected by Sacred's capture.
        x_test, _ = tokenize(pd.concat([x_train, x_test]), x_test)
        model = load_trained_model()
        evaluate_and_save(model, x_test, y_test, abs_data_path)
        generate_and_save_comparison(abs_data_path)


if __name__ == '__main__':
    ex.run()