AdamOsiowy123
ac78319450
Some checks failed
s444452-training/pipeline/head There was a failure building this commit
143 lines
5.2 KiB
Python
143 lines
5.2 KiB
Python
#!/usr/bin/python
|
|
import glob
|
|
import os
|
|
import sys
|
|
import pandas as pd
|
|
from keras.models import load_model
|
|
from keras.preprocessing.text import Tokenizer
|
|
from keras.preprocessing.sequence import pad_sequences
|
|
from sklearn.metrics import precision_score, recall_score, f1_score
|
|
import matplotlib.pyplot as plt
|
|
from sacred.observers import MongoObserver
|
|
from sacred.observers import FileStorageObserver
|
|
from sacred import Experiment
|
|
import mlflow
|
|
import logging
|
|
|
|
# Quiet everything below WARN; module-level logger for local messages.
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)

# Tracking URI comes from the environment; the hard-coded one is kept for reference.
# mlflow.set_tracking_uri("http://172.17.0.1:5000")
mlflow.set_experiment("s444452")

# Sacred experiment for this evaluation run; results are recorded both to
# MongoDB and to the local `my_runs` directory.
ex = Experiment(name='s444452_fake_job_classification_evaluation', save_git_info=False)
ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017',
                                  db_name='sacred'))
ex.observers.append(FileStorageObserver('my_runs'))

# Positional CLI arguments (presumably supplied by the CI pipeline — confirm
# against the Jenkins/pipeline definition):
build_number = sys.argv[1]       # current CI build number (kept as string)
data_path = sys.argv[2]          # directory holding train_data.csv / test_data.csv
epochs = int(sys.argv[3])        # recorded as config; not used directly in evaluation
num_words = int(sys.argv[4])     # tokenizer vocabulary cap
batch_size = int(sys.argv[5])    # recorded as config; not used directly in evaluation
pad_length = int(sys.argv[6])    # sequence padding length for the tokenizer
|
|
|
|
|
|
@ex.config
def config():
    # Sacred config scope. Sacred re-executes this body with the module
    # globals injected, so each `name = name` line captures the CLI-derived
    # module-level value as an experiment config entry of the same name.
    # (This pattern only works under @ex.config — in a plain function it
    # would raise UnboundLocalError.)
    build_number = build_number
    data_path = data_path
    epochs = epochs
    num_words = num_words
    batch_size = batch_size
    pad_length = pad_length
|
|
|
|
|
|
@ex.capture
def tokenize(x, x_test, pad_length, num_words):
    """Fit a Keras tokenizer on *x* and encode *x_test* as padded sequences.

    :param x: texts to fit the vocabulary on
    :param x_test: texts to convert to integer sequences
    :param pad_length: fixed length sequences are post-padded/truncated to
    :param num_words: vocabulary size cap passed to the tokenizer
    :return: tuple of (padded test sequences, vocabulary length incl. padding index)
    """
    word_tokenizer = Tokenizer(num_words=num_words)
    word_tokenizer.fit_on_texts(x)
    # +1 accounts for the reserved index 0 used for padding.
    vocab_size = len(word_tokenizer.word_index) + 1
    sequences = word_tokenizer.texts_to_sequences(x_test)
    padded = pad_sequences(sequences, padding='post', maxlen=pad_length)
    return padded, vocab_size
|
|
|
|
|
|
@ex.capture
def evaluate_and_save(model, x, y, abs_path, build_number, _run):
    """Evaluate *model* on (x, y), append one metrics row to a CSV, and log
    the metrics to both MLflow and the Sacred run.

    :param model: trained Keras model
    :param x: padded test sequences
    :param y: ground-truth binary labels
    :param abs_path: directory where the evaluation CSV is kept
    :param build_number: CI build number, first column of the CSV row
    :param _run: Sacred run object (injected by @ex.capture)
    """
    # verbose=False silences the per-batch Keras progress output.
    loss, accuracy = model.evaluate(x, y, verbose=False)
    # Binarize sigmoid outputs at the conventional 0.5 threshold.
    y_predicted = (model.predict(x) >= 0.5).astype(int)
    evaluation_file_path = os.path.join(abs_path, 'neural_network_evaluation.csv')
    # 'a+' appends, so each build adds one row to the metric history the
    # comparison plot is built from.
    with open(evaluation_file_path, 'a+') as f:
        precision = precision_score(y, y_predicted)
        recall = recall_score(y, y_predicted)
        f1 = f1_score(y, y_predicted)
        result = f'{build_number},{accuracy},{loss},{precision},{recall},{f1}'
        f.write(result + '\n')
    # Mirror the metrics to MLflow...
    mlflow.log_metric("accuracy", accuracy)
    mlflow.log_metric("loss", loss)
    mlflow.log_metric("precision", precision)
    mlflow.log_metric("recall", recall)
    mlflow.log_metric("f1_score", f1)
    # ...and to Sacred (per-run scalars; equivalent to ex.log_scalar).
    _run.log_scalar("training.loss", loss)
    _run.log_scalar("training.accuracy", accuracy)
    # Attach the accumulated CSV to the Sacred run as an artifact.
    ex.add_artifact(evaluation_file_path)
|
|
|
|
|
|
def generate_and_save_comparison(abs_path):
    """Plot every recorded evaluation metric against build number and attach
    the resulting PNG to the Sacred run.

    :param abs_path: directory holding neural_network_evaluation.csv
    :return: the matplotlib Axes holding the plot
    """
    evaluation_file_path = os.path.join(abs_path, 'neural_network_evaluation.csv')
    # The CSV has no header row; column names must match the order written
    # by evaluate_and_save.
    df = pd.read_csv(evaluation_file_path, sep=',', header=None,
                     names=['build_number', 'Accuracy', 'Loss', 'Precision', 'Recall', 'F1'])
    # Drop rows recorded with build_number 0 (presumably placeholder/seed
    # rows — confirm against how the CSV is initialized).
    df = df[df.build_number != 0]
    # 16:9 aspect scaled down to a modest figure size.
    fig = plt.figure(figsize=(16 * .6, 9 * .6))
    ax = fig.add_subplot(111)
    ax.set_title('Evaluation')
    X = df['build_number']
    ax.set_xlabel('build_number')
    ax.set_xticks(df['build_number'])
    # One line per metric with fixed colors so successive reports look alike.
    for metrics, color in zip(['Accuracy', 'Loss', 'Precision', 'Recall', 'F1'],
                              ['green', 'red', 'blue', 'brown', 'magenta']):
        ax.plot(X, df[metrics], color=color, lw=1, label=f'{metrics}')
    ax.legend()
    plt.savefig(os.path.join(abs_path, 'evaluation.png'), format='png')
    ex.add_artifact(os.path.join(abs_path, 'evaluation.png'))
    return ax
|
|
|
|
|
|
def load_trained_model():
    """Load the persisted Keras model from the ./model directory.

    Globs for a saved model named 'neural_net' under <cwd>/model and loads
    the first match.

    :return: the loaded Keras model
    :raises FileNotFoundError: when no saved model matches the pattern
        (previously this surfaced as a bare IndexError on models[0]).
    """
    model_dir = os.path.join(os.getcwd(), 'model')
    # Timestamped variants ('neural_net_*') were used historically; the
    # current pipeline saves a single fixed-name model.
    glob_pattern = os.path.join(model_dir, 'neural_net')
    models = [os.path.split(path)[1] for path in glob.glob(glob_pattern)]
    if not models:
        raise FileNotFoundError(
            f"No saved model found matching {glob_pattern!r} - "
            f"run the training pipeline first.")
    return load_model(os.path.join(model_dir, models[0]))
|
|
|
|
|
|
def split_data(data):
    """Split a job-postings frame into features and labels.

    :param data: DataFrame with 'tokens' (text) and 'fraudulent' (0/1) columns
    :return: tuple of (tokens series, fraudulent-labels series)
    """
    return data['tokens'], data['fraudulent']
|
|
|
|
|
|
def load_data(data_path, filename) -> pd.DataFrame:
    """Read *filename* from directory *data_path* as a CSV.

    :param data_path: directory containing the file
    :param filename: CSV file name
    :return: parsed DataFrame
    """
    full_path = os.path.join(data_path, filename)
    return pd.read_csv(full_path)
|
|
|
|
|
|
@ex.main
def main(build_number, data_path, num_words, epochs, batch_size, pad_length, _run):
    """Evaluation entry point, invoked by Sacred with the captured config.

    Loads the train/test CSVs, re-fits the tokenizer on the combined corpus,
    loads the trained model, evaluates it on the test split, and renders the
    cross-build comparison plot.
    """
    # Everything runs inside one MLflow run so the params logged here and
    # the metrics logged in evaluate_and_save land in the same run.
    with mlflow.start_run() as mlflow_run:
        print("MLflow run experiment_id: {0}".format(mlflow_run.info.experiment_id))
        print("MLflow run artifact_uri: {0}".format(mlflow_run.info.artifact_uri))
        mlflow.log_param("build_number", build_number)
        mlflow.log_param("data_path", data_path)
        mlflow.log_param("num_words", num_words)
        mlflow.log_param("epochs", epochs)
        mlflow.log_param("batch_size", batch_size)
        mlflow.log_param("pad_length", pad_length)

        abs_data_path = os.path.abspath(data_path)
        train_data = load_data(abs_data_path, 'train_data.csv')
        test_data = load_data(abs_data_path, 'test_data.csv')
        x_train, _ = split_data(train_data)
        x_test, y_test = split_data(test_data)
        # Fit the tokenizer on train+test text so the test encoding matches
        # the vocabulary used at training time; pad_length and num_words are
        # injected by Sacred via @ex.capture.
        x_test, _ = tokenize(pd.concat([x_train, x_test]), x_test)
        model = load_trained_model()
        evaluate_and_save(model, x_test, y_test, abs_data_path)
        generate_and_save_comparison(abs_data_path)
|
|
|
|
|
|
# Start the Sacred experiment: builds the config and invokes main().
ex.run()
|