#!/usr/bin/python
import os
import pprint
import sys
import pandas as pd
from keras.models import Sequential, load_model
from keras import layers
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
import logging

# Silence TensorFlow's INFO and WARNING messages so only errors reach the console.
logging.getLogger("tensorflow").setLevel(logging.ERROR)


def tokenize(x, x_train, x_test, max_len):
    """Fit a Keras Tokenizer on the full corpus, then convert the train and
    test texts to padded integer sequences of length `max_len`."""
    tokenizer = Tokenizer(num_words=14000)
    tokenizer.fit_on_texts(x)
    train_x = tokenizer.texts_to_sequences(x_train)
    test_x = tokenizer.texts_to_sequences(x_test)
    # +1 because Keras reserves index 0 for padding.
    vocabulary_length = len(tokenizer.word_index) + 1

    train_x = pad_sequences(train_x, padding='post', maxlen=max_len)
    test_x = pad_sequences(test_x, padding='post', maxlen=max_len)
    return train_x, test_x, vocabulary_length


def evaluate_and_save(model, x, y, abs_path):
    """Evaluate the model on (x, y) and write the metrics to
    neural_network_evaluation.txt inside `abs_path`."""
    loss, accuracy = model.evaluate(x, y, verbose=False)
    # Threshold the sigmoid outputs at 0.5 to obtain binary class labels.
    y_predicted = (model.predict(x) >= 0.5).astype(int)
    with open(os.path.join(abs_path, 'neural_network_evaluation.txt'), "w") as log_file:
        for obj in (
                ('Accuracy: ', accuracy), ('Loss: ', loss), ('Precision: ', precision_score(y, y_predicted)),
                ('Recall: ', recall_score(y, y_predicted)), ('F1: ', f1_score(y, y_predicted)),
                # The second accuracy entry recomputes the metric with scikit-learn.
                ('Accuracy: ', accuracy_score(y, y_predicted))):
            pprint.pprint(obj, log_file)


def load_trained_model(abs_path, model_name):
    return load_model(os.path.join(abs_path, model_name))


def save_model(model, abs_path, model_name):
    model.save(os.path.join(abs_path, model_name))


def train_model(model, x_train, y_train):
    model.fit(x_train, y_train, epochs=1, verbose=False, batch_size=50)


def get_model(output_dim, vocabulary_length):
    """Build a small binary classifier: embedding -> flatten -> dense -> sigmoid."""
    model = Sequential()
    # input_length must match the max_len used when padding the sequences (100).
    model.add(layers.Embedding(input_dim=vocabulary_length,
                               output_dim=output_dim,
                               input_length=100))
    model.add(layers.Flatten())
    model.add(layers.Dense(10, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model


def split_data(data):
    x = data['tokens']
    y = data['fraudulent']
    return x, y


def load_data(data_path, filename) -> pd.DataFrame:
    return pd.read_csv(os.path.join(data_path, filename))


def main():
    # The only command-line argument is the directory holding the CSV data files.
    data_path = sys.argv[1]
    abs_data_path = os.path.abspath(data_path)
    train_data = load_data(abs_data_path, 'train_data.csv')
    test_data = load_data(abs_data_path, 'test_data.csv')
    x_train, y_train = split_data(train_data)
    x_test, y_test = split_data(test_data)
    # Fit the tokenizer on the combined corpus, then encode and pad both splits to length 100.
    x_train, x_test, vocab_size = tokenize(pd.concat([x_train, x_test]), x_train, x_test, 100)
    model = get_model(50, vocab_size)
    train_model(model, x_train, y_train)
    # save_model(model, abs_data_path, 'neural_network')
    evaluate_and_save(model, x_test, y_test, abs_data_path)


if __name__ == '__main__':
    main()
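
# Usage sketch: the script expects one command-line argument, a directory that
# contains train_data.csv and test_data.csv with 'tokens' (text) and
# 'fraudulent' (0/1 label) columns, and it writes neural_network_evaluation.txt
# back into that directory. The module name in the example invocation below is
# hypothetical; substitute this file's actual name.
#
#   python fraud_classifier.py path/to/data_dir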