#!/usr/bin/python
import datetime
import logging
import os
import sys

import pandas as pd
from keras.models import Sequential
from keras import layers
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

logging.getLogger("tensorflow").setLevel(logging.ERROR)

# Hyperparameters, populated from the command line by read_params().
data_path = ''
num_words = 0
epochs = 0
batch_size = 0
pad_length = 0


def tokenize(x, x_train):
    """Fit a tokenizer on the full corpus, then encode and pad the training texts."""
    global pad_length, num_words
    tokenizer = Tokenizer(num_words=num_words)
    tokenizer.fit_on_texts(x)
    train_x = tokenizer.texts_to_sequences(x_train)
    vocabulary_length = len(tokenizer.word_index) + 1
    train_x = pad_sequences(train_x, padding='post', maxlen=pad_length)
    return train_x, vocabulary_length


def save_model(model):
    """Save the trained model as an HDF5 file under ./model, named with a timestamp."""
    model_name = 'neural_net_' + datetime.datetime.today().strftime('%d-%b-%Y-%H:%M:%S')
    model_dir = os.path.join(os.getcwd(), 'model')
    os.makedirs(model_dir, exist_ok=True)  # saving fails if the target directory is missing
    model.save(os.path.join(model_dir, model_name), save_format='h5', overwrite=True)


def train_model(model, x_train, y_train):
    global epochs, batch_size
    model.fit(x_train, y_train, epochs=epochs, verbose=False, batch_size=batch_size)


def get_model(vocabulary_length):
    """Build a small feed-forward binary classifier on top of a learned embedding."""
    global pad_length, batch_size
    model = Sequential()
    # Note: batch_size doubles as the embedding dimension here.
    model.add(layers.Embedding(input_dim=vocabulary_length, output_dim=batch_size,
                               input_length=pad_length))
    model.add(layers.Flatten())
    model.add(layers.Dense(10, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model


def split_data(data):
    x = data['tokens']
    y = data['fraudulent']
    return x, y


def load_data(data_path, filename) -> pd.DataFrame:
    return pd.read_csv(os.path.join(data_path, filename))


def read_params():
    """Parse the single comma-separated CLI argument:
    <data_path>,<num_words>,<epochs>,<batch_size>,<pad_length>
    """
    global data_path, num_words, epochs, batch_size, pad_length
    data_path, num_words, epochs, batch_size, pad_length = sys.argv[1].split(',')
    num_words = int(num_words)
    epochs = int(epochs)
    batch_size = int(batch_size)
    pad_length = int(pad_length)


def main():
    global data_path
    read_params()
    abs_data_path = os.path.abspath(data_path)
    train_data = load_data(abs_data_path, 'train_data.csv')
    test_data = load_data(abs_data_path, 'test_data.csv')
    x_train, y_train = split_data(train_data)
    x_test, _ = split_data(test_data)
    # Fit the tokenizer on train + test so the vocabulary covers both splits,
    # but only the training sequences are encoded and used for fitting the model.
    x_train, vocab_size = tokenize(pd.concat([x_train, x_test]), x_train)
    model = get_model(vocab_size)
    train_model(model, x_train, y_train)
    save_model(model)


if __name__ == '__main__':
    main()