ium_487197/ium_sacred.py

116 lines
3.9 KiB
Python
Raw Normal View History

2023-05-12 03:20:45 +02:00
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.optimizers import Adam
import pandas as pd
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import LabelEncoder
import argparse
import shutil
from sacred.observers import FileStorageObserver, MongoObserver
from sacred import Experiment
from sklearn import metrics
import math
# Sacred experiment definition.
# save_git_info=False: skip git metadata collection (repo may be absent in CI);
# interactive=True: allow running outside a plain `python script.py` invocation.
ex = Experiment('s487197-train', save_git_info=False,interactive=True)
# Persist run metadata/artifacts both to the local filesystem and to MongoDB.
ex.observers.append(FileStorageObserver('sacred_results'))
# NOTE(review): hard-coded credentials in the connection string — should be
# read from an environment variable or config file, not committed to source.
ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))
#ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))
def write_list(names, path='listfile.txt'):
    """Write every item of *names* to a text file, one item per line.

    Args:
        names: iterable of arbitrary items; each is converted with ``str()``.
        path: output file path. Defaults to ``'listfile.txt'`` to preserve
            the original hard-coded behavior for existing callers.
    """
    with open(path, 'w') as fp:
        fp.write("\n".join(str(item) for item in names))
def get_x_y(data):
    """Drop unused columns, label-encode everything, and split features/target.

    Returns:
        (encoded_frame, x, y): the fully encoded frame, the feature columns
        (everything except 'Weapon'), and the encoded 'Weapon' target.
    """
    encoder = LabelEncoder()
    # Location / time / bookkeeping columns are not used as model inputs.
    data = data.drop(columns=["Location 1"])
    unused = ["Longitude", "Latitude", "Location", "Total Incidents",
              "CrimeTime", "Neighborhood", "Post", "CrimeDate",
              "Inside/Outside"]
    data = data.drop(columns=unused, axis=1)
    # Encode every remaining column (the encoder is re-fit per column).
    for col in data.columns:
        data[col] = encoder.fit_transform(data[col])
    y = data['Weapon']
    x = data.drop('Weapon', axis=1)
    return data, x, y
@ex.config
def my_config():
    # Sacred config function: the locals defined here (epochs, lr,
    # validation_split) become experiment config entries that Sacred injects
    # into @ex.capture / @ex.main functions by parameter name. Do not rename
    # these locals without updating the captured function signatures.
    # NOTE(review): parse_args() consumes sys.argv at config-construction
    # time — assumes the script is invoked with only these flags; any other
    # CLI argument would make argparse exit. Verify against how the script
    # is launched.
    parser = argparse.ArgumentParser(description='Train')
    parser.add_argument('-epochs', type=int, default=20)
    parser.add_argument('-lr', type=float, default=0.01)
    parser.add_argument('-validation_split', type=float, default=0.2)
    args = parser.parse_args()
    epochs = args.epochs
    lr = args.lr
    validation_split = args.validation_split
2023-05-12 03:20:45 +02:00
2023-05-12 04:46:05 +02:00
2023-05-12 03:37:36 +02:00
@ex.capture
def prepare_message(epochs, lr, validation_split):
    """Format the Sacred-injected config values into a short status string."""
    message = "{0} {1} {2}!".format(epochs, lr, validation_split)
    return message
2023-05-12 03:20:45 +02:00
2023-05-12 04:46:05 +02:00
@ex.main
def my_main(epochs, lr, validation_split, _run):
    """Train a weapon classifier on the Baltimore crime training data.

    Args (injected by Sacred from the config):
        epochs: number of training epochs.
        lr: learning rate for the Adam optimizer.
        validation_split: fraction of training data held out for validation.
        _run: the Sacred run object.
    """
    train = pd.read_csv('baltimore_train.csv')
    data_train, x_train, y_train = get_x_y(train)

    # Normalization layer learns per-feature mean/variance from the
    # training features via adapt().
    normalizer = tf.keras.layers.Normalization(axis=1)
    normalizer.adapt(np.array(x_train))

    # Pass layers as a list — Sequential's first argument is a layer list.
    model = Sequential([normalizer])
    model.add(Dense(64, activation="relu"))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(10, activation='relu'))
    # 5 output classes with sparse (integer) labels from LabelEncoder.
    model.add(Dense(5, activation="softmax"))
    model.compile(Adam(learning_rate=lr), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    model.summary()

    history = model.fit(
        x_train,
        y_train,
        epochs=epochs,
        validation_split=validation_split)

    # Log per-epoch training metrics to the Sacred observers.
    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch
    for _, row in hist.iterrows():
        ex.log_scalar('training.loss', row['loss'])
        ex.log_scalar('accuracy', row['accuracy'])

    model.save('baltimore_model')
    # BUG FIX: model.save('baltimore_model') writes a SavedModel *directory*,
    # but ex.add_artifact() requires a single file. Archive the directory
    # (shutil is already imported at module level) and attach the zip.
    archive_path = shutil.make_archive('baltimore_model', 'zip', 'baltimore_model')
    ex.add_artifact(archive_path)

    """
    baltimore_data_test = pd.read_csv('baltimore_test.csv')
    baltimore_data_test.columns = train.columns
    baltimore_data_test, x_test, y_test = get_x_y(baltimore_data_test)
    scores = model.evaluate(x_test, y_test)
    print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
    y_predicted = model.predict(x_test)
    y_predicted = np.argmax(y_predicted, axis=1)
    test_results = {}
    test_results['Weapon'] = model.evaluate(
        x_test,
        y_test, verbose=0)
    write_list(y_predicted)
    print('Accuracy : ', scores[1] * 100)
    print('Mean Absolute Error : ', metrics.mean_absolute_error(y_test, y_predicted))
    print('Root Mean Squared Error : ', math.sqrt(metrics.mean_squared_error(y_test, y_predicted)))
    data = {
        'mse': metrics.mean_squared_error(y_test, y_predicted),
        'rmse': math.sqrt(metrics.mean_squared_error(y_test, y_predicted)),
        'accuracy': scores[1] * 100
    }
    _run.log_scalar('accuracy', data['accuracy'])
    _run.log_scalar('rmse', data['rmse'])
    _run.log_scalar('accuracy', data['accuracy'])
    """
2023-05-12 04:46:05 +02:00
# Launch the Sacred experiment only when executed as a script, so importing
# this module (e.g. from tests) does not start a training run.
if __name__ == "__main__":
    ex.run()
2023-05-12 03:20:45 +02:00