import os

# Disable oneDNN custom ops (must be set before TensorFlow/Keras is imported)
# to avoid floating-point round-off differences between runs.
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"

from keras.models import Sequential
from keras.layers import BatchNormalization, Dropout, Dense, Flatten, Conv1D
from keras.optimizers import Adam
import pandas as pd
from sklearn.metrics import confusion_matrix
from sacred import Experiment
from sacred.observers import FileStorageObserver, MongoObserver

ex = Experiment("464913")

# Report every run both to MongoDB and to numbered run directories
# under the local "experiments" folder.
ex.observers.append(
    MongoObserver.create(
        url="mongodb://admin:IUM_2021@tzietkiewicz.vm.wmi.amu.edu.pl:27017",
        db_name="sacred",
    )
)
ex.observers.append(FileStorageObserver("experiments"))


@ex.config
def my_config():
    # Default hyperparameters; they can be overridden from the command line.
    learning_rate = 0.001
    epochs = 5


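# Sacred injects config values into captured functions by parameter name,
# so learning_rate and epochs below default to the values from my_config.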
@ex.capture
def train_and_evaluate(_run, learning_rate, epochs):
    # Open the data files through Sacred so they are tracked as run resources.
    X_train = _run.open_resource("data/X_train.csv")
    X_val = _run.open_resource("data/X_val.csv")
    y_train = _run.open_resource("data/y_train.csv")
    y_val = _run.open_resource("data/y_val.csv")

    X_train = pd.read_csv(X_train)
    X_val = pd.read_csv(X_val)
    y_train = pd.read_csv(y_train)
    y_val = pd.read_csv(y_val)

    X_train = X_train.to_numpy()
    X_val = X_val.to_numpy()
    y_train = y_train.to_numpy()
    y_val = y_val.to_numpy()

    # Conv1D expects 3-D input: (samples, features, channels).
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
    X_val = X_val.reshape(X_val.shape[0], X_val.shape[1], 1)

    # Simple 1-D convolutional binary classifier.
    model = Sequential(
        [
            Conv1D(32, 2, activation="relu", input_shape=X_train[0].shape),
            BatchNormalization(),
            Dropout(0.2),
            Conv1D(64, 2, activation="relu"),
            BatchNormalization(),
            Dropout(0.5),
            Flatten(),
            Dense(64, activation="relu"),
            Dropout(0.5),
            Dense(1, activation="sigmoid"),
        ]
    )

    model.compile(
        optimizer=Adam(learning_rate=learning_rate),
        loss="binary_crossentropy",
        metrics=["accuracy"],
    )

    model.fit(
        X_train,
        y_train,
        validation_data=(X_val, y_val),
        epochs=epochs,
        verbose=1,
    )

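    # Note: model.fit's per-epoch metrics are not sent to Sacred by themselves;
    # if that is wanted, a Keras callback (e.g. keras.callbacks.LambdaCallback
    # with an on_epoch_end hook calling _run.log_scalar) could be passed via
    # the callbacks argument of model.fit.
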
    # Save the trained model and attach it to the Sacred run as an artifact.
    os.makedirs("sacred", exist_ok=True)
    model.save("sacred/model.keras")
    _run.add_artifact("sacred/model.keras")

    # Evaluate on the held-out test set.
    X_test = _run.open_resource("data/X_test.csv")
    y_test = _run.open_resource("data/y_test.csv")

    X_test = pd.read_csv(X_test)
    y_test = pd.read_csv(y_test)

    # Match the input shape used for training: (samples, features, 1).
    X_test = X_test.to_numpy()
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)

    y_pred = model.predict(X_test)
    # Threshold the sigmoid outputs to get binary class labels.
    y_pred = y_pred >= 0.5

    # sklearn's binary confusion matrix is laid out as [[TN, FP], [FN, TP]],
    # so accuracy is the trace divided by the total number of samples.
    cm = confusion_matrix(y_test, y_pred)
    accuracy = (cm[0, 0] + cm[1, 1]) / cm.sum()

    _run.log_scalar("accuracy", accuracy)


@ex.automain
def main(learning_rate, epochs):
    train_and_evaluate()
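

# Example invocations (the file name is illustrative only); Sacred's CLI
# accepts config overrides after the `with` keyword and a `print_config` command:
#   python train.py with learning_rate=0.01 epochs=10
#   python train.py print_config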