import pandas as pd
import torch
import torch.nn as nn

from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader

from sacred import Experiment
from sacred.observers import FileStorageObserver
exint = Experiment("sacred_scopes", interactive=True)
exint.observers.append(FileStorageObserver('my_runs'))
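# Sacred config scope: the local variables defined below become configuration
# entries and are injected by parameter name into captured functions and main.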
@exint.config
def my_config():
    batch_size = 64
    learning_rate = 0.001
    epochs = 100
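# Captured functions can receive config values without passing them explicitly;
# prepare_message is only a minimal example and is not called below.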
@exint.capture
def prepare_message(msg):
    return msg
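# Entry point: Sacred injects batch_size, learning_rate and epochs
# from the config scope when the experiment is run.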
@exint.main
def my_main(batch_size, learning_rate, epochs):
    # Load the training data, drop the index column and any incomplete rows
    train_file = pd.read_csv('train.data').drop('Unnamed: 0', axis=1)
    df_pandas = train_file.dropna()

    X_train = df_pandas.drop('class', axis=1)
    Y_train = df_pandas['class']
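    # Scale all features to the [0, 1] range before converting to tensors.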
    scaler = MinMaxScaler()
    X_train = scaler.fit_transform(X_train)

    x_tensor = torch.tensor(X_train).float()
    y_tensor = torch.tensor(Y_train.values).float()

    train_ds = TensorDataset(x_tensor, y_tensor.unsqueeze(1))
    train_dl = DataLoader(train_ds, batch_size=batch_size)
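    # Binary classifier: two hidden layers (256 -> 128) with ReLU, batch norm
    # and dropout, and a single sigmoid output unit.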
    class ClassificationModel(nn.Module):
        def __init__(self, n_input_dim):
            super(ClassificationModel, self).__init__()
            self.layer_1 = nn.Linear(n_input_dim, 256)
            self.layer_2 = nn.Linear(256, 128)
            self.layer_out = nn.Linear(128, 1)

            self.relu = nn.ReLU()
            self.sigmoid = nn.Sigmoid()
            self.dropout = nn.Dropout(p=0.1)
            self.batchnorm1 = nn.BatchNorm1d(256)
            self.batchnorm2 = nn.BatchNorm1d(128)

        def forward(self, inputs):
            x = self.relu(self.layer_1(inputs))
            x = self.batchnorm1(x)
            x = self.relu(self.layer_2(x))
            x = self.batchnorm2(x)
            x = self.dropout(x)
            x = self.sigmoid(self.layer_out(x))

            return x

    model = ClassificationModel(X_train.shape[1])
    print(model)
    # The model already applies a sigmoid in forward(), so use BCELoss here;
    # BCEWithLogitsLoss would apply a second sigmoid internally.
    loss_func = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    model.train()
    train_loss = []
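    # Mini-batch training loop: one full pass over train_dl per epoch,
    # printing the last batch loss every 10 epochs.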
    for epoch in range(epochs):
        for xb, yb in train_dl:
            y_pred = model(xb)            # Forward propagation
            loss = loss_func(y_pred, yb)  # Loss computation
            optimizer.zero_grad()         # Clear all previous gradients
            loss.backward()               # Back propagation
            optimizer.step()              # Update the parameters

        if epoch % 10 == 0:
            print(f"Loss at epoch {epoch}: {loss.item()}")
        train_loss.append(loss.item())
    print('Last iteration loss value: ' + str(loss.item()))
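    # Serialize the trained model with TorchScript so it can later be loaded
    # (e.g. with torch.jit.load('model_scripted.pt')) without the Python class.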
    model_scripted = torch.jit.script(model)  # Export to TorchScript
    model_scripted.save('model_scripted.pt')  # Save
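# Launch the experiment; the FileStorageObserver stores each run's config,
# metrics and captured output under my_runs/. Config values can be overridden
# per run, e.g. exint.run(config_updates={'epochs': 10}).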
exint.run()