#!/usr/bin/python
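"""Train a small fully connected PyTorch classifier on the data returned by
zadanie1.prepareData() (based on Customers.csv), track the run with Sacred
(file-storage and MongoDB observers) and save the trained weights as a run
artifact."""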
import pandas as pd
import numpy as np
import zadanie1 as z
import torch
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.observers import MongoObserver

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        # Unused convolutional layers, kept commented out:
        # self.conv1 = nn.Conv2d(3, 6, 5)
        # self.pool = nn.MaxPool2d(2, 2)
        # self.conv2 = nn.Conv2d(6, 16, 5)
        # self.fc1 = nn.Linear(16 * 5 * 5, 120)
        # self.fc2 = nn.Linear(20, 6)
        # Single fully connected layer: 6 input features -> 6 output logits.
        self.fc3 = nn.Linear(6, 6)

    def forward(self, x):
        # Forward pass of the unused convolutional variant, kept commented out:
        # x = self.pool(F.relu(self.conv1(x)))
        # x = self.pool(F.relu(self.conv2(x)))
        # x = torch.flatten(x, 1)
        # x = F.relu(self.fc1(x))
        # x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

def trainNet(_run, trainloader, criterion, optimizer, net, epochs=20):
    for epoch in range(epochs):
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data

            # Labels arrive as [batch, 1] float tensors; CrossEntropyLoss needs
            # [batch] long class indices.
            labels = labels.view(-1).long()

            optimizer.zero_grad()

            outputs = net(inputs)
            loss = criterion(outputs, labels)

            # Log the scalar value, not the tensor itself.
            _run.log_scalar("training.loss", loss.item())

            loss.backward()
            optimizer.step()

    print('Finished Training')

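
# Hypothetical helper (an assumption, not part of the original script): a minimal
# sketch of how the held-out test set could be scored with the trained net,
# assuming the same [features, label] batch format that trainNet consumes.
def evalNet(testloader, net):
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in testloader:
            # Same label handling as in trainNet: [batch, 1] floats -> [batch] class ids.
            labels = labels.view(-1).long()
            outputs = net(inputs)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on %d test samples: %.2f %%' % (total, 100.0 * correct / total))
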
# Sacred experiment: runs are recorded both to a local file store and to MongoDB.
ex = Experiment("s452627", interactive=True)
ex.observers.append(FileStorageObserver('my_runs'))
ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))

@ex.config
def my_config():
    epochs = 10
    learning_rate = 0.001

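# Usage note: both config values can be overridden from Sacred's command line,
# e.g. (script name is a placeholder):
#   python <script>.py with epochs=20 learning_rate=0.01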
@ex.automain
def my_main(epochs, learning_rate, _run):
    # Register the raw data file as a Sacred resource for this run.
    ex.open_resource("Customers.csv", "r")

    train, dev, test = z.prepareData()

    batch_size = 4

    trainlist = train.values.tolist()
    testlist = test.values.tolist()

    # Each row: column 0 is the label, the remaining columns are the features.
    trainset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in trainlist]
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                              shuffle=True, num_workers=2)

    testset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in testlist]
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                             shuffle=False, num_workers=2)

    classes = ('male', 'female')

    net = Net()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)

    trainNet(_run, trainloader, criterion, optimizer, net, int(float(epochs)))

    # Save the trained weights and attach them to the run as an artifact.
    PATH = './cifar_net.pth'
    torch.save(net.state_dict(), PATH)
    ex.add_artifact("cifar_net.pth")


# Not needed with @ex.automain:
# if __name__ == '__main__':
#     ex.run()