diff --git a/__pycache__/train.cpython-38.pyc b/__pycache__/train.cpython-38.pyc
new file mode 100644
index 0000000..e6316ba
Binary files /dev/null and b/__pycache__/train.cpython-38.pyc differ
diff --git a/__pycache__/zadanie1.cpython-38.pyc b/__pycache__/zadanie1.cpython-38.pyc
new file mode 100644
index 0000000..453d620
Binary files /dev/null and b/__pycache__/zadanie1.cpython-38.pyc differ
diff --git a/cifar_net.pth b/cifar_net.pth
new file mode 100644
index 0000000..88caf03
Binary files /dev/null and b/cifar_net.pth differ
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..1fc4aa7
--- /dev/null
+++ b/test.py
@@ -0,0 +1,83 @@
+import pandas as pd
+import numpy as np
+import zadanie1 as z
+import train as tr
+import torch
+import torchvision
+import torchvision.transforms as transforms
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+
+class Net(nn.Module):
+    def __init__(self):
+        super().__init__()
+        #self.conv1 = nn.Conv2d(3, 6, 5)
+        #self.pool = nn.MaxPool2d(2, 2)
+        #self.conv2 = nn.Conv2d(6, 16, 5)
+        #self.fc1 = nn.Linear(16 * 5 * 5, 120)
+        #self.fc2 = nn.Linear(20, 6)
+        self.fc3 = nn.Linear(6, 6)
+
+    def forward(self, x):
+        #x = self.pool(F.relu(self.conv1(x)))
+        #x = self.pool(F.relu(self.conv2(x)))
+        #x = torch.flatten(x, 1)
+        #x = F.relu(self.fc1(x))
+        #x = F.relu(self.fc2(x))
+        x = self.fc3(x)
+        return x
+
+testdata = []
+
+def testNet(testloader):
+
+    PATH = './cifar_net.pth'
+    net = Net()
+    net.load_state_dict(torch.load(PATH))
+
+    correct = 0
+    total = 0
+    with torch.no_grad():
+        for data in testloader:
+            input, labels = data
+
+            labelsX = torch.Tensor([x for x in labels])
+            labels = labelsX.type(torch.LongTensor)
+
+            outputs = net(input)
+
+            _, predicted = torch.max(outputs.data, 1)
+            testdata.append([input, labels, predicted])
+            total += labels.size(0)
+            correct += (predicted == labels).sum().item()
+
+    print(f'Accuracy of the network: {100 * correct // total} %')
+
+if __name__ == '__main__':
+
+    train, dev, test = z.prepareData()
+
+    batch_size = 4
+
+    trainlist = train.values.tolist()
+    testlist = test.values.tolist()
+
+    trainset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in trainlist]
+    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
+                                              shuffle=True, num_workers=2)
+
+    testset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in testlist]
+    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
+                                             shuffle=False, num_workers=2)
+
+    classes = ('male', 'female')
+
+    testNet(testloader)
+
+    with open('testresults.txt', 'w') as the_file:
+        for item in testdata:
+            for i in range(len(item)):
+                the_file.write(f'data: {item[0][i]} \n true value: {item[1][i]} \n prediction: {item[2][i]}\n')
+
+
\ No newline at end of file
diff --git a/testresults.txt b/testresults.txt
new file mode 100644
index 0000000..4b4976d
--- /dev/null
+++ b/testresults.txt
@@ -0,0 +1,450 @@
+data: tensor([0.8081, 0.4909, 0.5100, 0.5556, 0.2353, 0.5000])
+ true value: 0
+ prediction: 1
+data: tensor([0.2020, 0.9983, 0.6000, 0.3333, 0.4706, 0.3750])
+ true value: 0
+ prediction: 1
+data: tensor([0.8990, 0.6046, 0.5300, 0.5556, 0.5882, 0.0000])
+ true value: 1
+ prediction: 1
+data: tensor([0.7576, 0.7115, 0.1200, 0.2222, 0.4118, 0.6250])
+ true value: 1
+ prediction: 1
+data: tensor([0.0505, 0.6646, 0.5500, 1.0000, 0.3529, 0.2500])
+ true value: 1
+ prediction: 1
+data: tensor([0.7980, 0.6478, 0.4100, 0.5556, 0.5882, 0.3750])
+ true value: 1
+ prediction: 1
+data: tensor([0.9293, 0.9043, 0.6500,
0.5556, 0.3529, 0.5000]) + true value: 0 + prediction: 1 +data: tensor([0.1515, 0.8229, 0.2700, 0.8889, 0.3529, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.9495, 0.3447, 0.8400, 1.0000, 0.5882, 0.7500]) + true value: 0 + prediction: 1 +data: tensor([0.6869, 0.4822, 0.4300, 0.4444, 0.4706, 0.0000]) + true value: 1 + prediction: 1 +data: tensor([0.2929, 0.2790, 0.7300, 0.3333, 0.2941, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.8788, 0.6090, 0.6100, 0.1111, 0.5294, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.2929, 0.8699, 0.0700, 0.6667, 0.5294, 0.7500]) + true value: 1 + prediction: 1 +data: tensor([0.4343, 0.2654, 0.4900, 0.1111, 0.2941, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.4343, 0.5450, 0.9000, 0.5556, 0.5882, 0.5000]) + true value: 0 + prediction: 1 +data: tensor([0.2323, 0.3998, 0.7000, 0.6667, 0.2941, 0.7500]) + true value: 0 + prediction: 1 +data: tensor([0.8788, 0.9168, 0.2800, 0.5556, 0.2353, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.8182, 0.5540, 0.5400, 0.6667, 0.4706, 0.2500]) + true value: 1 + prediction: 1 +data: tensor([0.3232, 0.9750, 0.8300, 0.1111, 0.2353, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.0707, 0.9986, 0.1800, 0.5556, 0.3529, 0.6250]) + true value: 0 + prediction: 1 +data: tensor([0.9192, 0.7089, 0.6600, 0.5556, 0.4118, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.1010, 0.8139, 0.2100, 0.5556, 0.3529, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.2020, 0.5989, 0.8800, 0.5556, 0.3529, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.0505, 0.8447, 0.6100, 0.3333, 0.4706, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.5455, 0.2959, 0.3400, 0.1111, 0.5882, 0.3750]) + true value: 0 + prediction: 1 +data: tensor([0.1212, 0.2929, 0.6300, 0.1111, 0.4706, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.9091, 0.3290, 0.9900, 0.5556, 0.5882, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.9192, 0.8242, 0.9600, 1.0000, 0.0000, 0.7500]) + true value: 1 + prediction: 1 +data: tensor([0.9091, 0.3211, 0.3500, 0.4444, 0.0588, 0.7500]) + true value: 1 + prediction: 1 +data: tensor([0.6061, 0.5712, 0.3600, 0.7778, 0.0000, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.4343, 0.3891, 0.4700, 0.5556, 0.6471, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.5253, 0.7838, 0.7700, 0.1111, 0.2353, 0.3750]) + true value: 0 + prediction: 1 +data: tensor([0.8182, 0.8033, 0.9100, 0.4444, 0.0000, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.4343, 0.7040, 0.3700, 0.5556, 0.2941, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.2020, 0.5960, 0.4100, 0.3333, 0.5294, 0.0000]) + true value: 1 + prediction: 1 +data: tensor([0.7172, 0.3169, 0.4600, 0.5556, 0.0588, 0.2500]) + true value: 1 + prediction: 1 +data: tensor([0.7576, 0.3232, 0.6500, 0.7778, 0.4118, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.5253, 0.7014, 0.5500, 0.5556, 0.0588, 0.0000]) + true value: 1 + prediction: 1 +data: tensor([0.4040, 0.9856, 0.1400, 0.7778, 0.0000, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.4646, 0.8728, 0.3800, 0.1111, 0.1765, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.9899, 0.6589, 0.3000, 0.5556, 0.0000, 0.7500]) + true value: 0 + prediction: 1 +data: tensor([0.8485, 0.2990, 0.9600, 0.1111, 0.0588, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.7879, 0.3721, 0.5400, 0.5556, 0.0588, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.6465, 0.2896, 0.5300, 
0.5556, 0.0588, 0.7500]) + true value: 0 + prediction: 1 +data: tensor([0.9899, 0.4630, 0.7900, 0.2222, 0.0000, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.3232, 0.5500, 0.2800, 0.1111, 0.3529, 0.3750]) + true value: 0 + prediction: 1 +data: tensor([0.2222, 0.9709, 0.4400, 0.6667, 0.1765, 0.7500]) + true value: 0 + prediction: 1 +data: tensor([0.1717, 0.7937, 0.3500, 0.4444, 0.0588, 0.7500]) + true value: 1 + prediction: 1 +data: tensor([0.0909, 0.7996, 0.1800, 0.5556, 0.0000, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.2424, 0.2716, 0.0400, 0.5556, 0.0000, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.0808, 0.8281, 0.7500, 0.5556, 0.5294, 0.2500]) + true value: 1 + prediction: 1 +data: tensor([0.6263, 0.9799, 0.5400, 0.1111, 0.0588, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.3737, 0.8916, 0.4500, 0.4444, 0.0000, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.6869, 0.5801, 0.4700, 0.8889, 0.5294, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.1515, 0.3077, 0.6700, 0.6667, 0.0000, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.4444, 0.9142, 0.8900, 0.5556, 0.7059, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.1818, 0.8579, 0.1600, 0.4444, 0.0000, 0.6250]) + true value: 0 + prediction: 1 +data: tensor([0.6970, 0.8881, 0.8000, 0.7778, 0.0588, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.1616, 0.7906, 0.8300, 0.2222, 0.1176, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.7172, 0.4833, 0.7400, 0.5556, 0.0588, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.5758, 0.5406, 0.9400, 0.1111, 0.0000, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.9798, 0.8254, 0.7700, 0.1111, 0.0000, 0.6250]) + true value: 0 + prediction: 1 +data: tensor([0.6869, 0.2709, 0.4800, 0.5556, 0.0000, 0.2500]) + true value: 0 + prediction: 1 +data: tensor([0.6364, 0.8193, 0.8100, 0.0000, 0.0588, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.6869, 0.6842, 0.6100, 0.5556, 0.4706, 0.2500]) + true value: 0 + prediction: 1 +data: tensor([0.6970, 0.5483, 0.7700, 0.2222, 0.2353, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.7374, 0.9498, 0.5000, 0.4444, 0.0588, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.1919, 0.7087, 0.9600, 0.6667, 0.0588, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.0101, 0.6848, 0.6700, 0.5556, 0.0000, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.0606, 0.6611, 0.7900, 0.1111, 0.4706, 0.6250]) + true value: 0 + prediction: 1 +data: tensor([0.6869, 0.9109, 0.1700, 0.1111, 0.0000, 0.2500]) + true value: 1 + prediction: 1 +data: tensor([0.3939, 0.3227, 0.9100, 0.5556, 0.0000, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.6566, 0.3806, 0.3600, 0.3333, 0.0588, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.3030, 0.4468, 0.0900, 0.4444, 0.3529, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.4646, 0.6644, 0.9500, 1.0000, 0.4706, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.4040, 0.2876, 0.7000, 0.5556, 0.0000, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.2020, 0.6938, 0.1600, 0.1111, 0.2941, 0.7500]) + true value: 1 + prediction: 1 +data: tensor([0.8889, 0.7290, 0.5900, 1.0000, 0.4706, 0.3750]) + true value: 0 + prediction: 1 +data: tensor([0.9495, 0.5918, 0.5400, 0.2222, 0.5294, 0.6250]) + true value: 0 + prediction: 1 +data: tensor([0.5253, 0.8635, 0.5300, 0.0000, 0.2941, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.6869, 0.2819, 0.9700, 
1.0000, 0.4118, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.0404, 0.9492, 0.4100, 0.5556, 0.5294, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.5960, 0.7847, 0.7800, 0.7778, 0.5294, 0.7500]) + true value: 0 + prediction: 1 +data: tensor([0.3232, 0.9707, 0.3800, 0.4444, 0.3529, 0.7500]) + true value: 0 + prediction: 1 +data: tensor([0.9293, 0.9201, 0.9400, 0.6667, 0.2353, 0.2500]) + true value: 1 + prediction: 1 +data: tensor([0.0909, 0.3741, 0.0200, 0.5556, 0.4706, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.4141, 0.2992, 0.0400, 0.1111, 0.4706, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.7879, 0.5942, 0.8000, 0.4444, 0.5294, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.0000, 0.3849, 0.7400, 0.5556, 0.4118, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.1111, 0.8041, 0.1700, 0.1111, 0.4706, 0.0000]) + true value: 1 + prediction: 1 +data: tensor([0.4646, 0.7263, 0.5000, 0.1111, 0.4118, 0.7500]) + true value: 0 + prediction: 1 +data: tensor([0.9596, 0.6674, 0.3400, 0.5556, 0.3529, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.9798, 0.3954, 0.8900, 0.6667, 0.4706, 0.2500]) + true value: 1 + prediction: 1 +data: tensor([0.9495, 0.3667, 0.3000, 0.8889, 0.2353, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.0000, 0.5576, 0.4600, 0.7778, 0.2353, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.0303, 0.3927, 0.6900, 0.1111, 0.3529, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.7071, 0.4048, 0.6400, 0.6667, 0.2941, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.5859, 0.4611, 0.2500, 0.6667, 0.2941, 0.2500]) + true value: 0 + prediction: 1 +data: tensor([0.7273, 0.9597, 0.4600, 0.1111, 0.4706, 0.6250]) + true value: 0 + prediction: 1 +data: tensor([0.2323, 0.3153, 0.7900, 0.0000, 0.4118, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.8788, 0.6746, 0.4200, 0.5556, 0.4118, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.6465, 0.7507, 0.5800, 0.7778, 0.2353, 0.6250]) + true value: 0 + prediction: 1 +data: tensor([0.1414, 0.3580, 0.1700, 0.5556, 0.5294, 0.2500]) + true value: 1 + prediction: 1 +data: tensor([0.5455, 0.8748, 0.6500, 0.5556, 0.4118, 0.6250]) + true value: 0 + prediction: 1 +data: tensor([0.7677, 0.6977, 0.4400, 0.7778, 0.2941, 0.2500]) + true value: 0 + prediction: 1 +data: tensor([0.5253, 0.3340, 0.3800, 0.5556, 0.2941, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.9697, 0.5818, 0.0600, 0.5556, 0.5294, 0.3750]) + true value: 0 + prediction: 1 +data: tensor([0.5556, 0.3512, 0.5000, 0.1111, 0.5882, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.1919, 0.6824, 1.0000, 0.5556, 0.3529, 0.3750]) + true value: 0 + prediction: 1 +data: tensor([0.2929, 0.7032, 0.7400, 1.0000, 0.2941, 0.2500]) + true value: 0 + prediction: 1 +data: tensor([0.9697, 0.6644, 0.4700, 0.2222, 0.4118, 0.3750]) + true value: 0 + prediction: 1 +data: tensor([0.9192, 0.7506, 0.6400, 0.3333, 0.4118, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.3737, 0.5391, 0.3800, 0.1111, 0.2941, 0.5000]) + true value: 0 + prediction: 1 +data: tensor([0.4242, 0.3620, 0.6900, 0.4444, 0.2353, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.5859, 0.7856, 0.8000, 0.4444, 0.4706, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.4242, 0.9730, 0.2400, 0.4444, 0.3529, 0.0000]) + true value: 1 + prediction: 1 +data: tensor([0.4848, 0.6930, 0.4500, 0.3333, 0.5294, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.5960, 0.5353, 0.8700, 
0.5556, 0.5294, 0.2500]) + true value: 0 + prediction: 1 +data: tensor([0.4545, 0.8031, 0.3200, 0.4444, 0.5294, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.7374, 0.7501, 0.9700, 0.7778, 0.2941, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.7071, 0.5714, 0.4900, 0.4444, 0.2353, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.5253, 0.5608, 0.4600, 0.2222, 0.4118, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.7071, 0.6546, 0.5100, 0.8889, 0.4118, 0.7500]) + true value: 0 + prediction: 1 +data: tensor([0.6162, 0.9590, 0.9600, 0.5556, 0.5294, 0.5000]) + true value: 0 + prediction: 1 +data: tensor([0.4646, 0.6024, 0.3400, 0.5556, 0.4706, 0.2500]) + true value: 1 + prediction: 1 +data: tensor([0.6566, 0.7065, 0.3200, 0.8889, 0.4706, 0.3750]) + true value: 0 + prediction: 1 +data: tensor([0.7677, 0.6529, 0.8800, 0.6667, 0.5882, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.3131, 0.3574, 0.4200, 0.7778, 0.5294, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.5556, 0.5638, 0.8900, 0.6667, 0.2353, 0.6250]) + true value: 0 + prediction: 1 +data: tensor([0.9899, 0.5488, 0.1200, 0.2222, 0.2941, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.4040, 0.8887, 0.3600, 0.6667, 0.2353, 0.5000]) + true value: 0 + prediction: 1 +data: tensor([0.1414, 0.8061, 0.5900, 0.1111, 0.4706, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.6061, 0.6708, 0.8200, 0.2222, 0.4118, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.6061, 0.6631, 1.0000, 0.3333, 0.4706, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.8485, 0.5505, 0.8500, 0.5556, 0.5882, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.1010, 0.4576, 0.7600, 0.5556, 0.4118, 0.1250]) + true value: 1 + prediction: 1 +data: tensor([0.6263, 0.7885, 0.1900, 0.5556, 0.2941, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.3333, 0.7216, 0.6800, 0.3333, 0.2353, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.0202, 0.8086, 0.5100, 0.3333, 0.3529, 0.6250]) + true value: 1 + prediction: 1 +data: tensor([0.2727, 0.3898, 0.4400, 0.4444, 0.4706, 0.0000]) + true value: 0 + prediction: 1 +data: tensor([0.0404, 0.3584, 0.6100, 0.7778, 0.2353, 0.7500]) + true value: 1 + prediction: 1 +data: tensor([0.5455, 0.6261, 0.7700, 0.5556, 0.2353, 0.3750]) + true value: 1 + prediction: 1 +data: tensor([0.4747, 0.3963, 0.5500, 0.7778, 0.3529, 0.7500]) + true value: 1 + prediction: 1 +data: tensor([0.3030, 0.8790, 0.6900, 0.5556, 0.4118, 0.2500]) + true value: 1 + prediction: 1 +data: tensor([0.9495, 0.9537, 0.2400, 1.0000, 0.5294, 0.2500]) + true value: 0 + prediction: 1 +data: tensor([0.6465, 0.9225, 1.0000, 0.5556, 0.5294, 0.5000]) + true value: 1 + prediction: 1 +data: tensor([0.1919, 0.2849, 0.8900, 0.2222, 0.3529, 0.2500]) + true value: 1 + prediction: 1 +data: tensor([0.9192, 0.3851, 0.3200, 0.7778, 0.4118, 0.7500]) + true value: 1 + prediction: 1 +data: tensor([0.8788, 0.4788, 0.1400, 0.1111, 0.5294, 0.1250]) + true value: 0 + prediction: 1 +data: tensor([0.7778, 0.9586, 0.0400, 0.6667, 0.4118, 0.1250]) + true value: 0 + prediction: 1 diff --git a/train.py b/train.py new file mode 100644 index 0000000..67343f7 --- /dev/null +++ b/train.py @@ -0,0 +1,81 @@ +#!/usr/bin/python + +import pandas as pd +import numpy as np +import zadanie1 as z +import torch +import torchvision +import torchvision.transforms as transforms +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim + + +class Net(nn.Module): + def __init__(self): + 
+        super().__init__()
+        #self.conv1 = nn.Conv2d(3, 6, 5)
+        #self.pool = nn.MaxPool2d(2, 2)
+        #self.conv2 = nn.Conv2d(6, 16, 5)
+        #self.fc1 = nn.Linear(16 * 5 * 5, 120)
+        #self.fc2 = nn.Linear(20, 6)
+        self.fc3 = nn.Linear(6, 6)
+
+    def forward(self, x):
+        #x = self.pool(F.relu(self.conv1(x)))
+        #x = self.pool(F.relu(self.conv2(x)))
+        #x = torch.flatten(x, 1)
+        #x = F.relu(self.fc1(x))
+        #x = F.relu(self.fc2(x))
+        x = self.fc3(x)
+        return x
+
+
+def trainNet(trainloader, criterion, optimizer):
+    for epoch in range(20):
+
+        for i, data in enumerate(trainloader, 0):
+            inputs, labels = data
+
+            labelsX = torch.Tensor([x for x in labels])
+            labels = labelsX.type(torch.LongTensor)
+
+            optimizer.zero_grad()
+
+            outputs = net(inputs)
+
+            loss = criterion(outputs, labels)
+            loss.backward()
+            optimizer.step()
+
+    print('Finished Training')
+
+if __name__ == '__main__':
+
+    train, dev, test = z.prepareData()
+
+    batch_size = 4
+
+    trainlist = train.values.tolist()
+    testlist = test.values.tolist()
+
+    trainset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in trainlist]
+    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
+                                              shuffle=True, num_workers=2)
+
+    testset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in testlist]
+    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
+                                             shuffle=False, num_workers=2)
+
+    classes = ('male', 'female')
+
+    net = Net()
+
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
+
+    trainNet(trainloader, criterion, optimizer)
+
+    PATH = './cifar_net.pth'
+    torch.save(net.state_dict(), PATH)
+
diff --git a/zadanie1.py b/zadanie1.py
index 7a2dd49..d42dacb 100644
--- a/zadanie1.py
+++ b/zadanie1.py
@@ -3,34 +3,38 @@
 import pandas as pd
 import numpy as np
 
-data = pd.read_csv("Customers.csv")
-print(data[:10])
-dataF = data
+def prepareData():
+    data = pd.read_csv("Customers.csv")
+    #print(data[:10])
 
-mapping = {'NaN' : 0, 'Healthcare' : 1, 'Engineer' : 2, 'Lawyer' : 3, 'Entertainment' : 4, 'Artist' : 5, 'Executive' : 6,
-           'Doctor' : 7, 'Homemaker' : 8, 'Marketing' : 9}
+    dataF = data
 
-mapping2 = {'Male' : 0, 'Female' : 1}
+    mapping = {'NaN' : 0, 'Healthcare' : 1, 'Engineer' : 2, 'Lawyer' : 3, 'Entertainment' : 4, 'Artist' : 5, 'Executive' : 6,
+               'Doctor' : 7, 'Homemaker' : 8, 'Marketing' : 9}
 
-dataF = dataF.replace({'Profession': mapping})
-dataF = dataF.replace({'Gender': mapping2})
+    mapping2 = {'Male' : 0, 'Female' : 1}
 
-dataF = dataF.drop(columns=['CustomerID'])
+    dataF = dataF.replace({'Profession': mapping})
+    dataF = dataF.replace({'Gender': mapping2})
 
-dataF['Profession'] = dataF['Profession'].fillna(0)
+    dataF = dataF.drop(columns=['CustomerID'])
 
-normalized_dataF = (dataF - dataF.min())/(dataF.max() - dataF.min())
+    dataF['Profession'] = dataF['Profession'].fillna(0)
 
-print(normalized_dataF[:10])
+    normalized_dataF = (dataF - dataF.min())/(dataF.max() - dataF.min())
 
-train_data = normalized_dataF[0:1600]
-dev_data = normalized_dataF[1600:1800]
-test_data = normalized_dataF[1800:]
+    #print(normalized_dataF[:10])
 
-print(f"Wielkość zbioru Customers: {len(data)} elementów")
-print(f"Wielkość zbioru trenującego: {len(train_data)} elementów")
-print(f"Wielkość zbioru walidującego: {len(dev_data)} elementów")
-print(f"Wielkość zbioru testującego: {len(test_data)} elementów")
+    train_data = normalized_dataF[0:1600]
+    dev_data = normalized_dataF[1600:1800]
+    test_data = normalized_dataF[1800:]
 
-print(f" \nDane i wartości na temat zbioru: \n \n {normalized_dataF.describe()}")
+    #print(f"Wielkość zbioru Customers: {len(data)} elementów")
+    #print(f"Wielkość zbioru trenującego: {len(train_data)} elementów")
+    #print(f"Wielkość zbioru walidującego: {len(dev_data)} elementów")
+    #print(f"Wielkość zbioru testującego: {len(test_data)} elementów")
+
+    #print(f" \nDane i wartości na temat zbioru: \n \n {normalized_dataF.describe()}")
+
+    return train_data, dev_data, test_data
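For reference, a minimal evaluation sketch (not part of the diff above): it loads the cifar_net.pth state dict saved by train.py and scores the dev split that prepareData() returns but neither script currently uses. It assumes the same column layout as the code above (normalized Gender label in column 0, six feature columns after it) and re-declares the same Linear(6, 6) classifier; the names and paths are taken from the diff, not from a tested run.

import torch
import torch.nn as nn

import zadanie1 as z


class Net(nn.Module):
    """Same single-layer classifier as in train.py/test.py: 6 features -> 6 logits."""
    def __init__(self):
        super().__init__()
        self.fc3 = nn.Linear(6, 6)

    def forward(self, x):
        return self.fc3(x)


if __name__ == '__main__':
    # prepareData() returns (train, dev, test); only the dev split is needed here.
    _, dev, _ = z.prepareData()

    # Column 0 is the min-max-normalized Gender label (still 0 or 1);
    # the remaining six columns are the input features.
    rows = dev.values.tolist()
    features = torch.tensor([r[1:] for r in rows], dtype=torch.float32)
    labels = torch.tensor([int(round(r[0])) for r in rows], dtype=torch.long)

    # Load the weights written by train.py (assumed path from the diff).
    net = Net()
    net.load_state_dict(torch.load('./cifar_net.pth'))
    net.eval()

    with torch.no_grad():
        predicted = net(features).argmax(dim=1)
        accuracy = (predicted == labels).float().mean().item()

    print(f'Dev accuracy: {100 * accuracy:.1f} %')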