# Third-party imports, grouped per PEP 8 (third-party, then project-local).
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import Compose, Lambda, ToTensor

import dataset_glasses as dg
# torch.manual_seed(42)

# Run on the GPU when CUDA is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Both splits get the same preprocessing: cast each image tensor to float32.
# (The Lambda is stateless, so one Compose instance can serve both datasets.)
_to_float32 = Compose([Lambda(lambda x: x.float())])

trainset = dg.GlassesDataset('classes\\Jimmy_Neuron\\train', 'classes\\Jimmy_Neuron\\sdg.csv',
                             transform=_to_float32)
testset = dg.GlassesDataset('classes\\Jimmy_Neuron\\test', 'classes\\Jimmy_Neuron\\set.csv',
                            transform=_to_float32)
def train(model, dataset, n_iter=100, batch_size=512, lr=0.0001, device=None):
    """Train ``model`` on ``dataset`` for ``n_iter`` epochs with plain SGD.

    Args:
        model: module mapping a batch of images to per-class scores;
            assumed to already live on the training device — TODO confirm.
        dataset: torch ``Dataset`` yielding ``(image, target)`` pairs.
        n_iter: number of epochs.
        batch_size: mini-batch size for the DataLoader.
        lr: SGD learning rate. Kept small on purpose — with a larger rate
            the weights collapsed to zero in the original experiments.
        device: device to run on; defaults to CUDA when available.
    """
    if device is None:
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    criterion = torch.nn.CrossEntropyLoss()  # handy for the binary criterion too!
    dl = DataLoader(dataset, batch_size=batch_size)
    model.train()
    for epoch in range(n_iter):
        for images, targets in dl:
            optimizer.zero_grad()
            out = model(images.to(device))
            loss = criterion(out, targets.to(device))
            loss.backward()
            optimizer.step()
        if epoch % 10 == 0:
            # .item() extracts the Python float from the 0-dim loss tensor.
            print('epoch: %3d loss: %.4f' % (epoch, loss.item()))
            # Debug aid: inspect per-parameter gradients.
            # for name, param in model.named_parameters():
            #     print(name, param.grad)
def accuracy(model, dataset, device=None):
    """Fraction of ``dataset`` items whose argmax prediction matches the target.

    Args:
        model: module mapping a batch of images to per-class scores.
        dataset: non-empty torch ``Dataset`` of ``(image, target)`` pairs.
        device: evaluation device; defaults to CUDA when available.

    Returns:
        0-dim float tensor in ``[0, 1]``.
    """
    if device is None:
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.eval()
    # Inference only — no_grad skips autograd bookkeeping and saves memory.
    with torch.no_grad():
        correct = sum((model(images.to(device)).argmax(dim=1) == targets.to(device)).sum()
                      for images, targets in DataLoader(dataset, batch_size=256))
    return correct.float() / len(dataset)
# Width of the first fully-connected hidden layer.
hidden_size = 135*64

# History notes (translated): resolution was dropped from 1024 to 128 so
# training would ever finish once CUDA was set up; adding the convolutional
# layers raised accuracy from ~85% to ~90%.
# NOTE(review): a byte-identical commented-out copy of the Sequential model
# below, plus the original train/accuracy/save calls
# (train(model, trainset); print(accuracy(model, testset));
# torch.save(model.state_dict(), 'model.pt')), used to sit here as dead
# code — removed; the live definition below is the single source of truth.
# Convolutional classifier: two conv stages, then a three-layer MLP head over
# the flattened feature map, emitting log-probabilities for 10 classes.
model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 6, 5),
    torch.nn.MaxPool2d(2, 2),
    torch.nn.Conv2d(6, 16, 5),
    torch.nn.Flatten(),
    torch.nn.Linear(53824, hidden_size),
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, 32*32),
    torch.nn.ReLU(),
    torch.nn.Linear(32*32, 10),
    # NOTE(review): LogSoftmax here combined with CrossEntropyLoss in train()
    # applies log-softmax twice; pairing with NLLLoss (or dropping this layer)
    # would be conventional — left as-is to stay compatible with the saved
    # checkpoint. Confirm before changing.
    torch.nn.LogSoftmax(dim=-1)
).to(device)

# map_location lets a checkpoint saved on a GPU load on a CPU-only machine.
model.load_state_dict(torch.load('model.pt', map_location=device))