import os

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms


class Net(nn.Module):
    """Small CNN for 5-class image classification on 64x64 RGB inputs."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 3)
        # After two conv + pool stages a 64x64 input is reduced to 32 x 14 x 14.
        self.fc1 = nn.Linear(32 * 14 * 14, 128)
        self.fc2 = nn.Linear(128, 5)

    def forward(self, x):
        x = self.pool(torch.relu(self.conv1(x)))  # 3x64x64 -> 16x31x31
        x = self.pool(torch.relu(self.conv2(x)))  # 16x31x31 -> 32x14x14
        x = x.view(-1, 32 * 14 * 14)              # flatten for the fully connected layers
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)                           # raw logits; CrossEntropyLoss applies softmax
        return x


def main() -> None:
    # Resize every image to 64x64 and normalize each channel to [-1, 1].
    transform = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    # Load the training set from a class-per-subfolder layout next to this script.
    path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    path = os.path.join(path, 'garbage_photos/train_set')
    trainset = torchvision.datasets.ImageFolder(root=path, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=32,
                                              shuffle=True, num_workers=2)

    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # Train on the GPU if one is available, otherwise fall back to the CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net.to(device)

    for epoch in range(10):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data[0].to(device), data[1].to(device)

            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Report the average loss every 200 mini-batches.
            running_loss += loss.item()
            if i % 200 == 199:
                print(f'Epoch: {epoch + 1}, Batch: {i + 1}, Loss: {running_loss / 200:.3f}')
                running_loss = 0.0

    # Save the final weights and optimizer state so training can be resumed
    # or the model reloaded later for inference.
    torch.save({
        'model_state_dict': net.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'epoch': epoch,
        'loss': loss.item(),  # store a plain float rather than a device-bound tensor
    }, 'model.pt')
    print("Training finished.")


if __name__ == "__main__":
    main()
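
# Sketch (assumption, not part of the original script): a helper for reloading
# the checkpoint written by main() so the trained network can be used for
# inference. The 'model_state_dict' key, the Net architecture, and the
# 'model.pt' file name all refer to the torch.save() call above.
def load_trained_model(checkpoint_path: str = 'model.pt') -> Net:
    """Rebuild Net and restore the trained weights from a saved checkpoint."""
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    model = Net()
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()  # switch to evaluation mode, standard practice before inference
    return model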