import os

import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader, Dataset, random_split


class TreesDataset(Dataset):
    """Dataset of tree/ground images loaded from a list of file paths."""

    def __init__(self, data_links) -> None:
        self.X, self.Y = readData(data_links)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        return (self.X[index], self.Y[index])


class Net(nn.Module):
    """Small LeNet-style CNN: two conv/pool blocks followed by three fully connected layers."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # 3264 must equal the flattened size of the conv/pool output,
        # which depends on the dimensions of the resized input images.
        self.fc1 = nn.Linear(3264, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 2)  # two classes: tree and ground

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1)  # flatten all dimensions except the batch
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def create_datalinks(root_dir):
    """Return the full paths of all files in root_dir."""
    data_links = os.listdir(root_dir)
    data_links = [root_dir + "/" + x for x in data_links]
    return data_links


def preprocess(img):
    """Downscale an image to 10% of its original size and convert it to a tensor."""
    scale_percent = 10
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
    resized = torchvision.transforms.functional.to_tensor(resized)
    return resized


def readData(data_links):
    """Load and preprocess every image, labelling ground images 1 and AS12 (tree) images 0."""
    x, y = [], []
    for link in data_links:
        img = cv2.imread(link, cv2.IMREAD_COLOR)
        img = preprocess(img)
        if "ground" in link:
            label = 1
        elif "AS12" in link:
            label = 0
        else:
            continue  # skip files that belong to neither class
        x.append(img)
        y.append(label)
    return x, y


links_3_plus_ground = create_datalinks("new_data/AS12_3") + create_datalinks("new_data/ground")
dataset = TreesDataset(links_3_plus_ground)

# Fixed 300/50 split; assumes the two folders contain 350 images in total.
train_set, test_set = random_split(dataset, [300, 50], generator=torch.Generator().manual_seed(42))
trainloader = DataLoader(train_set, batch_size=10, shuffle=True, num_workers=2)
testloader = DataLoader(test_set, batch_size=10, shuffle=True, num_workers=2)

classes = ('tree', 'ground')

epochs_num = 15
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(epochs_num):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 10 == 9:  # print the average loss every 10 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 10))
            running_loss = 0.0

print('Finished Training')

correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)  # class with the highest score
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy : %d %%' % (100 * correct / total))
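
# The 3264 input features of fc1 are tied to the size of the resized images.
# Below is a minimal sanity check for that number, assuming (hypothetically)
# that the source photos are about 600x800 pixels, so the 10% resize yields
# 60x80 tensors; swap in the shape of your own preprocessed images.
check_net = Net()
with torch.no_grad():
    dummy = torch.zeros(1, 3, 60, 80)                       # one fake RGB image
    feat = check_net.pool(F.relu(check_net.conv1(dummy)))   # first conv/pool block
    feat = check_net.pool(F.relu(check_net.conv2(feat)))    # second conv/pool block
    # For 60x80 inputs this prints torch.Size([1, 16, 12, 17]) and 3264;
    # a different value means fc1's input size needs to be adjusted.
    print(feat.shape, feat.flatten(1).shape[1])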