neural_network #4

Merged
s452622 merged 34 commits from neural_network into master 2021-06-08 23:47:22 +02:00
2 changed files with 45 additions and 108 deletions
Showing only changes of commit 048219b96f

View File

@@ -1,68 +1,22 @@
 import os
 import torch
-import glob
-import torch.nn as nn
+import torchvision
 from PIL import Image
-from torchvision.transforms import transforms
-from torch.utils.data import DataLoader
+import torch.nn as nn
 from torch.optim import Adam
 from torch.autograd import Variable
-import torchvision
-import pathlib
-from net import Net
-
-temp_path = os.path.abspath('../../..')
-DIR = ''
-train_dir = r'images\learning\training'
-test_dir = r'images\learning\test'
-train_dir = os.path.join(temp_path, train_dir)
-test_dir = os.path.join(temp_path, test_dir)
+from torch.utils.data import DataLoader
+from machine_learning.neural_network.net import Net
+from machine_learning.neural_network.helpers import main_path, train_path, test_path, prediction_path, transformer
 
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-classes = ['mine', 'rock']
-
-transformer = transforms.Compose([
-    transforms.Resize((150,150)),
-    transforms.RandomHorizontalFlip(),
-    transforms.ToTensor(),
-    transforms.Normalize([0.5,0.5,0.5],
-                         [0.5,0.5,0.5])
-])
-
-train_path = r'C:\Users\User\PycharmProjects\Super-Saper222\images\learning\training\training'
-test_path = r'C:\Users\User\PycharmProjects\Super-Saper222\images\learning\test\test'
-pred_path = r'C:\Users\User\PycharmProjects\Super-Saper222\images\learning\prediction\prediction'
-
-train_loader = DataLoader(
-    torchvision.datasets.ImageFolder(train_path, transform=transformer),
-    batch_size=64, shuffle=True
-)
-test_loader = DataLoader(
-    torchvision.datasets.ImageFolder(test_path, transform=transformer),
-    batch_size=32, shuffle=True
-)
-
-root=pathlib.Path(train_path)
-classes = sorted([j.name.split('/')[-1] for j in root.iterdir()])
-
-model = Net(num_classes=6).to(device)
-optimizer = Adam(model.parameters(),lr=1e-3,weight_decay=0.0001)
-loss_fn = nn.CrossEntropyLoss()
-num_epochs = 10
-
-train_count = len(glob.glob(train_path+'/**/*.*'))
-test_count = len(glob.glob(test_path+'/**/*.*'))
-print(train_count,test_count)
-best_accuracy = 0.0
-
-def train(dataloader, model, loss_fn, optimizer):
+
+
+def train(dataloader, model: Net, optimizer: Adam, loss_fn: nn.CrossEntropyLoss):
     size = len(dataloader.dataset)
     for batch, (X, y) in enumerate(dataloader):
         X, y = X.to(device), y.to(device)
@@ -74,12 +28,12 @@ def train(dataloader, model, loss_fn, optimizer):
         loss.backward()
         optimizer.step()
-        if batch % 100 == 0:
+        if batch % 5 == 0:
             loss, current = loss.item(), batch * len(X)
             print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
 
-def test(dataloader, model):
+def test(dataloader, model: Net, loss_fn: nn.CrossEntropyLoss):
     size = len(dataloader.dataset)
     model.eval()
     test_loss, correct = 0, 0
@@ -96,7 +50,7 @@ def test(dataloader, model):
     print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
 
-def prediction1(classes, img_path, model, transformer):
+def prediction(img_path, model: Net):
     image = Image.open(img_path).convert('RGB')
     image_tensor = transformer(image).float()
     image_tensor = image_tensor.unsqueeze_(0)
@@ -110,41 +64,45 @@ def prediction1(classes, img_path, model, transformer):
     pred = classes[index]
     return pred
 
-transformer1 = transforms.Compose([transforms.Resize((150, 150)),
-                                   transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
-
-#creating new model
-# for t in range(9):
-#     print(f"Epoch {t+1}\n-------------------------------")
-#     train(train_loader, model, loss_fn, optimizer)
-#     test(test_loader, model)
-# print("Done!")
-# torch.save(model.state_dict(), 'mine_recognizer.model')
-
-#checking work of new model
-checkpoint = torch.load(os.path.join('.', 'mine_recognizer.model'))
-model = Net(num_classes=6)
-model.load_state_dict(checkpoint)
-model.eval()
-
-transformer1 = transforms.Compose([transforms.Resize((150, 150)), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
-images_path = glob.glob(pred_path+'/*.*')
-pred_dict = {}
-for i in images_path:
-    pred_dict[i[i.rfind('/') + 1:]] = prediction1(classes, i, model, transformer1)
-print(pred_dict)
-
-model = Net(num_classes=6)
-model.load_state_dict(checkpoint)
-model.eval()
-images_path = glob.glob(pred_path + '/*.*')
-pred_dict = {}
-for i in images_path:
-    pred_dict[i[i.rfind('/') + 1:]] = prediction1(classes, i, model, transformer1)
-print(pred_dict)
+def test_prediction_set():
+    checkpoint = torch.load(f'{main_path}/mine_recognizer.model')
+    model = Net(num_classes=2)
+    model.load_state_dict(checkpoint)
+    model.eval()
+
+    pred_dict = {}
+
+    for file in os.listdir(prediction_path):
+        pred_dict[file[file.rfind('/') + 1:]] = prediction(f'{prediction_path}/{file}', model)
+
+    print(pred_dict)
+
+
+def main():
+    num_epochs = 50
+
+    train_loader = DataLoader(
+        torchvision.datasets.ImageFolder(train_path, transform=transformer),
+        batch_size=64, shuffle=True
+    )
+    test_loader = DataLoader(
+        torchvision.datasets.ImageFolder(test_path, transform=transformer),
+        batch_size=32, shuffle=True
+    )
+
+    model = Net(2).to(device)
+    optimizer = Adam(model.parameters(), lr=1e-3, weight_decay=0.0001)
+    loss_fn = nn.CrossEntropyLoss()
+
+    for t in range(num_epochs):
+        print(f"Epoch {t + 1}\n-------------------------------")
+        train(train_loader, model, optimizer, loss_fn)
+        test(test_loader, model, loss_fn)
+    print("Done!")
+
+    torch.save(model.state_dict(), f'{main_path}/mine_recognizer.model')
+
+    test_prediction_set()
+
+
+if __name__ == "__main__":
+    main()
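
Note: the new module machine_learning/neural_network/helpers.py that the script now imports from is not shown in this diff, so its contents can only be assumed. Below is a minimal sketch that would satisfy the import above, reusing the transformer the old script built inline; the directory layout and the use of the module's own location as main_path are hypothetical. One thing the diff leaves open is where the classes list used by prediction() now lives, since it is neither among the imported names nor visibly kept in the rewritten file.

import os
from torchvision.transforms import transforms

# Hypothetical layout: anchor the image folders next to this module.
# Adjust these to wherever the images actually live in the repository.
main_path = os.path.dirname(os.path.abspath(__file__))
train_path = os.path.join(main_path, 'images', 'learning', 'training')
test_path = os.path.join(main_path, 'images', 'learning', 'test')
prediction_path = os.path.join(main_path, 'images', 'learning', 'prediction')

# Same preprocessing the old script defined inline: resize to 150x150, random
# horizontal flip, convert to a tensor, normalize each channel to roughly [-1, 1].
transformer = transforms.Compose([
    transforms.Resize((150, 150)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5],
                         [0.5, 0.5, 0.5])
])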

View File

@@ -4,39 +4,21 @@ import torch.nn as nn
 class Net(nn.Module):
     def __init__(self, num_classes=6):
         super(Net, self).__init__()
-        # Output size after convolution filter
-        # ((w-f+2P)/s) +1
-
-        # Input shape= (256,3,150,150)
         self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, stride=1, padding=1)
-        # Shape= (256,12,150,150)
         self.bn1 = nn.BatchNorm2d(num_features=12)
-        # Shape= (256,12,150,150)
         self.relu1 = nn.ReLU()
-        # Shape= (256,12,150,150)
-
         self.pool = nn.MaxPool2d(kernel_size=2)
-        # Reduce the image size be factor 2
-        # Shape= (256,12,75,75)
-
         self.conv2 = nn.Conv2d(in_channels=12, out_channels=20, kernel_size=3, stride=1, padding=1)
-        # Shape= (256,20,75,75)
         self.relu2 = nn.ReLU()
-        # Shape= (256,20,75,75)
-
         self.conv3 = nn.Conv2d(in_channels=20, out_channels=32, kernel_size=3, stride=1, padding=1)
-        # Shape= (256,32,75,75)
         self.bn3 = nn.BatchNorm2d(num_features=32)
-        # Shape= (256,32,75,75)
         self.relu3 = nn.ReLU()
-        # Shape= (256,32,75,75)
-
         self.fc = nn.Linear(in_features=75 * 75 * 32, out_features=num_classes)
 
-    # Feed forwad function
     def forward(self, input):
         output = self.conv1(input)
         output = self.bn1(output)
@@ -51,10 +33,7 @@ class Net(nn.Module):
         output = self.bn3(output)
         output = self.relu3(output)
-
-        # Above output will be in matrix form, with shape (256,32,75,75)
         output = output.view(-1, 32 * 75 * 75)
         output = self.fc(output)
         return output
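
The comments deleted above documented the tensor shape after each layer. A quick way to confirm those shapes still hold for the trimmed class is to push a dummy batch through it. This sketch assumes the package layout implied by the import in the first file and that the part of forward() not shown in the hunk applies self.pool exactly once, which is what the 32 * 75 * 75 input size of the fully connected layer implies:

# Each conv uses kernel_size=3, stride=1, padding=1, so the spatial size is preserved:
# ((w - f + 2p) / s) + 1 = ((150 - 3 + 2) / 1) + 1 = 150.
# The single MaxPool2d(kernel_size=2) then halves 150x150 to 75x75, which is why the
# classifier expects 32 * 75 * 75 input features.
import torch
from machine_learning.neural_network.net import Net

model = Net(num_classes=2)
dummy = torch.randn(4, 3, 150, 150)   # a batch of four 150x150 RGB images
logits = model(dummy)
print(logits.shape)                   # expected: torch.Size([4, 2])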