Compare commits


No commits in common. "9e978d6032c293ed224bb05286ff3e9ef48bed1e" and "b5d25d710d247b5e32d721af486b8455f9993570" have entirely different histories.

11 changed files with 13 additions and 47 deletions


@@ -1,6 +1,4 @@
 import torch.nn as nn
 import torch
 import torch.nn.functional as F
 class Neural_Network_Model(nn.Module):
@@ -18,4 +16,5 @@ class Neural_Network_Model(nn.Module):
         x = self.fc2(x)
         x = torch.relu(x)
         x = self.out(x)
-        return F.log_softmax(x, dim=-1)
+        F.log_softmax(x, dim=-1)
+        return x
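
Note on the forward() change above: in the new version the value of F.log_softmax(x, dim=-1) is discarded and the model returns raw logits, while the training script below pairs the model with nn.NLLLoss, which expects log-probabilities. A minimal sketch of the two consistent pairings, with placeholder layer sizes, not taken from either commit:

# Illustrative sketch only: placeholder layer sizes, not from either commit.
import torch.nn as nn
import torch.nn.functional as F

class LogProbNet(nn.Module):
    # returns log-probabilities; pair with nn.NLLLoss
    def __init__(self, num_features=16, num_classes=5):
        super().__init__()
        self.out = nn.Linear(num_features, num_classes)

    def forward(self, x):
        return F.log_softmax(self.out(x), dim=-1)

class LogitNet(nn.Module):
    # returns raw logits; pair with nn.CrossEntropyLoss,
    # which applies log_softmax internally
    def __init__(self, num_features=16, num_classes=5):
        super().__init__()
        self.out = nn.Linear(num_features, num_classes)

    def forward(self, x):
        return self.out(x)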


@@ -1,17 +1,15 @@
 import torch
 import torch.nn as nn
 from torch.utils.data import DataLoader
-from torchvision import datasets, transforms, utils
+from torchvision import datasets, transforms
 from torchvision.transforms import Compose, Lambda, ToTensor
 import matplotlib.pyplot as plt
 import numpy as np
 from model import *
 from PIL import Image
 device = torch.device('cuda')
 #data transform to tensors:
-data_transformer = transforms.Compose([
+data_transformer = transforms.Compose
+([
     transforms.Resize((150, 150)),
     transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
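
Note on the transform change above: in the new version the statement data_transformer = transforms.Compose ends on that line, so the name is bound to the Compose class itself, and the bracketed list that follows is a separate expression whose value is discarded. A minimal sketch that keeps the call on the assignment line, reusing the transform values from the diff:

# Sketch only: same transform values as in the diff, with the Compose call
# kept on the assignment line so data_transformer is a callable transform.
from torchvision import transforms

data_transformer = transforms.Compose([
    transforms.Resize((150, 150)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])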
@@ -33,9 +31,10 @@ test_set = datasets.ImageFolder(root='resources/test', transform=data_transforme
 #print(train_set.targets[3002])
 #loading your own image: <-- I'll do this at the end - feeding in a specific image to get a result
 #function for training model
 def train(model, dataset, iter=100, batch_size=64):
-    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
+    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
     criterion = nn.NLLLoss()
     train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
     model.train()
@@ -47,49 +46,17 @@ def train(model, dataset, iter=100, batch_size=64):
             loss = criterion(output, labels.to(device))
             loss.backward()
             optimizer.step()
         if epoch % 10 == 0:
             print('epoch: %3d loss: %.4f' % (epoch, loss))
 #function for getting accuracy
 def accuracy(model, dataset):
     model.eval()
-    with torch.no_grad():
-        correct = sum([
-            (model(inputs.to(device)).argmax(dim=1) == labels.to(device)).sum()
-            for inputs, labels in DataLoader(dataset, batch_size=64, shuffle=True)
-        ])
+    correct = sum([
+        (model(inputs.to(device)).argmax(dim=1) == labels.to(device)).sum()
+        for inputs, labels in DataLoader(dataset, batch_size=64, shuffle=True)
+    ])
     return correct.float() / len(dataset)
 model = Neural_Network_Model()
 model.to(device)
-model.load_state_dict(torch.load('model.pth'))
-model.eval()
-#training the model:
-# train(model, train_set)
-# print(f"Accuracy of the network is: {100*accuracy(model, test_set)}%")
-# torch.save(model.state_dict(), 'model.pth')
-#TEST - loading the image and getting results:
-testImage_path = 'resources/images/plant_photos/pexels-polina-tankilevitch-4110456.jpg'
-testImage = Image.open(testImage_path)
-testImage = data_transformer(testImage)
-testImage = testImage.unsqueeze(0)
-testImage = testImage.to(device)
-model.load_state_dict(torch.load('model.pth'))
-model.to(device)
-model.eval()
-testOutput = model(testImage)
-_, predicted = torch.max(testOutput, 1)
-predicted_class = train_set.classes[predicted.item()]
-print(f'The predicted class is: {predicted_class}')
+train(model, train_set)
+print(accuracy(model, test_set))
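
For reference, a minimal sketch of the accuracy computation with evaluation wrapped in torch.no_grad(), so no gradients are tracked while scoring; model, device, and the datasets are assumed to be defined as in the script above:

# Sketch only: assumes model, device, and a torchvision-style dataset
# as defined in the script above.
import torch
from torch.utils.data import DataLoader

def accuracy(model, dataset, batch_size=64):
    model.eval()
    correct = 0
    with torch.no_grad():  # no gradient tracking during evaluation
        for inputs, labels in DataLoader(dataset, batch_size=batch_size):
            preds = model(inputs.to(device)).argmax(dim=1)
            correct += (preds == labels.to(device)).sum().item()
    return correct / len(dataset)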

Binary file not shown.

Binary file not shown. (Before: 190 KiB)

Binary file not shown. (Before: 1.8 MiB)

Binary file not shown. (Before: 99 KiB)

Binary file not shown. (Before: 5.5 KiB)

Binary file not shown. (Before: 1.3 MiB)

Binary file not shown. (Before: 1.2 MiB)

Binary file not shown. (Before: 888 KiB)