# AL-2020/coder/digits_recognizer.py
# MNIST handwritten-digit recognizer: a small fully connected network in PyTorch.
import numpy as np
import torch
import matplotlib.pyplot as plt  # kept for the commented-out view_classify call below
from time import time
from torchvision import datasets, transforms
from torch import nn, optim
import cv2  # not used in the training flow; see the inference sketch at the end
# Image transform: convert to tensor, then normalize to mean 0.5 / std 0.5
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])
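
# Note: Normalize((0.5,), (0.5,)) maps ToTensor's [0, 1] pixel range to
# [-1, 1] via x -> (x - 0.5) / 0.5. A minimal check on a dummy image tensor
# (a sketch, not part of the original pipeline):
# print(transforms.Normalize((0.5,), (0.5,))(torch.zeros(1, 28, 28)).min())  # tensor(-1.)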
# Dataset download: MNIST train and validation splits
train_set = datasets.MNIST('PATH_TO_STORE_TRAINSET', download=True, train=True, transform=transform)
val_set = datasets.MNIST('PATH_TO_STORE_TESTSET', download=True, train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=64, shuffle=True)
# print(train_set[0])  # debug: inspect one raw (image_tensor, label) sample
# Build the network: 784 -> 128 -> 128 -> 64 -> 10 with ReLU activations
input_size = 784 # = 28*28
hidden_sizes = [128, 128, 64]
output_size = 10
model = nn.Sequential(
    nn.Linear(input_size, hidden_sizes[0]),
    nn.ReLU(),
    nn.Linear(hidden_sizes[0], hidden_sizes[1]),
    nn.ReLU(),
    nn.Linear(hidden_sizes[1], hidden_sizes[2]),
    nn.ReLU(),
    nn.Linear(hidden_sizes[2], output_size),
    nn.LogSoftmax(dim=-1),
)
# print(model)
criterion = nn.NLLLoss()

# Sanity check: run one batch through the untrained model
images, labels = next(iter(train_loader))
images = images.view(images.shape[0], -1)  # flatten 1x28x28 -> 784
logps = model(images)  # log probabilities
loss = criterion(logps, labels)  # NLL loss on the untrained model
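
# Quick check (sketch): with LogSoftmax outputs, NLLLoss is equivalent to
# cross-entropy, so an untrained 10-class model should sit near
# -log(1/10) ≈ 2.303.
print("Initial loss:", loss.item())  # expect roughly 2.3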
# training
optimizer = optim.SGD(model.parameters(), lr=0.003, momentum=0.9)
time0 = time()
epochs = 1
for e in range(epochs):
    running_loss = 0
    for images, labels in train_loader:
        # Flatten MNIST images into a 784-long vector
        images = images.view(images.shape[0], -1)

        # Training pass
        optimizer.zero_grad()
        output = model(images)
        loss = criterion(output, labels)

        # This is where the model learns by backpropagating
        loss.backward()

        # And optimizes its weights here
        optimizer.step()

        running_loss += loss.item()
    print("Epoch {} - Training loss: {}".format(e + 1, running_loss / len(train_loader)))
print("\nTraining Time (in minutes) =", (time() - time0) / 60)
# Testing: classify a single validation image
images, labels = next(iter(val_loader))
img = images[0].view(1, 784)
with torch.no_grad():
    logps = model(img)
ps = torch.exp(logps)  # convert log probabilities back to probabilities
probab = list(ps.numpy()[0])
print("Predicted Digit =", probab.index(max(probab)))
# view_classify(img.view(1, 28, 28), ps)
# Accuracy over the whole validation set, one image at a time
correct_count, all_count = 0, 0
for images, labels in val_loader:
    for i in range(len(labels)):
        img = images[i].view(1, 784)
        with torch.no_grad():
            logps = model(img)
        ps = torch.exp(logps)
        probab = list(ps.numpy()[0])
        pred_label = probab.index(max(probab))
        true_label = labels.numpy()[i]
        if true_label == pred_label:
            correct_count += 1
        all_count += 1

print("Number Of Images Tested =", all_count)
print("\nModel Accuracy =", (correct_count / all_count))
# Saving the model (uncomment to persist the trained weights)
# torch.save(model.state_dict(), './digit_reco_model.pt')
# torch.save(model.state_dict(), './digit_reco_model2.pt')
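
# A sketch of how the saved weights and the cv2 import above might be used to
# classify a hand-drawn digit from disk; 'my_digit.png' is a placeholder file
# name, not part of the original project.
# model.load_state_dict(torch.load('./digit_reco_model.pt'))
# model.eval()
# digit = cv2.imread('my_digit.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
# digit = cv2.resize(digit, (28, 28)).astype(np.float32) / 255.0
# digit = (digit - 0.5) / 0.5  # same normalization as the training transform
# tensor = torch.from_numpy(digit).view(1, 784)
# with torch.no_grad():
#     print("Predicted Digit =", model(tensor).argmax(dim=1).item())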