'best_model.pth' neural network learning model + fixes
parent 23d5dbd614
commit f7cc21a386
BIN  NeuralNetwork/best_model.pth  (Normal file)
Binary file not shown.
10  NeuralNetwork/learning_results.txt  (Normal file)
@@ -0,0 +1,10 @@
+Epoch: 1 Train Loss: 65 Train Accuracy: 0.5754245754245755
+Epoch: 2 Train Loss: 25 Train Accuracy: 0.7457542457542458
+Epoch: 3 Train Loss: 8 Train Accuracy: 0.8431568431568431
+Epoch: 4 Train Loss: 2 Train Accuracy: 0.9010989010989011
+Epoch: 5 Train Loss: 1 Train Accuracy: 0.9335664335664335
+Epoch: 6 Train Loss: 0 Train Accuracy: 0.9545454545454546
+Epoch: 7 Train Loss: 0 Train Accuracy: 0.972027972027972
+Epoch: 8 Train Loss: 0 Train Accuracy: 0.9820179820179821
+Epoch: 9 Train Loss: 0 Train Accuracy: 0.994005994005994
+Epoch: 10 Train Loss: 0 Train Accuracy: 0.9945054945054945
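The "Train Loss: 0" entries from epoch 6 onward are an artifact of the int() cast in the training script's print statement (visible further down in this diff): any average loss below 1.0 is truncated to 0. A minimal sketch, using made-up loss and accuracy values, of a format that keeps the decimals:

# Made-up values purely for illustration; the script computes these per epoch.
epoch, train_loss, train_accuracy = 5, 0.4271, 0.9545454545454546

# The commit's style: int() truncates toward zero, so 0.4271 prints as 0.
print('Epoch: ' + str(epoch + 1) + ' Train Loss: ' + str(int(train_loss)) + ' Train Accuracy: ' + str(train_accuracy))

# Keeping four decimals makes the later epochs distinguishable.
print(f'Epoch: {epoch + 1} Train Loss: {train_loss:.4f} Train Accuracy: {train_accuracy:.4f}')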
@@ -22,7 +22,8 @@ optimizer = Adam(model.parameters(), lr=0.001, weight_decay=0.0001)
 criterion = nn.CrossEntropyLoss()
 
 num_epochs = 10
-train_size = len(glob.glob(images_path, '*.jpg'))
+# train_size = len(glob.glob(images_path+'*.jpg'))
+train_size = 2002
 
 go_to_accuracy = 0.0
 for epoch in range(num_epochs):
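The replaced line passed the '*.jpg' pattern as a second positional argument, which glob.glob() does not accept, so this commit comments it out and hardcodes train_size = 2002; the new comment shows the intended call. A minimal sketch of counting the images dynamically, with a hypothetical images_path (the real script defines its own):

import glob
import os

images_path = 'NeuralNetwork/train/'   # hypothetical; the script sets this elsewhere

# The form suggested by the commented-out line in this commit:
train_size = len(glob.glob(images_path + '*.jpg'))

# Equivalent, without relying on a trailing slash in images_path:
train_size = len(glob.glob(os.path.join(images_path, '*.jpg')))

print(train_size)   # 2002 for the dataset used in this commit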
@@ -50,10 +51,10 @@ for epoch in range(num_epochs):
     train_accuracy = train_accuracy/train_size
     train_loss = train_loss/train_size
 
-
+    model.eval()
 
     print('Epoch: '+ str(epoch+1) +' Train Loss: '+ str(int(train_loss)) +' Train Accuracy: '+ str(train_accuracy))
 
     if train_accuracy > go_to_accuracy:
         go_to_accuracy= train_accuracy
         torch.save(model.state_dict(), "best_model.pth")
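The added model.eval() switches BatchNorm layers into inference mode, but it only affects code that runs after it; the usual pattern pairs it with model.train() at the start of each epoch and a torch.no_grad() block around the evaluation pass. A minimal sketch with a stand-in model (not the script's DataModel):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)     # stand-in model, for illustration only
num_epochs = 2

for epoch in range(num_epochs):
    model.train()           # training mode: BatchNorm updates running stats
    # ... forward/backward passes over the training data would go here ...

    model.eval()            # inference mode, as the line added in this commit
    with torch.no_grad():   # no gradient tracking needed while evaluating
        _ = model(torch.randn(1, 4))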
@@ -9,33 +9,33 @@ class DataModel(nn.Module):
 
         # convolution
         self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, stride=1, padding=1)
-        #shape (256, 12, 244x244)
+        #shape (256, 12, 224x224)
 
         # batch normalization
         self.bn1 = nn.BatchNorm2d(num_features=12)
-        #shape (256, 12, 244x244)
+        #shape (256, 12, 224x224)
         self.reul1 = nn.ReLU()
 
         self.pool=nn.MaxPool2d(kernel_size=2, stride=2)
         # reduce image size by factor 2
         # pooling window moves by 2 pixels at a time instead of 1
-        # shape (256, 12, 122x122)
+        # shape (256, 12, 112x112)
 
 
 
         self.conv2 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=3, stride=1, padding=1)
         self.bn2 = nn.BatchNorm2d(num_features=24)
         self.reul2 = nn.ReLU()
-        # shape (256, 24, 122x122)
+        # shape (256, 24, 112x112)
 
         self.conv3 = nn.Conv2d(in_channels=24, out_channels=48, kernel_size=3, stride=1, padding=1)
-        #shape (256, 48, 122x122)
+        #shape (256, 48, 112x112)
         self.bn3 = nn.BatchNorm2d(num_features=48)
-        #shape (256, 48, 122x122)
+        #shape (256, 48, 112x112)
         self.reul3 = nn.ReLU()
 
         # connected layer
-        self.fc = nn.Linear(in_features=48*122*122, out_features=num_objects)
+        self.fc = nn.Linear(in_features=48*112*112, out_features=num_objects)
 
     def forward(self, input):
         output = self.conv1(input)
@@ -51,8 +51,11 @@ class DataModel(nn.Module):
         output = self.bn3(output)
         output = self.reul3(output)
 
-        # output shape matrix (256, 48, 122x122)
-        output = output.view(-1, 48*122*122)
+        # output shape matrix (256, 48, 112x112)
+        #print(output.shape)
+        #print(self.fc.weight.shape)
+
+        output = output.view(-1, 48*112*112)
         output = self.fc(output)
 
         return output
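The 224/112 numbers corrected throughout this diff follow from the layer parameters: a 3x3 convolution with stride 1 and padding 1 preserves spatial size, BatchNorm2d and ReLU leave it unchanged, and the single 2x2 max-pool halves 224 to 112, so the flattened vector has 48*112*112 = 602112 elements. A quick shape check using only the shape-relevant layers (not part of the commit), assuming 224x224 RGB inputs as the corrected comments state:

import torch
import torch.nn as nn

x = torch.randn(1, 3, 224, 224)   # batch of 1 is enough for a shape check

conv1 = nn.Conv2d(3, 12, kernel_size=3, stride=1, padding=1)
pool  = nn.MaxPool2d(kernel_size=2, stride=2)
conv2 = nn.Conv2d(12, 24, kernel_size=3, stride=1, padding=1)
conv3 = nn.Conv2d(24, 48, kernel_size=3, stride=1, padding=1)

out = pool(conv1(x))              # -> torch.Size([1, 12, 112, 112])
out = conv3(conv2(out))           # -> torch.Size([1, 48, 112, 112])

flat = out.view(-1, 48 * 112 * 112)
print(flat.shape)                 # torch.Size([1, 602112]), matching fc's in_features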
@@ -1,3 +1,4 @@
+import glob
 import pathlib
 import torchvision.transforms as transforms
 from torchvision.datasets import ImageFolder