import os
import os.path

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
import matplotlib.pyplot as plt
from PIL import Image

torch.manual_seed(42)

train_dir = 'data2/train'
examine_dir = 'data2/validation'

transform_img = transforms.Compose([
    transforms.Resize(255),
    transforms.CenterCrop(224),   # fixed 224x224 crop so every tensor has the same shape and fc1's input size is well-defined
    # transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

train_data = datasets.ImageFolder(train_dir, transform=transform_img)
examine_data = datasets.ImageFolder(examine_dir, transform=transform_img)

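# Quick sanity print: ImageFolder derives the class names from the
# sub-directory names under each data folder.
print('classes:', train_data.classes)
print('train samples:', len(train_data), ' validation samples:', len(examine_data))
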
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# hyper-parameters
num_epochs = 25
batch_size = 25
learning_rate = 0.001

# both loaders use the batch_size hyper-parameter declared above
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True, num_workers=0)
examine_loader = DataLoader(dataset=examine_data, batch_size=batch_size, shuffle=False, num_workers=0)

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=10, kernel_size=3)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=3)
        self.conv2_drop = nn.Dropout2d()
        # 224x224 input -> conv1 -> 222 -> pool -> 111 -> conv2 -> 109 -> pool -> 54,
        # so each image flattens to 20 * 54 * 54 features
        self.fc1 = nn.Linear(20 * 54 * 54, 1024)
        self.fc2 = nn.Linear(1024, 2)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(x.shape[0], -1)   # flatten all feature maps per sample
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return x

model = CNN().to(device)
print(model)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

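# Optional sanity check: push one dummy batch through the network to confirm
# that the flattened feature size matches fc1 (expected output: torch.Size([2, 2])).
with torch.no_grad():
    dummy = torch.randn(2, 3, 224, 224, device=device)
    print('sanity-check output shape:', model(dummy).shape)
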
train_losses = []
valid_losses = []

for epoch in range(1, num_epochs + 1):
    # keep track of training and validation loss
    train_loss = 0.0
    valid_loss = 0.0

    # train the model
    model.train()
    for data, target in train_loader:
        # move tensors to the GPU (if available)
        data = data.to(device)
        target = target.to(device)

        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the batch loss
        loss = criterion(output, target)
        # backward pass: compute gradient of the loss w.r.t. model parameters
        loss.backward()
        # perform a single optimization step (parameter update)
        optimizer.step()
        # update training loss
        train_loss += loss.item() * data.size(0)

    # validate the model
    model.eval()
    with torch.no_grad():
        for data, target in examine_loader:
            data = data.to(device)
            target = target.to(device)

            output = model(data)
            loss = criterion(output, target)
            # update validation loss
            valid_loss += loss.item() * data.size(0)

    # calculate average losses (per sample, since the sums above are weighted by batch size)
    train_loss = train_loss / len(train_loader.dataset)
    valid_loss = valid_loss / len(examine_loader.dataset)
    train_losses.append(train_loss)
    valid_losses.append(valid_loss)

    # print training/validation statistics
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch, train_loss, valid_loss))

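# A minimal sketch of plotting the collected loss curves with the matplotlib
# import above; the file name 'loss_curves.png' is an illustrative choice.
plt.figure()
plt.plot(range(1, num_epochs + 1), train_losses, label='training loss')
plt.plot(range(1, num_epochs + 1), valid_losses, label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.savefig('loss_curves.png')
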
# test the model
model.eval()  # disables dropout
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in examine_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Test Accuracy of the model: {} %'.format(100 * correct / total))

# Save the trained weights
torch.save(model.state_dict(), 'model.ckpt')

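# A minimal sketch of reloading the saved checkpoint and classifying one image
# with the same preprocessing; 'some_image.jpg' is a hypothetical path and the
# class order comes from train_data.classes.
# restored = CNN().to(device)
# restored.load_state_dict(torch.load('model.ckpt', map_location=device))
# restored.eval()
# with torch.no_grad():
#     img = Image.open('some_image.jpg').convert('RGB')      # hypothetical file
#     batch = transform_img(img).unsqueeze(0).to(device)     # add a batch dimension
#     pred = restored(batch).argmax(dim=1).item()
#     print('predicted class:', train_data.classes[pred])
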
'''
def train(model, dataset, n_iter=100, batch_size=256):
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    criterion = nn.NLLLoss()
    dl = DataLoader(dataset, batch_size=batch_size)
    model.train()

    for epoch in range(n_iter):
        for images, targets in dl:
            optimizer.zero_grad()
            out = model(images.to(device))
            loss = criterion(out, targets.to(device))
            loss.backward()
            optimizer.step()

        if epoch % 10 == 0:
            print('epoch: %3d loss: %.4f' % (epoch, loss.item()))


def accuracy(model, dataset):
    model.eval()
    correct = sum([(model(images.to(device)).argmax(dim=1) == targets.to(device)).sum()
                   for images, targets in DataLoader(dataset, batch_size=256)])
    return correct.float() / len(dataset)


model = nn.Sequential(
    nn.Linear(28*28, 10),
    nn.LogSoftmax(dim=-1)
).to(device)

#print(train(model, train_data))
#print(accuracy(model, examine_data))

hidden_size = 300

model_2 = nn.Sequential(
    nn.Linear(28*28, hidden_size),
    nn.ReLU(),
    nn.Linear(hidden_size, 10),
    nn.LogSoftmax(dim=-1)
).to(device)

#print(train(model_2, train_data))
#print(accuracy(model_2, examine_data))

#torch.save(model, 'model.pth')
'''