from torchvision import datasets
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
device = 'cuda' if torch.cuda.is_available() else 'cpu'
data_folder = '/content/' # this can be any directory you want to download FMNIST to
fmnist = datasets.FashionMNIST(data_folder,
                               download=True,
                               train=True)
tr_images = fmnist.data
tr_targets = fmnist.targets
val_fmnist = datasets.FashionMNIST(data_folder,
                                   download=True,
                                   train=False)
val_images = val_fmnist.data
val_targets = val_fmnist.targets
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz to /content/FashionMNIST/raw/train-images-idx3-ubyte.gz
Extracting /content/FashionMNIST/raw/train-images-idx3-ubyte.gz to /content/FashionMNIST/raw
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz to /content/FashionMNIST/raw/train-labels-idx1-ubyte.gz
Extracting /content/FashionMNIST/raw/train-labels-idx1-ubyte.gz to /content/FashionMNIST/raw
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz to /content/FashionMNIST/raw/t10k-images-idx3-ubyte.gz
Extracting /content/FashionMNIST/raw/t10k-images-idx3-ubyte.gz to /content/FashionMNIST/raw
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz to /content/FashionMNIST/raw/t10k-labels-idx1-ubyte.gz
Extracting /content/FashionMNIST/raw/t10k-labels-idx1-ubyte.gz to /content/FashionMNIST/raw
Processing... Done!
from imgaug import augmenters as iaa
aug = iaa.Sequential([
    iaa.Affine(translate_px={'x': (-10, 10)},  # random horizontal shift of up to 10 px
               mode='constant'),               # fill the vacated pixels with a constant (0 by default)
])
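As a quick visual check that the augmenter behaves as intended, you can apply it to a single training image and compare against the original (a minimal sketch, reusing tr_images from the cells above; augment_image is imgaug's single-image counterpart of augment_images):
# Sketch: translate one training image and compare with the original.
# imgaug works on NumPy arrays, so convert the uint8 tensor first.
sample = tr_images[0].numpy()
shifted = aug.augment_image(sample)
plt.subplot(1, 2, 1); plt.imshow(sample, cmap='gray'); plt.title('original')
plt.subplot(1, 2, 2); plt.imshow(shifted, cmap='gray'); plt.title('augmented')
plt.show()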
class FMNISTDataset(Dataset):
    def __init__(self, x, y, aug=None):
        self.x, self.y = x, y
        self.aug = aug
    def __getitem__(self, ix):
        x, y = self.x[ix], self.y[ix]
        return x, y
    def __len__(self): return len(self.x)
    def collate_fn(self, batch):
        'logic to modify a batch of images'
        ims, classes = list(zip(*batch))
        # transform a batch of images at once; imgaug expects NumPy arrays,
        # so convert the uint8 tensors before augmenting
        if self.aug:
            ims = self.aug.augment_images(images=[im.numpy() for im in ims])
        ims = torch.tensor(np.array(ims))[:,None,:,:].to(device)/255.
        classes = torch.tensor(classes).to(device)
        return ims, classes
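Because the augmentation lives inside collate_fn, it is applied to a whole batch in a single imgaug call rather than one image at a time. A minimal sketch of exercising it by hand (the variable names below are illustrative, not from the original cells):
ds = FMNISTDataset(tr_images, tr_targets, aug=aug)
batch = [ds[i] for i in range(4)]   # list of (image, label) pairs
ims, classes = ds.collate_fn(batch)
print(ims.shape, classes.shape)     # torch.Size([4, 1, 28, 28]) torch.Size([4])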
from torch.optim import SGD, Adam
def get_model():
    model = nn.Sequential(
        nn.Conv2d(1, 64, kernel_size=3),
        nn.MaxPool2d(2),
        nn.ReLU(),
        nn.Conv2d(64, 128, kernel_size=3),
        nn.MaxPool2d(2),
        nn.ReLU(),
        nn.Flatten(),
        nn.Linear(3200, 256),  # 128 channels * 5 * 5 spatial = 3200
        nn.ReLU(),
        nn.Linear(256, 10)
    ).to(device)
    loss_fn = nn.CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=1e-3)
    return model, loss_fn, optimizer
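The 3200 in nn.Linear(3200, 256) follows from the shapes: 28x28 -> Conv2d(k=3) -> 26x26 -> MaxPool2d(2) -> 13x13 -> Conv2d(k=3) -> 11x11 -> MaxPool2d(2) -> 5x5, times 128 channels. A sketch to verify it with a dummy forward pass (assuming get_model as defined above; nn.Sequential supports slicing):
m, _, _ = get_model()
dummy = torch.zeros(1, 1, 28, 28).to(device)
print(m[:7](dummy).shape)  # layers up to and including Flatten -> torch.Size([1, 3200])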
def train_batch(x, y, model, opt, loss_fn):
    model.train()
    prediction = model(x)
    batch_loss = loss_fn(prediction, y)
    batch_loss.backward()
    opt.step()       # use the optimizer passed in, not the global `optimizer`
    opt.zero_grad()
    return batch_loss.item()
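The training loop further down tracks only the training loss; if you also want validation accuracy, a helper in the same style could look like this (a sketch, not part of the original cells):
@torch.no_grad()
def accuracy(x, y, model):
    model.eval()
    prediction = model(x)                 # raw logits, shape (N, 10)
    argmaxes = prediction.argmax(dim=1)   # predicted class per image
    return (argmaxes == y).float().mean().item()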
def get_data():
    train = FMNISTDataset(tr_images, tr_targets, aug=aug)
    # notice the collate_fn argument
    trn_dl = DataLoader(train, batch_size=64,
                        collate_fn=train.collate_fn, shuffle=True)
    val = FMNISTDataset(val_images, val_targets)
    # no augmentation and no shuffling for validation
    val_dl = DataLoader(val, batch_size=len(val_images),
                        collate_fn=val.collate_fn, shuffle=False)
    return trn_dl, val_dl
trn_dl, val_dl = get_data()
model, loss_fn, optimizer = get_model()
for epoch in range(5):
    print(epoch)
    for ix, batch in enumerate(trn_dl):
        x, y = batch
        batch_loss = train_batch(x, y, model, optimizer, loss_fn)
0
1
2
3
4
preds = []
ix = 24300
for px in range(-5,6):
    img = tr_images[ix]/255.
    img = img.view(28, 28)
    # shift the image left/right by px pixels
    img2 = np.roll(img.numpy(), px, axis=1)
    img3 = torch.Tensor(img2).view(-1,1,28,28).to(device)
    np_output = model(img3).detach().cpu().numpy()
    # softmax over the 10 logits, computed in NumPy
    pred = np.exp(np_output)/np.sum(np.exp(np_output))
    preds.append(pred)
    plt.imshow(img2)
    plt.title(fmnist.classes[pred[0].argmax()])
    plt.show()
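The manual NumPy softmax above is equivalent to PyTorch's built-in; a one-line alternative using torch.nn.functional (a sketch, reusing img3 from the loop):
import torch.nn.functional as F
with torch.no_grad():
    pred = F.softmax(model(img3), dim=1).cpu().numpy()  # same 10 class probabilities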
import seaborn as sns
fig, ax = plt.subplots(1, 1, figsize=(12, 10))
plt.title('Probability of each class for various translations')
sns.heatmap(np.array(preds).reshape(11, 10), annot=True, ax=ax, fmt='.2f',
            xticklabels=fmnist.classes,
            yticklabels=[str(i) + ' pixels' for i in range(-5, 6)],
            cmap='gray')