Computer_Vision/Chapter06/Road_sign_detection.ipynb

import os
if not os.path.exists('GTSRB'):
    !pip install -U -q torch_snippets
    !wget -qq https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/GTSRB_Final_Training_Images.zip
    !wget -qq https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/GTSRB_Final_Test_Images.zip
    !unzip -qq GTSRB_Final_Training_Images.zip
    !unzip -qq GTSRB_Final_Test_Images.zip
    !wget https://raw.githubusercontent.com/georgesung/traffic_sign_classification_german/master/signnames.csv
    !rm GTSRB_Final_Training_Images.zip GTSRB_Final_Test_Images.zip
    
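# Quick sanity check of the extracted archive (a minimal sketch; the
# one-folder-per-class layout is what GTSRB_Final_Training_Images.zip
# unpacks to):
class_dirs = os.listdir('GTSRB/Final_Training/Images')
print(f'{len(class_dirs)} class folders, e.g. {sorted(class_dirs)[:3]}')
# expected: 43 class folders, e.g. ['00000', '00001', '00002']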

from torch_snippets import *
classIds = pd.read_csv('signnames.csv')
classIds.set_index('ClassId', inplace=True)
classIds = classIds.to_dict()['SignName']
classIds = {f'{k:05d}':v for k,v in classIds.items()}
id2int = {v:ix for ix,(k,v) in enumerate(classIds.items())}
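# classIds now maps zero-padded folder names to readable sign names, and
# id2int maps sign names to contiguous integer targets. A quick check (the
# example value assumes the standard GTSRB signnames.csv, where ClassId 0 is
# the 20 km/h speed limit):
print(classIds['00000'])           # 'Speed limit (20km/h)'
print(id2int[classIds['00000']])   # 0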

from torchvision import transforms as T

trn_tfms = T.Compose([
    T.ToPILImage(),
    T.Resize(32),
    T.CenterCrop(32),
    # Augmentation is disabled for this baseline run:
    # T.ColorJitter(brightness=(0.8,1.2), contrast=(0.8,1.2),
    #               saturation=(0.8,1.2), hue=0.25),
    # T.RandomAffine(5, translate=(0.01,0.1)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], 
                std=[0.229, 0.224, 0.225]),
])

val_tfms = T.Compose([
    T.ToPILImage(),
    T.Resize(32),
    T.CenterCrop(32),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], 
                std=[0.229, 0.224, 0.225]),
])
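# Both pipelines resize the short side to 32 px, take the central 32x32 crop,
# and normalize with ImageNet statistics. A minimal shape check on a stand-in
# image (any HxWx3 uint8 array, like the output of read(fpath, 1)):
dummy = (np.random.rand(48, 64, 3) * 255).astype(np.uint8)
print(val_tfms(dummy).shape)   # torch.Size([3, 32, 32])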

class GTSRB(Dataset):
    """Face Landmarks dataset."""

    def __init__(self, files, transform=None):
        self.files = files
        self.transform = transform
        logger.info(len(self))

    def __len__(self):
        return len(self.files)

    def __getitem__(self, ix):
        fpath = self.files[ix]
        clss = fname(parent(fpath))
        img = read(fpath, 1)
        return img, classIds[clss]

    def choose(self):
        return self[randint(len(self))]

    def collate_fn(self, batch):
        imgs, classes = list(zip(*batch))
        if self.transform:
            imgs = [self.transform(img)[None] for img in imgs]
        classes = [torch.tensor([id2int[clss]]) for clss in classes]
        imgs, classes = [torch.cat(i).to(device) for i in [imgs, classes]]
        return imgs, classes

device = 'cuda' if torch.cuda.is_available() else 'cpu'
all_files = Glob('GTSRB/Final_Training/Images/*/*.ppm')
np.random.seed(10)
np.random.shuffle(all_files)

from sklearn.model_selection import train_test_split
trn_files, val_files = train_test_split(all_files, random_state=1)

trn_ds = GTSRB(trn_files, transform=trn_tfms)
val_ds = GTSRB(val_files, transform=val_tfms)
trn_dl = DataLoader(trn_ds, 32, shuffle=True, collate_fn=trn_ds.collate_fn)
val_dl = DataLoader(val_ds, 32, shuffle=False, collate_fn=val_ds.collate_fn)
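# __getitem__ returns raw (image, sign-name) pairs; resizing, normalization,
# label encoding, and the device transfer all happen per batch in collate_fn.
# Peek at one batch to confirm the shapes:
ims, labels = next(iter(trn_dl))
print(ims.shape, labels.shape)   # torch.Size([32, 3, 32, 32]) torch.Size([32])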

def convBlock(ni, no):
    return nn.Sequential(
        nn.Dropout(0.2),
        nn.Conv2d(ni, no, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        # nn.BatchNorm2d(no),  # BatchNorm disabled for this baseline run
        nn.MaxPool2d(2),
    )
    
class SignClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(
            convBlock(3, 64),
            convBlock(64, 64),
            convBlock(64, 128),
            convBlock(128, 64),
            nn.Flatten(),
            nn.Linear(256, 256),
            nn.Dropout(0.2),
            nn.ReLU(inplace=True),
            nn.Linear(256, len(id2int))
        )
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, x):
        return self.model(x)

    def compute_metrics(self, preds, targets):
        ce_loss = self.loss_fn(preds, targets)
        acc = (torch.max(preds, 1)[1] == targets).float().mean()
        return ce_loss, acc
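# Why Linear(256, 256): each conv block halves the spatial size via MaxPool2d,
# so a 3x32x32 input shrinks to 64x2x2 = 256 features after the four blocks.
# A quick forward-pass check on a dummy batch:
with torch.no_grad():
    print(SignClassifier()(torch.zeros(2, 3, 32, 32)).shape)
# torch.Size([2, 43]) -- one logit per GTSRB sign class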

def train_batch(model, data, optimizer, criterion):
    ims, labels = data
    _preds = model(ims)
    optimizer.zero_grad()
    loss, acc = criterion(_preds, labels)
    loss.backward()
    optimizer.step()
    return loss.item(), acc.item()

@torch.no_grad()
def validate_batch(model, data, criterion):
    ims, labels = data
    _preds = model(ims)
    loss, acc = criterion(_preds, labels)
    return loss.item(), acc.item()

model = SignClassifier().to(device)
criterion = model.compute_metrics
optimizer = optim.Adam(model.parameters(), lr=1e-3)
n_epochs = 40

log = Report(n_epochs)
for ex in range(n_epochs):
    N = len(trn_dl)
    for bx, data in enumerate(trn_dl):
        loss, acc = train_batch(model, data, optimizer, criterion)
        log.record(ex+(bx+1)/N, trn_loss=loss, trn_acc=acc, end='\r')

    N = len(val_dl)
    for bx, data in enumerate(val_dl):
        loss, acc = validate_batch(model, data, criterion)
        log.record(ex+(bx+1)/N, val_loss=loss, val_acc=acc, end='\r')
        
    log.report_avgs(ex+1)
    if ex == 10: optimizer = optim.Adam(model.parameters(), lr=1e-4)  # drop to a 10x lower LR after the 11th epoch

log.plot_epochs()
dumpdill(log, 'no-aug-no-bn.log')
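# Report only stores the metric curves; to also keep the trained weights for
# later reuse (a sketch, with an arbitrary filename):
torch.save(model.state_dict(), 'no-aug-no-bn.pth')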
2020-09-17 11:22:22.120 | INFO     | torch_snippets.loader:Glob:172 - 39209 files found at GTSRB/Final_Training/Images/*/*.ppm
2020-09-17 11:22:22.509 | INFO     | __main__:__init__:46 - 29406
2020-09-17 11:22:22.510 | INFO     | __main__:__init__:46 - 9803
EPOCH: 1.000	trn_loss: 2.410	trn_acc: 0.290	val_loss: 1.641	val_acc: 0.475	(17.64s - 688.04s remaining)
EPOCH: 2.000	trn_loss: 1.214	trn_acc: 0.599	val_loss: 0.914	val_acc: 0.695	(34.74s - 660.13s remaining)
EPOCH: 2.610	trn_loss: 0.927	trn_acc: 0.625	(42.70s - 611.66s remaining)
from torch_snippets import *
classIds = pd.read_csv('signnames.csv')
classIds.set_index('ClassId', inplace=True)
classIds = classIds.to_dict()['SignName']
classIds = {f'{k:05d}':v for k,v in classIds.items()}
id2int = {v:ix for ix,(k,v) in enumerate(classIds.items())}

from torchvision import transforms as T

trn_tfms = T.Compose([
    T.ToPILImage(),
    T.Resize(32),
    T.CenterCrop(32),
    # Augmentation is still disabled; this run only adds BatchNorm:
    # T.ColorJitter(brightness=(0.8,1.2), contrast=(0.8,1.2),
    #               saturation=(0.8,1.2), hue=0.25),
    # T.RandomAffine(5, translate=(0.01,0.1)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], 
                std=[0.229, 0.224, 0.225]),
])

val_tfms = T.Compose([
    T.ToPILImage(),
    T.Resize(32),
    T.CenterCrop(32),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], 
                std=[0.229, 0.224, 0.225]),
])

class GTSRB(Dataset):
    """Face Landmarks dataset."""

    def __init__(self, files, transform=None):
        self.files = files
        self.transform = transform
        logger.info(len(self))

    def __len__(self):
        return len(self.files)

    def __getitem__(self, ix):
        fpath = self.files[ix]
        clss = fname(parent(fpath))
        img = read(fpath, 1)
        return img, classIds[clss]

    def choose(self):
        return self[randint(len(self))]

    def collate_fn(self, batch):
        imgs, classes = list(zip(*batch))
        if self.transform:
            imgs = [self.transform(img)[None] for img in imgs]
        classes = [torch.tensor([id2int[clss]]) for clss in classes]
        imgs, classes = [torch.cat(i).to(device) for i in [imgs, classes]]
        return imgs, classes

device = 'cuda' if torch.cuda.is_available() else 'cpu'
all_files = Glob('GTSRB/Final_Training/Images/*/*.ppm')
np.random.seed(10)
np.random.shuffle(all_files)

from sklearn.model_selection import train_test_split
trn_files, val_files = train_test_split(all_files, random_state=1)

trn_ds = GTSRB(trn_files, transform=trn_tfms)
val_ds = GTSRB(val_files, transform=val_tfms)
trn_dl = DataLoader(trn_ds, 32, shuffle=True, collate_fn=trn_ds.collate_fn)
val_dl = DataLoader(val_ds, 32, shuffle=False, collate_fn=val_ds.collate_fn)

def convBlock(ni, no):
    return nn.Sequential(
        nn.Dropout(0.2),
        nn.Conv2d(ni, no, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(no),  # BatchNorm enabled for this run
        nn.MaxPool2d(2),
    )
    
class SignClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(
            convBlock(3, 64),
            convBlock(64, 64),
            convBlock(64, 128),
            convBlock(128, 64),
            nn.Flatten(),
            nn.Linear(256, 256),
            nn.Dropout(0.2),
            nn.ReLU(inplace=True),
            nn.Linear(256, len(id2int))
        )
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, x):
        return self.model(x)

    def compute_metrics(self, preds, targets):
        ce_loss = self.loss_fn(preds, targets)
        acc = (torch.max(preds, 1)[1] == targets).float().mean()
        return ce_loss, acc

def train_batch(model, data, optimizer, criterion):
    ims, labels = data
    _preds = model(ims)
    optimizer.zero_grad()
    loss, acc = criterion(_preds, labels)
    loss.backward()
    optimizer.step()
    return loss.item(), acc.item()

@torch.no_grad()
def validate_batch(model, data, criterion):
    ims, labels = data
    _preds = model(ims)
    loss, acc = criterion(_preds, labels)
    return loss.item(), acc.item()

model = SignClassifier().to(device)
criterion = model.compute_metrics
optimizer = optim.Adam(model.parameters(), lr=1e-3)
n_epochs = 40

log = Report(n_epochs)
for ex in range(n_epochs):
    N = len(trn_dl)
    for bx, data in enumerate(trn_dl):
        loss, acc = train_batch(model, data, optimizer, criterion)
        log.record(ex+(bx+1)/N, trn_loss=loss, trn_acc=acc, end='\r')

    N = len(val_dl)
    for bx, data in enumerate(val_dl):
        loss, acc = validate_batch(model, data, criterion)
        log.record(ex+(bx+1)/N, val_loss=loss, val_acc=acc, end='\r')
        
    log.report_avgs(ex+1)
    if ex == 10: optimizer = optim.Adam(model.parameters(), lr=1e-4)  # drop to a 10x lower LR after the 11th epoch

log.plot_epochs()
dumpdill(log, 'no-aug-yes-bn.log')
from torch_snippets import *
classIds = pd.read_csv('signnames.csv')
classIds.set_index('ClassId', inplace=True)
classIds = classIds.to_dict()['SignName']
classIds = {f'{k:05d}':v for k,v in classIds.items()}
id2int = {v:ix for ix,(k,v) in enumerate(classIds.items())}

from torchvision import transforms as T

trn_tfms = T.Compose([
    T.ToPILImage(),
    T.Resize(32),
    T.CenterCrop(32),
    # Augmentation is enabled for this run:
    T.ColorJitter(brightness=(0.8,1.2), contrast=(0.8,1.2),
                  saturation=(0.8,1.2), hue=0.25),
    T.RandomAffine(5, translate=(0.01,0.1)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], 
                std=[0.229, 0.224, 0.225]),
])

val_tfms = T.Compose([
    T.ToPILImage(),
    T.Resize(32),
    T.CenterCrop(32),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], 
                std=[0.229, 0.224, 0.225]),
])

class GTSRB(Dataset):
    """Face Landmarks dataset."""

    def __init__(self, files, transform=None):
        self.files = files
        self.transform = transform
        logger.info(len(self))

    def __len__(self):
        return len(self.files)

    def __getitem__(self, ix):
        fpath = self.files[ix]
        clss = fname(parent(fpath))
        img = read(fpath, 1)
        return img, classIds[clss]

    def choose(self):
        return self[randint(len(self))]

    def collate_fn(self, batch):
        imgs, classes = list(zip(*batch))
        if self.transform:
            imgs = [self.transform(img)[None] for img in imgs]
        classes = [torch.tensor([id2int[clss]]) for clss in classes]
        imgs, classes = [torch.cat(i).to(device) for i in [imgs, classes]]
        return imgs, classes

device = 'cuda' if torch.cuda.is_available() else 'cpu'
all_files = Glob('GTSRB/Final_Training/Images/*/*.ppm')
np.random.seed(10)
np.random.shuffle(all_files)

from sklearn.model_selection import train_test_split
trn_files, val_files = train_test_split(all_files, random_state=1)

trn_ds = GTSRB(trn_files, transform=trn_tfms)
val_ds = GTSRB(val_files, transform=val_tfms)
trn_dl = DataLoader(trn_ds, 32, shuffle=True, collate_fn=trn_ds.collate_fn)
val_dl = DataLoader(val_ds, 32, shuffle=False, collate_fn=val_ds.collate_fn)
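# This run enables ColorJitter and RandomAffine. To eyeball what the
# augmentations do, denormalize a few transformed copies of one training
# image (a minimal sketch using matplotlib):
import matplotlib.pyplot as plt
img = read(trn_files[0], 1)
mean = torch.tensor([0.485, 0.456, 0.406])[:, None, None]
std = torch.tensor([0.229, 0.224, 0.225])[:, None, None]
fig, axes = plt.subplots(1, 4, figsize=(8, 2))
for ax in axes:
    t = trn_tfms(img) * std + mean   # undo Normalize for display
    ax.imshow(t.permute(1, 2, 0).clamp(0, 1))
    ax.axis('off')
plt.show()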

def convBlock(ni, no):
    return nn.Sequential(
        nn.Dropout(0.2),
        nn.Conv2d(ni, no, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(no),
        nn.MaxPool2d(2),
    )
    
class SignClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(
            convBlock(3, 64),
            convBlock(64, 64),
            convBlock(64, 128),
            convBlock(128, 64),
            nn.Flatten(),
            nn.Linear(256, 256),
            nn.Dropout(0.2),
            nn.ReLU(inplace=True),
            nn.Linear(256, len(id2int))
        )
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, x):
        return self.model(x)

    def compute_metrics(self, preds, targets):
        ce_loss = self.loss_fn(preds, targets)
        acc = (torch.max(preds, 1)[1] == targets).float().mean()
        return ce_loss, acc

def train_batch(model, data, optimizer, criterion):
    ims, labels = data
    _preds = model(ims)
    optimizer.zero_grad()
    loss, acc = criterion(_preds, labels)
    loss.backward()
    optimizer.step()
    return loss.item(), acc.item()

@torch.no_grad()
def validate_batch(model, data, criterion):
    ims, labels = data
    _preds = model(ims)
    loss, acc = criterion(_preds, labels)
    return loss.item(), acc.item()

model = SignClassifier().to(device)
criterion = model.compute_metrics
optimizer = optim.Adam(model.parameters(), lr=1e-3)
n_epochs = 40

log = Report(n_epochs)
for ex in range(n_epochs):
    N = len(trn_dl)
    for bx, data in enumerate(trn_dl):
        loss, acc = train_batch(model, data, optimizer, criterion)
        log.record(ex+(bx+1)/N, trn_loss=loss, trn_acc=acc, end='\r')

    N = len(val_dl)
    for bx, data in enumerate(val_dl):
        loss, acc = validate_batch(model, data, criterion)
        log.record(ex+(bx+1)/N, val_loss=loss, val_acc=acc, end='\r')
        
    log.report_avgs(ex+1)
    if ex == 10: optimizer = optim.Adam(model.parameters(), lr=1e-4)  # drop to a 10x lower LR after the 11th epoch

log.plot_epochs()
dumpdill(log, '40-yes-aug-yes-bn.log')
from google.colab import files
files.download('40-yes-aug-yes-bn.log')
2020-09-17 09:54:30.795 | INFO     | torch_snippets.loader:Glob:172 - 39209 files found at GTSRB/Final_Training/Images/*/*.ppm
2020-09-17 09:54:30.808 | INFO     | __main__:__init__:46 - 29406
2020-09-17 09:54:30.809 | INFO     | __main__:__init__:46 - 9803
EPOCH: 1.000	trn_loss: 1.840	trn_acc: 0.462	val_loss: 0.752	val_acc: 0.762	(62.07s - 2420.80s remaining)
EPOCH: 2.000	trn_loss: 0.679	trn_acc: 0.780	val_loss: 0.432	val_acc: 0.858	(125.57s - 2385.74s remaining)
EPOCH: 3.000	trn_loss: 0.480	trn_acc: 0.845	val_loss: 0.369	val_acc: 0.878	(189.63s - 2338.78s remaining)
EPOCH: 4.000	trn_loss: 0.397	trn_acc: 0.871	val_loss: 0.299	val_acc: 0.902	(253.21s - 2278.87s remaining)
EPOCH: 5.000	trn_loss: 0.341	trn_acc: 0.889	val_loss: 0.267	val_acc: 0.911	(316.45s - 2215.16s remaining)
EPOCH: 6.000	trn_loss: 0.297	trn_acc: 0.905	val_loss: 0.246	val_acc: 0.920	(379.64s - 2151.27s remaining)
EPOCH: 7.000	trn_loss: 0.269	trn_acc: 0.910	val_loss: 0.218	val_acc: 0.930	(442.87s - 2087.82s remaining)
EPOCH: 8.000	trn_loss: 0.250	trn_acc: 0.918	val_loss: 0.198	val_acc: 0.934	(506.33s - 2025.32s remaining)
EPOCH: 9.000	trn_loss: 0.230	trn_acc: 0.923	val_loss: 0.198	val_acc: 0.938	(569.49s - 1961.58s remaining)
EPOCH: 10.000	trn_loss: 0.211	trn_acc: 0.930	val_loss: 0.173	val_acc: 0.943	(632.87s - 1898.60s remaining)
EPOCH: 11.000	trn_loss: 0.202	trn_acc: 0.933	val_loss: 0.186	val_acc: 0.943	(696.65s - 1836.64s remaining)
EPOCH: 12.000	trn_loss: 0.145	trn_acc: 0.952	val_loss: 0.130	val_acc: 0.958	(759.59s - 1772.37s remaining)
EPOCH: 13.000	trn_loss: 0.130	trn_acc: 0.956	val_loss: 0.116	val_acc: 0.963	(822.65s - 1708.59s remaining)
EPOCH: 14.000	trn_loss: 0.119	trn_acc: 0.959	val_loss: 0.113	val_acc: 0.962	(885.85s - 1645.14s remaining)
EPOCH: 15.000	trn_loss: 0.113	trn_acc: 0.963	val_loss: 0.115	val_acc: 0.965	(948.54s - 1580.91s remaining)
EPOCH: 16.000	trn_loss: 0.109	trn_acc: 0.963	val_loss: 0.106	val_acc: 0.964	(1011.32s - 1516.98s remaining)
EPOCH: 17.000	trn_loss: 0.104	trn_acc: 0.964	val_loss: 0.105	val_acc: 0.966	(1074.53s - 1453.78s remaining)
EPOCH: 18.000	trn_loss: 0.097	trn_acc: 0.967	val_loss: 0.098	val_acc: 0.968	(1138.27s - 1391.22s remaining)
EPOCH: 19.000	trn_loss: 0.100	trn_acc: 0.967	val_loss: 0.106	val_acc: 0.966	(1201.71s - 1328.20s remaining)
EPOCH: 20.000	trn_loss: 0.098	trn_acc: 0.966	val_loss: 0.095	val_acc: 0.969	(1265.57s - 1265.57s remaining)
EPOCH: 21.000	trn_loss: 0.095	trn_acc: 0.968	val_loss: 0.098	val_acc: 0.969	(1329.09s - 1202.51s remaining)
EPOCH: 22.000	trn_loss: 0.094	trn_acc: 0.967	val_loss: 0.093	val_acc: 0.972	(1393.45s - 1140.09s remaining)
EPOCH: 23.000	trn_loss: 0.090	trn_acc: 0.970	val_loss: 0.096	val_acc: 0.971	(1457.31s - 1077.14s remaining)
EPOCH: 24.000	trn_loss: 0.085	trn_acc: 0.971	val_loss: 0.092	val_acc: 0.972	(1521.21s - 1014.14s remaining)
EPOCH: 25.000	trn_loss: 0.089	trn_acc: 0.970	val_loss: 0.096	val_acc: 0.968	(1585.12s - 951.07s remaining)
EPOCH: 26.000	trn_loss: 0.085	trn_acc: 0.971	val_loss: 0.090	val_acc: 0.972	(1649.27s - 888.07s remaining)
EPOCH: 27.000	trn_loss: 0.080	trn_acc: 0.972	val_loss: 0.083	val_acc: 0.974	(1713.44s - 824.99s remaining)
EPOCH: 28.000	trn_loss: 0.080	trn_acc: 0.973	val_loss: 0.095	val_acc: 0.972	(1777.52s - 761.79s remaining)
EPOCH: 29.000	trn_loss: 0.080	trn_acc: 0.972	val_loss: 0.081	val_acc: 0.975	(1841.47s - 698.49s remaining)
EPOCH: 30.000	trn_loss: 0.081	trn_acc: 0.972	val_loss: 0.088	val_acc: 0.974	(1905.06s - 635.02s remaining)
EPOCH: 31.000	trn_loss: 0.075	trn_acc: 0.975	val_loss: 0.084	val_acc: 0.972	(1968.86s - 571.60s remaining)
EPOCH: 32.000	trn_loss: 0.081	trn_acc: 0.973	val_loss: 0.089	val_acc: 0.972	(2032.45s - 508.11s remaining)
EPOCH: 33.000	trn_loss: 0.076	trn_acc: 0.974	val_loss: 0.089	val_acc: 0.971	(2095.91s - 444.59s remaining)
EPOCH: 34.000	trn_loss: 0.072	trn_acc: 0.975	val_loss: 0.083	val_acc: 0.975	(2159.69s - 381.12s remaining)
EPOCH: 35.000	trn_loss: 0.072	trn_acc: 0.975	val_loss: 0.087	val_acc: 0.974	(2223.42s - 317.63s remaining)
EPOCH: 36.000	trn_loss: 0.070	trn_acc: 0.976	val_loss: 0.079	val_acc: 0.977	(2287.19s - 254.13s remaining)
EPOCH: 37.000	trn_loss: 0.071	trn_acc: 0.975	val_loss: 0.081	val_acc: 0.975	(2351.40s - 190.65s remaining)
EPOCH: 38.000	trn_loss: 0.071	trn_acc: 0.976	val_loss: 0.074	val_acc: 0.975	(2415.64s - 127.14s remaining)
EPOCH: 38.106	trn_loss: 0.100	trn_acc: 0.938	(2421.79s - 120.40s remaining)
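# Single-image inference with the last trained model (a sketch; int2name just
# inverts the id2int lookup built earlier):
@torch.no_grad()
def predict(fpath):
    int2name = {v: k for k, v in id2int.items()}
    x = val_tfms(read(fpath, 1))[None].to(device)
    return int2name[model(x).argmax(1).item()]
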
# Compare every saved experiment log at the epoch-20 mark
for f in Glob('*.log'):
    log = loaddill(f)
    print()
    log.report_avgs(20)
    log.plot_epochs(['trn_acc', 'val_acc'], title=f.replace(',','\n').replace('.log',''))
    print()
    line()
2020-09-17 09:35:05.018 | INFO     | torch_snippets.loader:Glob:172 - 4 files found at *.log

EPOCH: 20.000	trn_loss: 0.031	trn_acc: 0.990	val_loss: 0.077	val_acc: 0.978	(8455.42s - 0.00s remaining)
==================================================================

EPOCH: 20.000	trn_loss: 0.391	trn_acc: 0.869	val_loss: 0.323	val_acc: 0.889	(9196.88s - 0.00s remaining)
==================================================================

EPOCH: 20.000	trn_loss: 0.094	trn_acc: 0.969	val_loss: 0.093	val_acc: 0.969	(8106.35s - 0.00s remaining)
==================================================================

EPOCH: 20.000	trn_loss: 0.146	trn_acc: 0.952	val_loss: 0.202	val_acc: 0.938	(7348.78s - 0.00s remaining)
==================================================================