384 KiB
384 KiB
# One-time environment setup: download the bus/truck subset of Open Images
# and clone the SSD utility code, then cd into it.
# The '!' and '%' lines are IPython shell/line magics — they only execute
# inside a Jupyter notebook, not as plain Python.
import os
if not os.path.exists('open-images-bus-trucks'):
# NOTE(review): the magic lines below appear unindented in this export;
# in the original notebook they form the body of the `if` above.
!pip install -q torch_snippets
!wget --quiet https://www.dropbox.com/s/agmzwk95v96ihic/open-images-bus-trucks.tar.xz
!tar -xf open-images-bus-trucks.tar.xz
!rm open-images-bus-trucks.tar.xz
!git clone https://github.com/sizhky/ssd-utils/
%cd ssd-utils
tar: Error opening archive: Failed to open 'open-images-bus-trucks.tar.xz' 'rm' is not recognized as an internal or external command, operable program or batch file.
f:\Zajecia\books\computer_vision\Modern-Computer-Vision-with-PyTorch-master\Modern-Computer-Vision-with-PyTorch-master\Modern-Computer-Vision-with-PyTorch-master\Chapter08\ssd-utils
fatal: destination path 'ssd-utils' already exists and is not an empty directory.
from torch_snippets import *

# Dataset layout produced by the download cell above.
DATA_ROOT = '../open-images-bus-trucks/'
IMAGE_ROOT = f'{DATA_ROOT}/images'

# DF_RAW keeps the untouched annotation table; df is the working alias.
DF_RAW = df = pd.read_csv(f'{DATA_ROOT}/df.csv')
# FIX: the original re-filtered df by its own unique ImageIDs
# (df[df['ImageID'].isin(df['ImageID'].unique().tolist())]) — a tautological
# no-op that copied the whole frame; it has been removed.

# Map class names to integer targets; 0 is reserved for background.
label2target = {l: t + 1 for t, l in enumerate(DF_RAW['LabelName'].unique())}
label2target['background'] = 0
target2label = {t: l for l, t in label2target.items()}
background_class = label2target['background']
num_classes = len(label2target)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
[1;31m---------------------------------------------------------------------------[0m [1;31mFileNotFoundError[0m Traceback (most recent call last) [1;32m~\AppData\Local\Temp\ipykernel_28720\2147305028.py[0m in [0;36m<module>[1;34m[0m [0;32m 2[0m [0mDATA_ROOT[0m [1;33m=[0m [1;34m'../open-images-bus-trucks/'[0m[1;33m[0m[1;33m[0m[0m [0;32m 3[0m [0mIMAGE_ROOT[0m [1;33m=[0m [1;34mf'{DATA_ROOT}/images'[0m[1;33m[0m[1;33m[0m[0m [1;32m----> 4[1;33m [0mDF_RAW[0m [1;33m=[0m [0mdf[0m [1;33m=[0m [0mpd[0m[1;33m.[0m[0mread_csv[0m[1;33m([0m[1;34mf'{DATA_ROOT}/df.csv'[0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [0m[0;32m 5[0m [1;33m[0m[0m [0;32m 6[0m [0mdf[0m [1;33m=[0m [0mdf[0m[1;33m[[0m[0mdf[0m[1;33m[[0m[1;34m'ImageID'[0m[1;33m][0m[1;33m.[0m[0misin[0m[1;33m([0m[0mdf[0m[1;33m[[0m[1;34m'ImageID'[0m[1;33m][0m[1;33m.[0m[0munique[0m[1;33m([0m[1;33m)[0m[1;33m.[0m[0mtolist[0m[1;33m([0m[1;33m)[0m[1;33m)[0m[1;33m][0m[1;33m[0m[1;33m[0m[0m [1;32m~\anaconda3\lib\site-packages\pandas\util\_decorators.py[0m in [0;36mwrapper[1;34m(*args, **kwargs)[0m [0;32m 309[0m [0mstacklevel[0m[1;33m=[0m[0mstacklevel[0m[1;33m,[0m[1;33m[0m[1;33m[0m[0m [0;32m 310[0m ) [1;32m--> 311[1;33m [1;32mreturn[0m [0mfunc[0m[1;33m([0m[1;33m*[0m[0margs[0m[1;33m,[0m [1;33m**[0m[0mkwargs[0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [0m[0;32m 312[0m [1;33m[0m[0m [0;32m 313[0m [1;32mreturn[0m [0mwrapper[0m[1;33m[0m[1;33m[0m[0m [1;32m~\anaconda3\lib\site-packages\pandas\io\parsers\readers.py[0m in [0;36mread_csv[1;34m(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, encoding_errors, dialect, error_bad_lines, 
warn_bad_lines, on_bad_lines, delim_whitespace, low_memory, memory_map, float_precision, storage_options)[0m [0;32m 676[0m [0mkwds[0m[1;33m.[0m[0mupdate[0m[1;33m([0m[0mkwds_defaults[0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [0;32m 677[0m [1;33m[0m[0m [1;32m--> 678[1;33m [1;32mreturn[0m [0m_read[0m[1;33m([0m[0mfilepath_or_buffer[0m[1;33m,[0m [0mkwds[0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [0m[0;32m 679[0m [1;33m[0m[0m [0;32m 680[0m [1;33m[0m[0m [1;32m~\anaconda3\lib\site-packages\pandas\io\parsers\readers.py[0m in [0;36m_read[1;34m(filepath_or_buffer, kwds)[0m [0;32m 573[0m [1;33m[0m[0m [0;32m 574[0m [1;31m# Create the parser.[0m[1;33m[0m[1;33m[0m[0m [1;32m--> 575[1;33m [0mparser[0m [1;33m=[0m [0mTextFileReader[0m[1;33m([0m[0mfilepath_or_buffer[0m[1;33m,[0m [1;33m**[0m[0mkwds[0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [0m[0;32m 576[0m [1;33m[0m[0m [0;32m 577[0m [1;32mif[0m [0mchunksize[0m [1;32mor[0m [0miterator[0m[1;33m:[0m[1;33m[0m[1;33m[0m[0m [1;32m~\anaconda3\lib\site-packages\pandas\io\parsers\readers.py[0m in [0;36m__init__[1;34m(self, f, engine, **kwds)[0m [0;32m 930[0m [1;33m[0m[0m [0;32m 931[0m [0mself[0m[1;33m.[0m[0mhandles[0m[1;33m:[0m [0mIOHandles[0m [1;33m|[0m [1;32mNone[0m [1;33m=[0m [1;32mNone[0m[1;33m[0m[1;33m[0m[0m [1;32m--> 932[1;33m [0mself[0m[1;33m.[0m[0m_engine[0m [1;33m=[0m [0mself[0m[1;33m.[0m[0m_make_engine[0m[1;33m([0m[0mf[0m[1;33m,[0m [0mself[0m[1;33m.[0m[0mengine[0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [0m[0;32m 933[0m [1;33m[0m[0m [0;32m 934[0m [1;32mdef[0m [0mclose[0m[1;33m([0m[0mself[0m[1;33m)[0m[1;33m:[0m[1;33m[0m[1;33m[0m[0m [1;32m~\anaconda3\lib\site-packages\pandas\io\parsers\readers.py[0m in [0;36m_make_engine[1;34m(self, f, engine)[0m [0;32m 1214[0m [1;31m# "Union[str, PathLike[str], ReadCsvBuffer[bytes], ReadCsvBuffer[str]]"[0m[1;33m[0m[1;33m[0m[0m [0;32m 1215[0m [1;31m# , "str", "bool", "Any", "Any", "Any", "Any", "Any"[0m[1;33m[0m[1;33m[0m[0m [1;32m-> 1216[1;33m self.handles = get_handle( # type: ignore[call-overload] [0m[0;32m 1217[0m 
[0mf[0m[1;33m,[0m[1;33m[0m[1;33m[0m[0m [0;32m 1218[0m [0mmode[0m[1;33m,[0m[1;33m[0m[1;33m[0m[0m [1;32m~\anaconda3\lib\site-packages\pandas\io\common.py[0m in [0;36mget_handle[1;34m(path_or_buf, mode, encoding, compression, memory_map, is_text, errors, storage_options)[0m [0;32m 784[0m [1;32mif[0m [0mioargs[0m[1;33m.[0m[0mencoding[0m [1;32mand[0m [1;34m"b"[0m [1;32mnot[0m [1;32min[0m [0mioargs[0m[1;33m.[0m[0mmode[0m[1;33m:[0m[1;33m[0m[1;33m[0m[0m [0;32m 785[0m [1;31m# Encoding[0m[1;33m[0m[1;33m[0m[0m [1;32m--> 786[1;33m handle = open( [0m[0;32m 787[0m [0mhandle[0m[1;33m,[0m[1;33m[0m[1;33m[0m[0m [0;32m 788[0m [0mioargs[0m[1;33m.[0m[0mmode[0m[1;33m,[0m[1;33m[0m[1;33m[0m[0m [1;31mFileNotFoundError[0m: [Errno 2] No such file or directory: '../open-images-bus-trucks//df.csv'
import collections, os, torch
from PIL import Image
from torchvision import transforms

# ImageNet channel statistics expected by the pretrained VGG backbone.
normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225]
)
# Inverse of `normalize`: applying denormalize(normalize(x)) recovers x,
# since Normalize computes (x - mean) / std.
# BUG FIX: the third channel previously used 0.255 instead of 0.225 (in both
# the mean and std terms), so the blue channel was not correctly inverted.
denormalize = transforms.Normalize(
    mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
    std=[1/0.229, 1/0.224, 1/0.225]
)
def preprocess_image(img):
    """Convert an HxWxC image array into a normalized CxHxW float tensor on `device`."""
    chw = torch.tensor(img).permute(2, 0, 1)  # channels-last -> channels-first
    return normalize(chw).to(device).float()
class OpenDataset(torch.utils.data.Dataset):
    """Bus/truck detection dataset over the Open Images subset.

    Each item is (image, boxes, labels) where image is an HxWx3 float array in
    [0, 1] resized to 300x300, boxes are absolute-pixel [XMin, YMin, XMax, YMax]
    lists, and labels are class-name strings.
    """
    w, h = 300, 300  # SSD300 input resolution

    def __init__(self, df, image_dir=IMAGE_ROOT):
        self.image_dir = image_dir
        self.files = glob.glob(self.image_dir+'/*')
        self.df = df
        self.image_infos = df.ImageID.unique()
        logger.info(f'{len(self)} items loaded')

    def __getitem__(self, ix):
        # load images and masks
        image_id = self.image_infos[ix]
        img_path = find(image_id, self.files)
        img = Image.open(img_path).convert("RGB")
        img = np.array(img.resize((self.w, self.h), resample=Image.BILINEAR))/255.
        # BUG FIX: filter self.df (the train/val split handed to this dataset),
        # not the module-level df — the original read the full dataframe for
        # every split, ignoring the train/validation partition.
        data = self.df[self.df['ImageID'] == image_id]
        labels = data['LabelName'].values.tolist()
        data = data[['XMin','YMin','XMax','YMax']].values
        # Scale normalized [0,1] coordinates to absolute 300x300 pixels.
        data[:,[0,2]] *= self.w
        data[:,[1,3]] *= self.h
        boxes = data.astype(np.uint32).tolist() # convert to absolute coordinates
        return img, boxes, labels

    def collate_fn(self, batch):
        """Stack preprocessed images; keep boxes/labels as per-image tensors
        (variable number of objects per image prevents plain stacking)."""
        images, boxes, labels = [], [], []
        for item in batch:
            img, image_boxes, image_labels = item
            img = preprocess_image(img)[None]
            images.append(img)
            # Boxes are rescaled back to [0,1] as expected by MultiBoxLoss.
            boxes.append(torch.tensor(image_boxes).float().to(device)/300.)
            labels.append(torch.tensor([label2target[c] for c in image_labels]).long().to(device))
        images = torch.cat(images).to(device)
        return images, boxes, labels

    def __len__(self):
        return len(self.image_infos)
from sklearn.model_selection import train_test_split

# 90/10 split at image level (fixed seed for reproducibility).
trn_ids, val_ids = train_test_split(df.ImageID.unique(), test_size=0.1, random_state=99)
trn_df = df[df['ImageID'].isin(trn_ids)]
val_df = df[df['ImageID'].isin(val_ids)]
len(trn_df), len(val_df)

train_ds = OpenDataset(trn_df)
test_ds = OpenDataset(val_df)

# drop_last avoids a ragged final batch; collate_fn handles variable box counts.
train_loader = DataLoader(train_ds, batch_size=4, collate_fn=train_ds.collate_fn, drop_last=True)
test_loader = DataLoader(test_ds, batch_size=4, collate_fn=test_ds.collate_fn, drop_last=True)
2020-10-13 10:38:19.093 | INFO | __main__:__init__:25 - 13702 items loaded 2020-10-13 10:38:19.138 | INFO | __main__:__init__:25 - 1523 items loaded
def train_batch(inputs, model, criterion, optimizer):
    """Run one optimization step on a single batch.

    Args:
        inputs: (images, boxes, labels) tuple as produced by OpenDataset.collate_fn.
        model: network returning (box regressions, class scores) for a batch of images.
        criterion: loss callable taking (_regr, _clss, boxes, labels).
        optimizer: optimizer over model.parameters().

    Returns:
        The scalar loss tensor for this batch (after the optimizer step).
    """
    # FIX: removed the unused local `N = len(train_loader)`, which needlessly
    # coupled this function to a module-level global.
    model.train()
    images, boxes, labels = inputs
    _regr, _clss = model(images)
    loss = criterion(_regr, _clss, boxes, labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss
@torch.no_grad()
def validate_batch(inputs, model, criterion):
    """Compute the loss on one batch in eval mode, without tracking gradients."""
    model.eval()
    images, boxes, labels = inputs
    regressions, class_scores = model(images)
    return criterion(regressions, class_scores, boxes, labels)
# SSD300 model, MultiBox loss, and detect() come from the cloned ssd-utils repo.
from model import SSD300, MultiBoxLoss
from detect import *

n_epochs = 3

model = SSD300(num_classes, device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-5)
# MultiBoxLoss matches predictions against the model's prior (anchor) boxes.
criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy, device=device)

# torch_snippets Report accumulates/plots per-position loss values.
log = Report(n_epochs=n_epochs)
logs_to_print = 5
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.cache/torch/hub/checkpoints/vgg16-397923af.pth
HBox(children=(FloatProgress(value=0.0, max=553433881.0), HTML(value='')))
Loaded base model.
/usr/local/lib/python3.6/dist-packages/torch/nn/_reduction.py:44: UserWarning: size_average and reduce args will be deprecated, please use reduction='none' instead. warnings.warn(warning.format(ret))
# Main train/validate loop: one pass over each loader per epoch, logging the
# loss at a fractional epoch position (epoch + batch/num_batches).
for epoch in range(n_epochs):
    n_train = len(train_loader)
    for batch_idx, batch in enumerate(train_loader, start=1):
        batch_loss = train_batch(batch, model, criterion, optimizer)
        log.record(epoch + batch_idx / n_train, trn_loss=batch_loss.item(), end='\r')

    n_val = len(test_loader)
    for batch_idx, batch in enumerate(test_loader, start=1):
        batch_loss = validate_batch(batch, model, criterion)
        log.record(epoch + batch_idx / n_val, val_loss=batch_loss.item(), end='\r')
# Pick one random validation image and load it as RGB.
image_paths = Glob(f'{DATA_ROOT}/images/*')
image_id = choose(test_ds.image_infos)
img_path = find(image_id, test_ds.files)
original_image = Image.open(img_path, mode='r').convert('RGB')
2020-10-13 10:39:28.949 | INFO | torch_snippets.loader:Glob:178 - 15225 files found at ../open-images-bus-trucks//images/*
image_paths = Glob(f'{DATA_ROOT}/images/*')
# Run detection on three randomly chosen validation images and display
# the boxes with class names and confidence scores.
for _ in range(3):
    image_id = choose(test_ds.image_infos)
    img_path = find(image_id, test_ds.files)
    original_image = Image.open(img_path, mode='r')
    bbs, raw_labels, scores = detect(original_image, model, min_score=0.9,
                                     max_overlap=0.5, top_k=200, device=device)
    names = [target2label[t.item()] for t in raw_labels]
    label_with_conf = [f'{l} @ {s:.2f}' for l, s in zip(names, scores)]
    print(bbs, label_with_conf)
    show(original_image, bbs=bbs, texts=label_with_conf, text_sz=10)
[[35, 34, 212, 123]] ['Truck @ 1.00']
[[6, 1, 250, 215]] ['Bus @ 1.00']
[[58, 22, 194, 170]] ['Bus @ 1.00']