1.5 MiB
1.5 MiB
import cv2 as cv
import numpy as np
import sklearn.svm
import sklearn.metrics
import matplotlib.pyplot as plt
%matplotlib inline
import os
import random
import zipfile
# Unpack the glasses dataset archive into the current working directory.
# NOTE(review): extractall('.') must yield datasets/glasses/{with,without};
# the recorded traceback below shows FileNotFoundError for
# 'datasets/glasses/with' after extraction — verify the zip's internal
# layout (it may need extractall('datasets') instead).
with zipfile.ZipFile('datasets/glasses.zip', 'r') as zip_ref:
    zip_ref.extractall('.')
# Collect image paths for both classes.  Paths are joined with "/" on
# purpose: the class label is later recovered via path.split("/")[2].
dataset_dir = "datasets/glasses"
with_dir = f"{dataset_dir}/with"
without_dir = f"{dataset_dir}/without"
images_0 = [f"{with_dir}/{name}" for name in os.listdir(with_dir)]
images_1 = [f"{without_dir}/{name}" for name in os.listdir(without_dir)]
images = images_0 + images_1
# Deterministic shuffle so the train/test split is reproducible.
random.seed(1337)
random.shuffle(images)
# Split the shuffled paths 80/20 into train/test and load the images.
# The label is the parent directory name ("with"/"without"), i.e. the
# third "/"-component of datasets/glasses/<label>/<file>.
train_data = []
test_data = []
train_labels = []
test_labels = []
splitval = int((1 - 0.2) * len(images))
for path in images[:splitval]:
    img = cv.imread(path, cv.IMREAD_COLOR)
    # Skip unreadable files (imread returns None) so data and labels
    # stay aligned — consistent with the INRIA loading loops below.
    if img is not None:
        train_data.append(img)
        train_labels.append(path.split("/")[2])
for path in images[splitval:]:
    img = cv.imread(path, cv.IMREAD_COLOR)
    if img is not None:
        test_data.append(img)
        test_labels.append(path.split("/")[2])
# Map string labels to integer classes for the SVMs.
d_labels = {"with": 0, "without": 1}
train_labels = np.array([d_labels[x] for x in train_labels])
test_labels = np.array([d_labels[x] for x in test_labels])
print(f"Train data: {len(train_data)}, test data: {len(test_data)}")
[1;31m---------------------------------------------------------------------------[0m [1;31mFileNotFoundError[0m Traceback (most recent call last) [1;32m~\AppData\Local\Temp\ipykernel_22952\4086357420.py[0m in [0;36m<module>[1;34m[0m [0;32m 1[0m [0mdataset_dir[0m [1;33m=[0m [1;34m"datasets/glasses"[0m[1;33m[0m[1;33m[0m[0m [1;32m----> 2[1;33m [0mimages_0[0m [1;33m=[0m [0mos[0m[1;33m.[0m[0mlistdir[0m[1;33m([0m[1;34mf"{dataset_dir}/with"[0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [0m[0;32m 3[0m [0mimages_0[0m [1;33m=[0m [1;33m[[0m[1;34mf"{dataset_dir}/with/{x}"[0m [1;32mfor[0m [0mx[0m [1;32min[0m [0mimages_0[0m[1;33m][0m[1;33m[0m[1;33m[0m[0m [0;32m 4[0m [0mimages_1[0m [1;33m=[0m [0mos[0m[1;33m.[0m[0mlistdir[0m[1;33m([0m[1;34mf"{dataset_dir}/without"[0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [0;32m 5[0m [0mimages_1[0m [1;33m=[0m [1;33m[[0m[1;34mf"{dataset_dir}/without/{x}"[0m [1;32mfor[0m [0mx[0m [1;32min[0m [0mimages_1[0m[1;33m][0m[1;33m[0m[1;33m[0m[0m [1;31mFileNotFoundError[0m: [WinError 3] System nie może odnaleźć określonej ścieżki: 'datasets/glasses/with'
# Preview the first five training images (BGR -> RGB for matplotlib).
plt.figure(figsize=(10,2))
for idx in range(5):
    plt.subplot(1, 5, idx + 1)
    plt.imshow(train_data[idx][:, :, ::-1]);
# HOG hyper-parameters for the 96x32 glasses crops.
hp_win_size = (96, 32)        # detection window (width, height)
hp_block_size = (8, 8)
hp_block_stride = (8, 8)
hp_cell_size = (4, 4)
hp_n_bins = 9                 # orientation histogram bins
hp_deriv_aperture = 0         # NOTE(review): 0 is unusual (OpenCV default is 1) — confirm intended
hp_win_sigma = 4.0            # Gaussian window smoothing sigma
hp_histogram_norm_type = 1    # NOTE(review): OpenCV documents only L2Hys (0) — confirm this value
hp_l2_hys_threshold = 0.2
hp_gamma_correction = True
hp_n_levels = 64              # max detection pyramid levels
hp_signed_gradient = True     # use signed (0-360 degree) gradients
hog_descriptor = cv.HOGDescriptor(
    hp_win_size, hp_block_size, hp_block_stride, hp_cell_size,
    hp_n_bins, hp_deriv_aperture, hp_win_sigma,
    hp_histogram_norm_type, hp_l2_hys_threshold,
    hp_gamma_correction, hp_n_levels, hp_signed_gradient)
def _hog_matrix(samples):
    # One HOG feature row per image.
    return np.vstack([hog_descriptor.compute(img).ravel() for img in samples])

train_hog = _hog_matrix(train_data)
test_hog = _hog_matrix(test_data)

# RBF-kernel SVM via OpenCV's ml module.
model = cv.ml.SVM_create()
model.setKernel(cv.ml.SVM_RBF)
model.setType(cv.ml.SVM_C_SVC)
model.setGamma(0.02)
model.setC(2.5)
model.train(np.array(train_hog), cv.ml.ROW_SAMPLE, train_labels);
predictions = model.predict(test_hog)[1].ravel()
accuracy = (test_labels == predictions).mean()
print(f"ACC: {accuracy * 100:.2f} %")
ACC: 95.30 %
# Same classifier with scikit-learn for comparison (SVC.fit returns self).
model = sklearn.svm.SVC(C=2.5, gamma=0.02, kernel='rbf').fit(train_hog, train_labels)
predictions = model.predict(test_hog)
accuracy = (test_labels == predictions).mean()
print(f"ACC: {accuracy * 100:.2f} %")
ACC: 95.30 %
# Unpack the INRIA person subset and collect patch paths per split/class.
with zipfile.ZipFile('inria-person-sub.zip', 'r') as zip_ref:
    zip_ref.extractall('.')

dataset_dir = "datasets/INRIAPerson"

def _patch_paths(subdir):
    # Full "/"-joined paths of every file under <dataset_dir>/<subdir>.
    root = f"{dataset_dir}/{subdir}"
    return [f"{root}/{name}" for name in os.listdir(root)]

images_train_0 = _patch_paths("train_64x128_H96/negPatches")
images_train_1 = _patch_paths("train_64x128_H96/posPatches")
images_test_0 = _patch_paths("test_64x128_H96/negPatches")
images_test_1 = _patch_paths("test_64x128_H96/posPatches")
def _load_patches(paths, label, data, labels):
    """Append every readable image from *paths* to *data*, and *label* to *labels*.

    Files that cv.imread cannot decode (it returns None) are skipped so the
    two lists stay aligned.
    """
    for path in paths:
        img = cv.imread(path, cv.IMREAD_COLOR)
        if img is not None:
            data.append(img)
            labels.append(label)

# Load both splits; negatives get label 0, positives label 1.
train_data = []
test_data = []
train_labels = []
test_labels = []
_load_patches(images_train_0, 0, train_data, train_labels)
_load_patches(images_train_1, 1, train_data, train_labels)
_load_patches(images_test_0, 0, test_data, test_labels)
_load_patches(images_test_1, 1, test_data, test_labels)
print(f"Train data: {len(train_data)}, test data: {len(test_data)}")
Train data: 1457, test data: 560
# Show three negative patches (start of the list) and three positive
# patches (end of the list); BGR -> RGB for matplotlib.
plt.figure(figsize=(10,2))
for idx in range(3):
    plt.subplot(1, 6, idx + 1)
    plt.imshow(train_data[idx][:, :, ::-1]);
for idx in range(3):
    plt.subplot(1, 6, 4 + idx)
    plt.imshow(train_data[-(idx + 1)][:, :, ::-1]);
# Standard 64x128 person-detection HOG setup; these values match the
# OpenCV HOGDescriptor defaults (required later for the built-in
# people detector).
hp_win_size = (64, 128)       # detection window (width, height)
hp_block_size = (16, 16)
hp_block_stride = (8, 8)
hp_cell_size = (8, 8)
hp_n_bins = 9                 # orientation histogram bins
hp_deriv_aperture = 1         # Sobel aperture for gradient computation
hp_win_sigma = -1             # -1: let OpenCV choose the default sigma
hp_histogram_norm_type = 0    # L2-Hys block normalization
hp_l2_hys_threshold = 0.2
hp_gamma_correction = True
hp_n_levels = 64              # max detection pyramid levels
hp_signed_gradient = False    # unsigned (0-180 degree) gradients
hog_descriptor = cv.HOGDescriptor(
    hp_win_size, hp_block_size, hp_block_stride, hp_cell_size,
    hp_n_bins, hp_deriv_aperture, hp_win_sigma,
    hp_histogram_norm_type, hp_l2_hys_threshold,
    hp_gamma_correction, hp_n_levels, hp_signed_gradient)
# Compute HOG feature matrices (one row per image).
train_hog = np.vstack([hog_descriptor.compute(x).ravel() for x in train_data])
test_hog = np.vstack([hog_descriptor.compute(x).ravel() for x in test_data])
# Materialize the label lists as int arrays once, instead of relying on
# list == ndarray broadcasting in the accuracy line below.
train_labels = np.array(train_labels)
test_labels = np.array(test_labels)
# Linear SVM via OpenCV's ml module.
model = cv.ml.SVM_create()
model.setGamma(0)
model.setC(0.01)
model.setKernel(cv.ml.SVM_LINEAR)
model.setType(cv.ml.SVM_C_SVC)
model.setTermCriteria((cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 1000, 1e-3))
model.train(train_hog, cv.ml.ROW_SAMPLE, train_labels);
predictions = model.predict(test_hog)[1].ravel()
accuracy = (test_labels == predictions).mean()
print(f"ACC: {accuracy * 100:.2f} %")
ACC: 96.96 %
# Same linear SVM with scikit-learn for comparison; report accuracy,
# precision and recall.  (The previously computed-but-unused local
# `accuracy` has been removed — metrics are taken from sklearn.metrics.)
model2 = sklearn.svm.SVC(C=0.01, gamma='auto', kernel='linear', max_iter=1000)
model2.fit(train_hog, train_labels)
predictions = model2.predict(test_hog)
print(f"Accuracy: {sklearn.metrics.accuracy_score(test_labels, predictions) * 100:.2f} %")
print(f"Precision: {sklearn.metrics.precision_score(test_labels, predictions) * 100:.2f} %")
print(f"Recall: {sklearn.metrics.recall_score(test_labels, predictions) * 100:.2f} %")
Accuracy: 96.96 % Precision: 93.55 % Recall: 88.78 %
# Use the linear SVM trained above as a sliding-window person detector.
image = cv.imread("pedestrians.jpg", cv.IMREAD_COLOR)
scale = 600 / image.shape[0]  # resize so the image is 600 px tall
image = cv.resize(image, None, fx=scale, fy=scale)
# Pack the trained model into the single vector setSVMDetector expects:
# [weights..., bias].  OpenCV's linear SVM exposes one compressed support
# vector, which is negated, with rho appended, following the official
# OpenCV HOG+SVM sample.  NOTE(review): the sign convention is subtle —
# confirm against the sample before modifying.
support_vectors = model.getSupportVectors()
rho, _, _ = model.getDecisionFunction(0)
detector = np.zeros(support_vectors.shape[1] + 1, dtype=support_vectors.dtype)
detector[:-1] = -support_vectors[:]
detector[-1] = rho
hog_descriptor.setSVMDetector(detector)
# Multi-scale detection; hitThreshold=1.0 suppresses weak detections.
locations, weights = hog_descriptor.detectMultiScale(
    image, winStride=(8, 8), padding=(32, 32), scale=1.05,
    hitThreshold=1.0)
# Draw a green box around each detection (x, y, width, height).
for location, weight in zip(locations, weights):
    x1, y1, w, h = location
    x2, y2 = x1 + w, y1 + h
    cv.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), thickness=3, lineType=cv.LINE_AA)
    #cv.putText(image, f"{weight[0]:.2f}", (x1,y1), cv.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv.LINE_AA)
plt.figure(figsize=(6,6))
plt.imshow(image[:,:,::-1]);
# Same detection with OpenCV's built-in pre-trained people detector.
# NOTE(review): unlike the previous cell the image is NOT resized here,
# and the reused hp_* parameters must match the defaults the pre-trained
# detector was built with — confirm if the parameters above change.
image = cv.imread("pedestrians.jpg", cv.IMREAD_COLOR)
hog_dflt_descriptor = cv.HOGDescriptor(
    hp_win_size, hp_block_size, hp_block_stride, hp_cell_size,
    hp_n_bins, hp_deriv_aperture, hp_win_sigma,
    hp_histogram_norm_type, hp_l2_hys_threshold,
    hp_gamma_correction, hp_n_levels, hp_signed_gradient)
detector_dflt = cv.HOGDescriptor_getDefaultPeopleDetector()
hog_dflt_descriptor.setSVMDetector(detector_dflt)
locations, weights = hog_dflt_descriptor.detectMultiScale(
    image, winStride=(8, 8), padding=(32, 32), scale=1.05,
    hitThreshold=1.0)
# Draw a green box around each detection (x, y, width, height).
for location, weight in zip(locations, weights):
    x1, y1, w, h = location
    x2, y2 = x1 + w, y1 + h
    cv.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), thickness=3, lineType=cv.LINE_AA)
    #cv.putText(image, f"{weight[0]:.2f}", (x1,y1), cv.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv.LINE_AA)
plt.figure(figsize=(6,6))
plt.imshow(image[:,:,::-1]);
(x1, y1), (x2, y2)
((257, 309), (372, 540))
# Foreground extraction using the GrabCut algorithm.
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

image = cv.imread("messi5.jpg")
# imread returns None (no exception) when the file is missing; fail early
# with a clear message instead of crashing later on image.shape.
if image is None:
    raise FileNotFoundError("Cannot read input image: messi5.jpg")

# One uint8 flag per pixel; grabCut fills it with the four GC_* labels.
mask = np.zeros(image.shape[:2], np.uint8)

# Internal GMM state required by grabCut: 1x65 float64, zero-initialized.
backgroundModel = np.zeros((1, 65), np.float64)
foregroundModel = np.zeros((1, 65), np.float64)

# Region of interest as (x, y, width, height); these coordinates are
# specific to this input image.
rectangle = (80, 40, 460, 330)

# Run grabCut for 3 iterations, initialized from the rectangle.
cv.grabCut(image, mask, rectangle,
           backgroundModel, foregroundModel,
           3, cv.GC_INIT_WITH_RECT)

# Collapse the 4-way mask to binary: 0/2 (sure/probable background) -> 0,
# 1/3 (sure/probable foreground) -> 1.
mask2 = np.where((mask == 2)|(mask == 0), 0, 1).astype('uint8')

# Zero out the background pixels to obtain the segmented image.
image = image * mask2[:, :, np.newaxis]

# Display the result (BGR -> RGB for matplotlib) with a colorbar.
plt.imshow(image[:,:,::-1])
plt.colorbar()
plt.show()
[1;31m---------------------------------------------------------------------------[0m [1;31mAttributeError[0m Traceback (most recent call last) [1;32m~\AppData\Local\Temp\ipykernel_22952\55986056.py[0m in [0;36m<module>[1;34m[0m [0;32m 15[0m [1;31m# to the loaded image, with the[0m[1;33m[0m[1;33m[0m[1;33m[0m[0m [0;32m 16[0m [1;31m# shape and return type[0m[1;33m[0m[1;33m[0m[1;33m[0m[0m [1;32m---> 17[1;33m [0mmask[0m [1;33m=[0m [0mnp[0m[1;33m.[0m[0mzeros[0m[1;33m([0m[0mimage[0m[1;33m.[0m[0mshape[0m[1;33m[[0m[1;33m:[0m[1;36m2[0m[1;33m][0m[1;33m,[0m [0mnp[0m[1;33m.[0m[0muint8[0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [0m[0;32m 18[0m [1;33m[0m[0m [0;32m 19[0m [1;31m# specify the background and foreground model[0m[1;33m[0m[1;33m[0m[1;33m[0m[0m [1;31mAttributeError[0m: 'NoneType' object has no attribute 'shape'
# Draw a hand-chosen ROI on a fresh copy of the image to check coordinates.
image = cv.imread("messi5.jpg")
(x1, y1), (x2, y2) = (60, 55), (480, 340)
cv.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), thickness=3, lineType=cv.LINE_AA)
plt.imshow(image[:,:,::-1])
<matplotlib.image.AxesImage at 0x26ad5e46dc0>
image.shape
(342, 548, 3)
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
# GrabCut segmentation initialized from a rectangle (OpenCV tutorial flow).
# The names img, mask, bgdModel, fgdModel are reused by the following cell.
img = cv.imread('messi5.jpg')
mask = np.zeros(img.shape[:2],np.uint8)
# Internal model buffers required by grabCut: 1x65 float64, zero-initialized.
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
# ROI as (x, y, width, height) around the foreground.
rect = (50,50,450,290)
cv.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv.GC_INIT_WITH_RECT)
# Background flags (0/2) -> 0, foreground flags (1/3) -> 1.
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img = img*mask2[:,:,np.newaxis]
plt.imshow(img[:,:,::-1]),plt.colorbar(),plt.show()
(<matplotlib.image.AxesImage at 0x26ad58cf400>, <matplotlib.colorbar.Colorbar at 0x26acc8d3820>, None)
# Refine the previous GrabCut result with a manually labelled mask image.
# newmask is the mask image I manually labelled
newmask = cv.imread('messi5_mask.png',0)
# wherever it is marked white (sure foreground), change mask=1
# wherever it is marked black (sure background), change mask=0
mask[newmask == 0] = 0
mask[newmask == 255] = 1
# Re-run grabCut in mask-initialization mode (the rect argument is None).
mask, bgdModel, fgdModel = cv.grabCut(img,mask,None,bgdModel,fgdModel,5,cv.GC_INIT_WITH_MASK)
# Background flags (0/2) -> 0, foreground flags (1/3) -> 1.
mask = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img = img*mask[:,:,np.newaxis]
# NOTE(review): img was already background-zeroed by the previous cell, and
# imshow(img) without [:,:,::-1] displays BGR channels swapped — confirm intended.
plt.imshow(img),plt.colorbar(),plt.show()
(<matplotlib.image.AxesImage at 0x26ad560f400>, <matplotlib.colorbar.Colorbar at 0x26ad560a820>, None)
#!/usr/bin/env python
'''
===============================================================================
Interactive Image Segmentation using GrabCut algorithm.
This sample shows interactive image segmentation using grabcut algorithm.
USAGE:
python grabcut.py <filename>
README FIRST:
Two windows will show up, one for input and one for output.
At first, in input window, draw a rectangle around the object using the
right mouse button. Then press 'n' to segment the object (once or a few times)
For any finer touch-ups, you can press any of the keys below and draw lines on
the areas you want. Then again press 'n' to update the output.
Key '0' - To select areas of sure background
Key '1' - To select areas of sure foreground
Key '2' - To select areas of probable background
Key '3' - To select areas of probable foreground
Key 'n' - To update the segmentation
Key 'r' - To reset the setup
Key 's' - To save the results
===============================================================================
'''
import numpy as np
import cv2 as cv
import sys
class App():
    """Interactive GrabCut segmentation tool (key bindings in the module docstring)."""

    # Brush / overlay colors (BGR).
    BLUE = [255,0,0]        # rectangle color
    RED = [0,0,255]         # probable background
    GREEN = [0,255,0]       # probable foreground
    BLACK = [0,0,0]         # sure background
    WHITE = [255,255,255]   # sure foreground

    # Brush "value": display color plus the flag written into the mask
    # (matches grabCut's 0=BGD, 1=FGD, 2=PR_BGD, 3=PR_FGD labels).
    DRAW_BG = {'color' : BLACK, 'val' : 0}
    DRAW_FG = {'color' : WHITE, 'val' : 1}
    DRAW_PR_BG = {'color' : RED, 'val' : 2}
    DRAW_PR_FG = {'color' : GREEN, 'val' : 3}

    # State flags.
    rect = (0,0,1,1)
    drawing = False         # currently drawing touch-up curves
    rectangle = False       # currently dragging the ROI rectangle
    rect_over = False       # ROI rectangle has been drawn
    rect_or_mask = 100      # 0: init grabCut from rect, 1: from mask
    value = DRAW_FG         # current brush
    thickness = 3           # brush thickness

    def onmouse(self, event, x, y, flags, param):
        """Mouse callback: right-drag draws the ROI, left-drag paints touch-ups."""
        # ROI rectangle (right mouse button).
        if event == cv.EVENT_RBUTTONDOWN:
            self.rectangle = True
            self.ix, self.iy = x, y
        elif event == cv.EVENT_MOUSEMOVE:
            if self.rectangle == True:
                # Redraw on a clean copy so the rubber-band rectangle follows the cursor.
                self.img = self.img2.copy()
                cv.rectangle(self.img, (self.ix, self.iy), (x, y), self.BLUE, 2)
                self.rect = (min(self.ix, x), min(self.iy, y), abs(self.ix - x), abs(self.iy - y))
                self.rect_or_mask = 0
        elif event == cv.EVENT_RBUTTONUP:
            self.rectangle = False
            self.rect_over = True
            cv.rectangle(self.img, (self.ix, self.iy), (x, y), self.BLUE, 2)
            self.rect = (min(self.ix, x), min(self.iy, y), abs(self.ix - x), abs(self.iy - y))
            self.rect_or_mask = 0
            print(" Now press the key 'n' a few times until no further change \n")
        # Touch-up curves (left mouse button) — painted into both the display
        # image and the grabCut mask.
        if event == cv.EVENT_LBUTTONDOWN:
            if self.rect_over == False:
                print("first draw rectangle \n")
            else:
                self.drawing = True
                cv.circle(self.img, (x,y), self.thickness, self.value['color'], -1)
                cv.circle(self.mask, (x,y), self.thickness, self.value['val'], -1)
        elif event == cv.EVENT_MOUSEMOVE:
            if self.drawing == True:
                cv.circle(self.img, (x, y), self.thickness, self.value['color'], -1)
                cv.circle(self.mask, (x, y), self.thickness, self.value['val'], -1)
        elif event == cv.EVENT_LBUTTONUP:
            if self.drawing == True:
                self.drawing = False
                cv.circle(self.img, (x, y), self.thickness, self.value['color'], -1)
                cv.circle(self.mask, (x, y), self.thickness, self.value['val'], -1)

    def run(self):
        """Load the image, open input/output windows, and run the key-event loop."""
        # Honor the filename from the command line, as the usage text promises;
        # fall back to the bundled sample image otherwise.
        if len(sys.argv) > 1:
            filename = sys.argv[1]
        else:
            print("No input image given, so loading default image, messi5.jpg \n")
            print("Correct Usage: python grabcut.py <filename> \n")
            filename = 'messi5.jpg'
        # findFile(..., required=False) returns '' instead of raising when the
        # file is missing; imread returns None — fail with a clear error.
        path = cv.samples.findFile(filename, False)
        self.img = cv.imread(path) if path else None
        if self.img is None:
            raise FileNotFoundError(f"Cannot load image: {filename}")
        self.img2 = self.img.copy()                                # pristine copy of the original
        self.mask = np.zeros(self.img.shape[:2], dtype = np.uint8) # zeros == sure background
        self.output = np.zeros(self.img.shape, np.uint8)           # segmented result to display

        # Input and output windows.
        cv.namedWindow('output')
        cv.namedWindow('input')
        cv.setMouseCallback('input', self.onmouse)
        cv.moveWindow('input', self.img.shape[1]+10,90)

        print(" Instructions: \n")
        print(" Draw a rectangle around the object using right mouse button \n")

        while(1):
            cv.imshow('output', self.output)
            cv.imshow('input', self.img)
            k = cv.waitKey(1)
            # Key bindings.
            if k == 27:         # esc to exit
                break
            elif k == ord('0'): # BG drawing
                print(" mark background regions with left mouse button \n")
                self.value = self.DRAW_BG
            elif k == ord('1'): # FG drawing
                print(" mark foreground regions with left mouse button \n")
                self.value = self.DRAW_FG
            elif k == ord('2'): # PR_BG drawing
                self.value = self.DRAW_PR_BG
            elif k == ord('3'): # PR_FG drawing
                self.value = self.DRAW_PR_FG
            elif k == ord('s'): # save side-by-side image
                bar = np.zeros((self.img.shape[0], 5, 3), np.uint8)
                res = np.hstack((self.img2, bar, self.img, bar, self.output))
                cv.imwrite('grabcut_output.png', res)
                print(" Result saved as image \n")
            elif k == ord('r'): # reset everything
                print("resetting \n")
                self.rect = (0,0,1,1)
                self.drawing = False
                self.rectangle = False
                self.rect_or_mask = 100
                self.rect_over = False
                self.value = self.DRAW_FG
                self.img = self.img2.copy()
                self.mask = np.zeros(self.img.shape[:2], dtype = np.uint8)
                self.output = np.zeros(self.img.shape, np.uint8)
            elif k == ord('n'): # segment the image
                print(""" For finer touchups, mark foreground and background after pressing keys 0-3
and again press 'n' \n""")
                try:
                    # Fresh model buffers each run, as in the OpenCV sample.
                    bgdmodel = np.zeros((1, 65), np.float64)
                    fgdmodel = np.zeros((1, 65), np.float64)
                    if (self.rect_or_mask == 0):    # grabcut with rect
                        cv.grabCut(self.img2, self.mask, self.rect, bgdmodel, fgdmodel, 1, cv.GC_INIT_WITH_RECT)
                        self.rect_or_mask = 1
                    elif (self.rect_or_mask == 1):  # grabcut with mask
                        cv.grabCut(self.img2, self.mask, self.rect, bgdmodel, fgdmodel, 1, cv.GC_INIT_WITH_MASK)
                except Exception:
                    # Keep the UI alive on a failed segmentation, but show why.
                    import traceback
                    traceback.print_exc()
                # Foreground flags (1/3) -> 255, background (0/2) -> 0.
                mask2 = np.where((self.mask==1) + (self.mask==3), 255, 0).astype('uint8')
                self.output = cv.bitwise_and(self.img2, self.img2, mask=mask2)
        print('Done')
# Script entry point: show usage, run the interactive tool, then clean up.
if __name__ == '__main__':
    print(__doc__)
    app = App()
    app.run()
    cv.destroyAllWindows()
=============================================================================== Interactive Image Segmentation using GrabCut algorithm. This sample shows interactive image segmentation using grabcut algorithm. USAGE: python grabcut.py <filename> README FIRST: Two windows will show up, one for input and one for output. At first, in input window, draw a rectangle around the object using the right mouse button. Then press 'n' to segment the object (once or a few times) For any finer touch-ups, you can press any of the keys below and draw lines on the areas you want. Then again press 'n' to update the output. Key '0' - To select areas of sure background Key '1' - To select areas of sure foreground Key '2' - To select areas of probable background Key '3' - To select areas of probable foreground Key 'n' - To update the segmentation Key 'r' - To reset the setup Key 's' - To save the results =============================================================================== No input image given, so loading default image, lena.jpg Correct Usage: python grabcut.py <filename>
[1;31m---------------------------------------------------------------------------[0m [1;31merror[0m Traceback (most recent call last) [1;32m~\AppData\Local\Temp\ipykernel_22952\674526088.py[0m in [0;36m<module>[1;34m[0m [0;32m 170[0m [1;32mif[0m [0m__name__[0m [1;33m==[0m [1;34m'__main__'[0m[1;33m:[0m[1;33m[0m[1;33m[0m[0m [0;32m 171[0m [0mprint[0m[1;33m([0m[0m__doc__[0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [1;32m--> 172[1;33m [0mApp[0m[1;33m([0m[1;33m)[0m[1;33m.[0m[0mrun[0m[1;33m([0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [0m[0;32m 173[0m [0mcv[0m[1;33m.[0m[0mdestroyAllWindows[0m[1;33m([0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [1;32m~\AppData\Local\Temp\ipykernel_22952\674526088.py[0m in [0;36mrun[1;34m(self)[0m [0;32m 98[0m [0mfilename[0m [1;33m=[0m [1;34m'messi5.jpg'[0m[1;33m[0m[1;33m[0m[0m [0;32m 99[0m [1;33m[0m[0m [1;32m--> 100[1;33m [0mself[0m[1;33m.[0m[0mimg[0m [1;33m=[0m [0mcv[0m[1;33m.[0m[0mimread[0m[1;33m([0m[0mcv[0m[1;33m.[0m[0msamples[0m[1;33m.[0m[0mfindFile[0m[1;33m([0m[0mfilename[0m[1;33m)[0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m [0m[0;32m 101[0m [0mself[0m[1;33m.[0m[0mimg2[0m [1;33m=[0m [0mself[0m[1;33m.[0m[0mimg[0m[1;33m.[0m[0mcopy[0m[1;33m([0m[1;33m)[0m [1;31m# a copy of original image[0m[1;33m[0m[1;33m[0m[0m [0;32m 102[0m [0mself[0m[1;33m.[0m[0mmask[0m [1;33m=[0m [0mnp[0m[1;33m.[0m[0mzeros[0m[1;33m([0m[0mself[0m[1;33m.[0m[0mimg[0m[1;33m.[0m[0mshape[0m[1;33m[[0m[1;33m:[0m[1;36m2[0m[1;33m][0m[1;33m,[0m [0mdtype[0m [1;33m=[0m [0mnp[0m[1;33m.[0m[0muint8[0m[1;33m)[0m [1;31m# mask initialized to PR_BG[0m[1;33m[0m[1;33m[0m[0m [1;31merror[0m: OpenCV(4.5.5) D:\a\opencv-python\opencv-python\opencv\modules\core\src\utils\samples.cpp:64: error: (-2:Unspecified error) OpenCV samples: Can't find required data file: messi5.jpg in function 'cv::samples::findFile'