after a beautiful weekend, all done

Mikołaj Pokrywka 2023-01-23 12:24:42 +01:00
parent 97bef3fb5c
commit d1c7a3aaa6
9 changed files with 5019 additions and 3068 deletions

File diff suppressed because one or more lines are too long


@@ -1080,6 +1080,105 @@
"\n", "\n",
"![HighGUI Canny](img/highgui-canny.png)" "![HighGUI Canny](img/highgui-canny.png)"
] ]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "b68fdade",
"metadata": {},
"outputs": [],
"source": [
"import cv2 as cv\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"%matplotlib inline"
]
},
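The cell above sets up `%matplotlib inline`, yet the Canny demo below displays its results in HighGUI windows via `cv.imshow`. For a purely inline alternative, here is a minimal sketch (an illustration, not part of this commit), assuming `img/lena.png` and the same thresholds as the demo below:

```python
# Illustrative inline display (not part of the commit): run Canny on the
# blurred grayscale image and show the result with matplotlib instead of a
# HighGUI window.
import cv2 as cv
import matplotlib.pyplot as plt

img = cv.imread("img/lena.png")                     # BGR, as loaded by OpenCV
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
edges = cv.Canny(cv.GaussianBlur(gray, (5, 5), 0), 100, 200)
plt.imshow(edges, cmap="gray")
plt.axis("off")
plt.show()
```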
{
"cell_type": "code",
"execution_count": 3,
"id": "305441e2",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.5.5) /io/opencv/modules/imgproc/src/smooth.dispatch.cpp:293: error: (-215:Assertion failed) ksize.width > 0 && ksize.width % 2 == 1 && ksize.height > 0 && ksize.height % 2 == 1 in function 'createGaussianKernels'\n",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/tmp/ipykernel_17561/1543211412.py\u001b[0m in \u001b[0;36mfuncCan\u001b[0;34m(thresh1, thresh2, apertureSize, blur)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0maperture_size\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m2\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetTrackbarPos\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Aperture size'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'Canny'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m3\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0mblur\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m2\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetTrackbarPos\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Blur'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'Canny'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0mimage\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGaussianBlur\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0mimage2\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGaussianBlur\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0medge\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCanny\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mthresh1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mthresh2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mapertureSize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0maperture_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31merror\u001b[0m: OpenCV(4.5.5) /io/opencv/modules/imgproc/src/smooth.dispatch.cpp:293: error: (-215:Assertion failed) ksize.width > 0 && ksize.width % 2 == 1 && ksize.height > 0 && ksize.height % 2 == 1 in function 'createGaussianKernels'\n"
]
},
{
"ename": "error",
"evalue": "OpenCV(4.5.5) /io/opencv/modules/imgproc/src/smooth.dispatch.cpp:293: error: (-215:Assertion failed) ksize.width > 0 && ksize.width % 2 == 1 && ksize.height > 0 && ksize.height % 2 == 1 in function 'createGaussianKernels'\n",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/tmp/ipykernel_17561/1543211412.py\u001b[0m in \u001b[0;36mfuncCan\u001b[0;34m(thresh1, thresh2, apertureSize, blur)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0maperture_size\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m2\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetTrackbarPos\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Aperture size'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'Canny'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m3\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0mblur\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m2\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetTrackbarPos\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Blur'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'Canny'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0mimage\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGaussianBlur\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0mimage2\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGaussianBlur\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0medge\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCanny\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mthresh1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mthresh2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mapertureSize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0maperture_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31merror\u001b[0m: OpenCV(4.5.5) /io/opencv/modules/imgproc/src/smooth.dispatch.cpp:293: error: (-215:Assertion failed) ksize.width > 0 && ksize.width % 2 == 1 && ksize.height > 0 && ksize.height % 2 == 1 in function 'createGaussianKernels'\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/tmp/ipykernel_17561/1543211412.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0m__name__\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"__main__\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/tmp/ipykernel_17561/1543211412.py\u001b[0m in \u001b[0;36mmain\u001b[0;34m()\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0;32mwhile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 30\u001b[0;31m \u001b[0mfuncCan\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 31\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0mk\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m&\u001b[0m \u001b[0;36m0xFF\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/tmp/ipykernel_17561/1543211412.py\u001b[0m in \u001b[0;36mfuncCan\u001b[0;34m(thresh1, thresh2, apertureSize, blur)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0maperture_size\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m2\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetTrackbarPos\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Aperture size'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'Canny'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m3\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0mblur\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m2\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetTrackbarPos\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Blur'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'Canny'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0mimage\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGaussianBlur\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0mimage2\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGaussianBlur\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mblur\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0medge\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCanny\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mthresh1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mthresh2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mapertureSize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0maperture_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"def funcCan(thresh1=0, thresh2=0, apertureSize=0, blur=0):\n",
" original=cv.imread(\"img/lena.png\",1)\n",
" img=original.copy()\n",
" thresh1 = cv.getTrackbarPos('Threshold 1', 'Canny')\n",
" thresh2 = cv.getTrackbarPos('Threshold 2', 'Canny')\n",
" aperture_size = 2 * cv.getTrackbarPos('Aperture size', 'Canny') + 3\n",
" blur = 2 * cv.getTrackbarPos('Blur', 'Canny') + 1\n",
" image=cv.GaussianBlur(img,(blur,blur),0)\n",
" image2=cv.GaussianBlur(img,(blur,blur),0)\n",
" edge = cv.Canny(image, thresh1, thresh2, apertureSize=aperture_size)\n",
" cv.imshow('Canny', edge)\n",
" cv.imshow('Original with blur',image2)\n",
" \n",
"def main():\n",
" original=cv.imread(\"img/lena.png\",1)\n",
" cv.namedWindow('Canny')\n",
" cv.imshow('Original',original)\n",
"\n",
" thresh1=100\n",
" thresh2=1\n",
" aperture_size = 0\n",
" blur = 2\n",
"\n",
" cv.createTrackbar('Threshold 1','Canny',thresh1,255,funcCan)\n",
" cv.createTrackbar('Threshold 2','Canny',thresh2,255,funcCan)\n",
" cv.createTrackbar('Aperture size','Canny',aperture_size,2,funcCan)\n",
" cv.createTrackbar('Blur','Canny',blur,20,funcCan)\n",
"\n",
" while(True):\n",
" funcCan(0,0,0,0)\n",
"\n",
" k = cv.waitKey(1) & 0xFF\n",
" if k == 27:\n",
" break\n",
"\n",
" # Close the window\n",
" cv.destroyAllWindows()\n",
" cv.waitKey(1)\n",
" \n",
"if __name__ == \"__main__\":\n",
" main()"
]
}
],
"metadata": {

File diffs suppressed for four files because one or more lines are too long


@@ -487,13 +487,302 @@
"\n", "\n",
"W katalogu `vid` znajdują się filmy `blinking-*.mp4`. Napisz program do wykrywania mrugnięć. Opcjonalnie możesz użyć *eye aspect ratio* z [tego artykułu](http://vision.fe.uni-lj.si/cvww2016/proceedings/papers/05.pdf) lub zaproponować własne rozwiązanie." "W katalogu `vid` znajdują się filmy `blinking-*.mp4`. Napisz program do wykrywania mrugnięć. Opcjonalnie możesz użyć *eye aspect ratio* z [tego artykułu](http://vision.fe.uni-lj.si/cvww2016/proceedings/papers/05.pdf) lub zaproponować własne rozwiązanie."
] ]
},
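For reference (not part of the commit), the *eye aspect ratio* from the linked paper is computed over the six landmarks $p_1,\dots,p_6$ of one eye, where $p_1$ and $p_4$ are the horizontal eye corners:

$$\mathrm{EAR} = \frac{\lVert p_2 - p_6 \rVert + \lVert p_3 - p_5 \rVert}{2\,\lVert p_1 - p_4 \rVert}$$

A blink registers when EAR drops below a threshold for a few consecutive frames, which is exactly the `EYE_AR_THRESH` / `EYE_AR_CONSEC_FRAMES` counter logic implemented in the cell further below with `scipy.spatial.distance.euclidean`.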
{
"cell_type": "code",
"execution_count": 9,
"id": "d4795fae",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING: pip is being invoked by an old script wrapper. This will fail in a future version of pip.\n",
"Please see https://github.com/pypa/pip/issues/5599 for advice on fixing the underlying issue.\n",
"To avoid this problem you can invoke Python with '-m pip' instead of running pip directly.\n",
"Defaulting to user installation because normal site-packages is not writeable\n",
"Requirement already satisfied: dlib in /home/mikolaj/.local/lib/python3.8/site-packages (19.24.0)\n",
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip available: \u001b[0m\u001b[31;49m22.2.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m22.3.1\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
]
}
],
"source": [
"!pip3 install dlib\n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "f1ea5963",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING: pip is being invoked by an old script wrapper. This will fail in a future version of pip.\n",
"Please see https://github.com/pypa/pip/issues/5599 for advice on fixing the underlying issue.\n",
"To avoid this problem you can invoke Python with '-m pip' instead of running pip directly.\n",
"Defaulting to user installation because normal site-packages is not writeable\n",
"Requirement already satisfied: imutils in /home/mikolaj/.local/lib/python3.8/site-packages (0.5.4)\n",
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip available: \u001b[0m\u001b[31;49m22.2.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m22.3.1\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
]
},
{
"ename": "",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[1;31mThe Kernel crashed while executing code in the the current cell or a previous cell. Please review the code in the cell(s) to identify a possible cause of the failure. Click <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. View Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details."
]
}
],
"source": [
"!pip3 install imutils"
]
},
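The warnings in the two install cells above come from invoking the `pip3` script wrapper directly. A minimal alternative, assuming a reasonably recent IPython/Jupyter (not something this commit relies on), is the `%pip` magic, which runs pip through the kernel's own interpreter:

```python
# Illustrative alternative to `!pip3 install ...` (not part of the commit):
# %pip invokes pip via the running kernel's interpreter, avoiding the
# deprecated script-wrapper path warned about above.
%pip install dlib imutils
```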
{
"cell_type": "code",
"execution_count": 8,
"id": "fe56a033",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] loading facial landmark predictor...\n",
"[INFO] starting video stream thread...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"[ERROR:0@986.863] global /io/opencv/modules/videoio/src/cap.cpp (164) open VIDEOIO(CV_IMAGES): raised OpenCV exception:\n",
"\n",
"OpenCV(4.5.5) /io/opencv/modules/videoio/src/cap_images.cpp:293: error: (-215:Assertion failed) !_filename.empty() in function 'open'\n",
"\n",
"\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/tmp/ipykernel_1138/2981424523.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0mgray\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcvtColor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mframe\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCOLOR_BGR2GRAY\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 66\u001b[0;31m \u001b[0mrects\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdetector\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgray\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 67\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 68\u001b[0m \u001b[0;31m# uncomment to our video\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"import argparse\n",
"import time\n",
"\n",
"import cv2\n",
"import dlib\n",
"import imutils\n",
"import numpy as np\n",
"from imutils import face_utils\n",
"from imutils.video import FileVideoStream, VideoStream\n",
"from scipy.spatial import distance as dist\n",
"\n",
"def eye_aspect_ratio(eye):\n",
" A = dist.euclidean(eye[1], eye[5])\n",
" B = dist.euclidean(eye[2], eye[4])\n",
" C = dist.euclidean(eye[0], eye[3])\n",
" ear = (A + B) / (2.0 * C)\n",
" return ear\n",
"\n",
"\n",
"\n",
"\n",
"EYE_AR_THRESH = 0.3\n",
"EYE_AR_CONSEC_FRAMES = 3\n",
"\n",
"\n",
"COUNTER = 0\n",
"TOTAL = 0\n",
"\n",
"\n",
"print(\"[INFO] loading facial landmark predictor...\")\n",
"detector = dlib.get_frontal_face_detector()\n",
"predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\n",
"\n",
"\n",
"(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n",
"(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\n",
"\n",
"\n",
"print(\"[INFO] starting video stream thread...\")\n",
"vs = FileVideoStream('').start()\n",
"fileStream = True\n",
"vs = VideoStream(src=0).start()\n",
"fileStream = False\n",
"time.sleep(1.0)\n",
"\n",
"# uncomment to our video\n",
"# video = cv2.VideoCapture(\"vid/blinking-woman1.mp4\")\n",
"\n",
"\n",
"while True:\n",
"\n",
" if fileStream and not vs.more():\n",
" break\n",
"\n",
" frame = vs.read()\n",
"\n",
" # uncomment to our video\n",
" # _, frame = video.read()\n",
"\n",
" # comment to our video\n",
" frame = imutils.resize(frame, width=800)\n",
"\n",
" # comment to our video\n",
" gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n",
"\n",
" rects = detector(gray, 0)\n",
"\n",
" # uncomment to our video\n",
" # rects = detector(frame, 0)\n",
"\n",
" for rect in rects:\n",
"\n",
" # shape = predictor(gray, rect)\n",
" shape = predictor(frame, rect)\n",
" shape = face_utils.shape_to_np(shape)\n",
"\n",
" leftEye = shape[lStart:lEnd]\n",
" rightEye = shape[rStart:rEnd]\n",
" leftEAR = eye_aspect_ratio(leftEye)\n",
" rightEAR = eye_aspect_ratio(rightEye)\n",
"\n",
" ear = (leftEAR + rightEAR) / 2.0\n",
"\n",
" leftEyeHull = cv2.convexHull(leftEye)\n",
" rightEyeHull = cv2.convexHull(rightEye)\n",
" cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)\n",
" cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)\n",
"\n",
" if ear < EYE_AR_THRESH:\n",
" COUNTER += 1\n",
"\n",
" else:\n",
"\n",
" if COUNTER >= EYE_AR_CONSEC_FRAMES:\n",
" TOTAL += 1\n",
"\n",
" COUNTER = 0\n",
"\n",
" cv2.putText(\n",
" frame,\n",
" \"Blinks: {}\".format(TOTAL),\n",
" (10, 30),\n",
" cv2.FONT_HERSHEY_SIMPLEX,\n",
" 0.7,\n",
" (0, 0, 255),\n",
" 2,\n",
" )\n",
" cv2.putText(\n",
" frame,\n",
" \"EAR: {:.2f}\".format(ear),\n",
" (300, 30),\n",
" cv2.FONT_HERSHEY_SIMPLEX,\n",
" 0.7,\n",
" (0, 0, 255),\n",
" 2,\n",
" )\n",
"\n",
"\n",
" cv2.imshow(\"Frame\", frame)\n",
" key = cv2.waitKey(1) & 0xFF\n",
"\n",
" if key == ord(\"q\"):\n",
" break\n",
"\n",
"\n",
"cv2.destroyAllWindows()\n",
"# vs.stop()"
]
},
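The `(-215:Assertion failed) !_filename.empty()` error in the stderr output above is raised by `FileVideoStream('')` being started with an empty path, and that stream is then discarded when `vs` is immediately rebound to `VideoStream(src=0)`. A minimal sketch of picking one source up front, assuming the same imutils classes as the cell above (`open_stream` and the `video_path` parameter are illustrative, not part of the commit):

```python
# Illustrative source selection (not the committed code): start either a
# file-backed stream or the webcam, instead of starting both and discarding
# the first.
from imutils.video import FileVideoStream, VideoStream

def open_stream(video_path=None):
    """Return (stream, is_file): a file stream if a path is given,
    otherwise the default webcam."""
    if video_path:
        return FileVideoStream(video_path).start(), True
    return VideoStream(src=0).start(), False

# e.g. vs, fileStream = open_stream("vid/blinking-woman1.mp4")
```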
{
"cell_type": "code",
"execution_count": 5,
"id": "d4a60b9c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING: pip is being invoked by an old script wrapper. This will fail in a future version of pip.\n",
"Please see https://github.com/pypa/pip/issues/5599 for advice on fixing the underlying issue.\n",
"To avoid this problem you can invoke Python with '-m pip' instead of running pip directly.\n",
"Defaulting to user installation because normal site-packages is not writeable\n",
"Collecting dlib\n",
" Using cached dlib-19.24.0.tar.gz (3.2 MB)\n",
" Preparing metadata (setup.py) ... \u001b[?25ldone\n",
"\u001b[?25hBuilding wheels for collected packages: dlib\n",
" Building wheel for dlib (setup.py) ... \u001b[?25ldone\n",
"\u001b[?25h Created wheel for dlib: filename=dlib-19.24.0-cp38-cp38-linux_x86_64.whl size=4100165 sha256=76f1ab4b327e49bd2a857100e2ac9a94dba4113bd2673dafab4a2356ef010a92\n",
" Stored in directory: /home/mikolaj/.cache/pip/wheels/4c/d8/2d/a83b10e7bf10cd7d8bef36bf4dcd15b0c9ebf98f990bc984dd\n",
"Successfully built dlib\n",
"Installing collected packages: dlib\n",
"Successfully installed dlib-19.24.0\n",
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip available: \u001b[0m\u001b[31;49m22.2.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m22.3.1\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
]
}
],
"source": []
},
{
"cell_type": "code",
"execution_count": 7,
"id": "2dd528af",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING: pip is being invoked by an old script wrapper. This will fail in a future version of pip.\n",
"Please see https://github.com/pypa/pip/issues/5599 for advice on fixing the underlying issue.\n",
"To avoid this problem you can invoke Python with '-m pip' instead of running pip directly.\n",
"Defaulting to user installation because normal site-packages is not writeable\n",
"Collecting imutils\n",
" Downloading imutils-0.5.4.tar.gz (17 kB)\n",
" Preparing metadata (setup.py) ... \u001b[?25ldone\n",
"\u001b[?25hBuilding wheels for collected packages: imutils\n",
" Building wheel for imutils (setup.py) ... \u001b[?25ldone\n",
"\u001b[?25h Created wheel for imutils: filename=imutils-0.5.4-py3-none-any.whl size=25836 sha256=fbd551cf6e0c14ad0239a80ba759a98832e345856a631e7d8ed76f2b21ea4279\n",
" Stored in directory: /home/mikolaj/.cache/pip/wheels/59/1b/52/0dea905f8278d5514dc4d0be5e251967f8681670cadd3dca89\n",
"Successfully built imutils\n",
"Installing collected packages: imutils\n",
"Successfully installed imutils-0.5.4\n",
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip available: \u001b[0m\u001b[31;49m22.2.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m22.3.1\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
]
}
],
"source": []
}
],
"metadata": {
"author": "Andrzej Wójtowicz",
"email": "andre@amu.edu.pl",
"kernelspec": {
-"display_name": "Python 3",
+"display_name": "Python 3.8.12 64-bit",
"language": "python",
"name": "python3"
},
@@ -508,10 +797,15 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.7.3" "version": "3.8.12"
}, },
"subtitle": "09. Wykrywanie i rozpoznawanie tekstu [laboratoria]", "subtitle": "09. Wykrywanie i rozpoznawanie tekstu [laboratoria]",
"title": "Widzenie komputerowe", "title": "Widzenie komputerowe",
"vscode": {
"interpreter": {
"hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
}
},
"year": "2021" "year": "2021"
}, },
"nbformat": 4, "nbformat": 4,

File diff suppressed because one or more lines are too long

zag_9.py (new file, 132 lines)

@@ -0,0 +1,132 @@
import argparse
import time

import cv2
import dlib
import imutils
import numpy as np
from imutils import face_utils
from imutils.video import FileVideoStream, VideoStream
from scipy.spatial import distance as dist


def eye_aspect_ratio(eye):
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    C = dist.euclidean(eye[0], eye[3])
    ear = (A + B) / (2.0 * C)
    return ear


ap = argparse.ArgumentParser()
ap.add_argument(
    "-p", "--shape-predictor", required=True, help="path to facial landmark predictor"
)
ap.add_argument("-v", "--video", type=str, default="", help="path to input video file")
args = vars(ap.parse_args())

EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 3

COUNTER = 0
TOTAL = 0

print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

print("[INFO] starting video stream thread...")
vs = FileVideoStream(args["video"]).start()
fileStream = True
vs = VideoStream(src=0).start()
fileStream = False
time.sleep(1.0)

# uncomment to use the provided video file instead of the webcam
# video = cv2.VideoCapture("vid/blinking-woman1.mp4")

while True:

    if fileStream and not vs.more():
        break

    frame = vs.read()

    # uncomment to use the provided video file
    # _, frame = video.read()

    # comment out when using the provided video file
    frame = imutils.resize(frame, width=800)

    # comment out when using the provided video file
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    rects = detector(gray, 0)

    # uncomment when using the provided video file
    # rects = detector(frame, 0)

    for rect in rects:

        # shape = predictor(gray, rect)
        shape = predictor(frame, rect)
        shape = face_utils.shape_to_np(shape)

        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)

        ear = (leftEAR + rightEAR) / 2.0

        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

        if ear < EYE_AR_THRESH:
            COUNTER += 1

        else:

            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                TOTAL += 1

            COUNTER = 0

        cv2.putText(
            frame,
            "Blinks: {}".format(TOTAL),
            (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (0, 0, 255),
            2,
        )
        cv2.putText(
            frame,
            "EAR: {:.2f}".format(ear),
            (300, 30),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (0, 0, 255),
            2,
        )

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    if key == ord("q"):
        break

cv2.destroyAllWindows()
# vs.stop()
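Since `zag_9.py` takes its inputs via argparse, it would be launched along the lines of `python3 zag_9.py --shape-predictor shape_predictor_68_face_landmarks.dat --video vid/blinking-woman1.mp4` (the file paths here are assumptions; dlib's 68-point landmark model is distributed separately and has to be downloaded first). Pressing `q` in the preview window stops the loop. Note that, exactly as in the notebook cell, `vs` is rebound to `VideoStream(src=0)` right after the `FileVideoStream`, so the webcam is used regardless of `--video` unless one of those two assignments is removed; the sketch after the notebook cell above shows one way to pick a single source.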