find and crop face implementation

This commit is contained in:
Mateusz Tylka 2023-01-29 15:17:39 +01:00
parent 46f0f7fa10
commit 58d2f6f83c
4 changed files with 7 additions and 79 deletions

View File

Before

Width:  |  Height:  |  Size: 67 KiB

After

Width:  |  Height:  |  Size: 67 KiB

BIN
face0.jpg

Binary file not shown.

Before

Width:  |  Height:  |  Size: 30 KiB

10
main.py
View File

@@ -8,13 +8,17 @@ import numpy as np
from source.cartoonize import Cartoonizer from source.cartoonize import Cartoonizer
def load_source(filename: str) -> np.ndarray:
    """Read an image file and return it with the channel order reversed (BGR -> RGB)."""
    bgr = cv2.imread(filename)
    return bgr[..., ::-1]
def find_and_crop_face(data: np.ndarray) -> np.ndarray:
    """Detect the most prominent frontal face in *data* and return the cropped region.

    Returns the input image unchanged when no face is detected.
    Note: the Haar cascade expects BGR input for the grayscale conversion;
    callers feeding RGB (e.g. from load_source) should confirm channel order.
    """
    data_gray = cv2.cvtColor(data, cv2.COLOR_BGR2GRAY)
    face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    faces = face_cascade.detectMultiScale(data_gray, 1.3, 4)
    if len(faces) == 0:
        # detectMultiScale returns an empty tuple when nothing is found;
        # max() would raise ValueError here, so fall back to the full image.
        return data
    # Pick the largest detection by area. The previous key=len was a no-op:
    # every detection is a fixed-length (x, y, w, h) record.
    (x, y, w, h) = max(faces, key=lambda rect: rect[2] * rect[3])
    # Return the crop — previously the function computed it but returned
    # the original image.
    return data[y:y + h, x:x + w]
@@ -23,7 +27,7 @@ def compare_with_anime_characters(data: np.ndarray) -> int:
return 1 return 1
def transfer_to_anime(img: np.ndarray):
    """Cartoonize *img* with the pretrained person-image-cartoon model and return the result."""
    cartoonizer = Cartoonizer(dataroot='damo/cv_unet_person-image-cartoon_compound-models')
    return cartoonizer.cartoonize(img)

View File

@@ -1,76 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Number of detected faces: 1\n",
"face0.jpg is saved\n"
]
}
],
"source": [
"# import required libraries\n",
"import cv2\n",
"\n",
"# read the input image\n",
"img = cv2.imread('data/UAM-Andrzej-Wójtowicz.jpg')\n",
"\n",
"# convert to grayscale of each frames\n",
"gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
"\n",
"# read the haarcascade to detect the faces in an image\n",
"face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')\n",
"\n",
"# detects faces in the input image\n",
"faces = face_cascade.detectMultiScale(gray, 1.3, 4)\n",
"print('Number of detected faces:', len(faces))\n",
"\n",
"# loop over all detected faces\n",
"if len(faces) > 0:\n",
" for i, (x, y, w, h) in enumerate(faces):\n",
" # To draw a rectangle in a face\n",
" cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 2)\n",
" face = img[y:y + h, x:x + w]\n",
" cv2.imshow(\"Cropped Face\", face)\n",
" cv2.imwrite(f'face{i}.jpg', face)\n",
" print(f\"face{i}.jpg is saved\")\n",
" \n",
"# display the image with detected faces\n",
"cv2.imshow(\"image\", img)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.10.8 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "767d51c1340bd893661ea55ea3124f6de3c7a262a8b4abca0554b478b1e2ff90"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}