Addition of new tests for detection and tracking

Katarzyna Kaczyńska 2020-02-01 13:31:09 +01:00
parent d7d35334b0
commit 7c4386206b
3 changed files with 118 additions and 49 deletions

motion_detector.py (new file, 95 lines added)

@@ -0,0 +1,95 @@
# USAGE
# python motion_detector.py
# python motion_detector.py --video videos/example_01.mp4
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="recordings/test1.webm")
ap.add_argument("-a", "--min-area", type=int, default=500, help="50px")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
# otherwise, we are reading from a video file
else:
    vs = cv2.VideoCapture(args["video"])
# initialize the first frame in the video stream (must start as None so
# the check inside the loop below captures the first grayscale frame)
firstFrame = None
# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    frame = vs.read()
    frame = frame if args.get("video", None) is None else frame[1]
    text = "Unoccupied"

    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if frame is None:
        break

    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the first frame is None, initialize it
    if firstFrame is None:
        firstFrame = gray
        continue

    # compute the absolute difference between the current frame and
    # first frame
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

    # dilate the thresholded image to fill in holes, then find contours
    # on thresholded image
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < args["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"

    # draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # show the frame and record if the user presses a key
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()
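
As a quick, self-contained check of the differencing step above (the same absdiff -> threshold -> dilate -> contour chain), the pipeline can be exercised on synthetic frames with no camera or video file. This is a minimal sketch, not part of the commit; it additionally assumes numpy is available:

# sketch: verify the frame-differencing chain on synthetic grayscale frames
import cv2
import imutils
import numpy as np

background = np.zeros((200, 200), dtype=np.uint8)      # empty "first frame"
current = background.copy()
cv2.rectangle(current, (50, 50), (120, 120), 255, -1)  # simulated moving object

delta = cv2.absdiff(background, current)
thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = imutils.grab_contours(cv2.findContours(thresh.copy(),
    cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))

# the synthetic square should come back as a single contour larger than
# the script's default --min-area of 500
assert len(cnts) == 1 and cv2.contourArea(cnts[0]) > 500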

(modified file, filename not shown)

@@ -1,52 +1,36 @@
from pyimagesearch.centroidtracker import CentroidTracker
from pyimagesearch.trackableobject import TrackableObject
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
cap = cv2.VideoCapture('recordings/test1.webm')
ret, frame1 = cap.read()
ret, frame2 = cap.read()

while cap.isOpened():
    # difference two consecutive frames rather than a fixed first frame
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    for contour in contours:
        (x, y, w, h) = cv2.boundingRect(contour)
        # skip small contours (noise)
        if cv2.contourArea(contour) < 9000:
            continue
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame1, "Status: {}".format('Movement'), (10, 20),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    cv2.imshow("feed", frame1)

    # advance the frame pair; stop once the video runs out so absdiff
    # is never called with a None frame
    frame1 = frame2
    ret, frame2 = cap.read()
    if not ret:
        break

    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
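
The pyimagesearch imports at the top of this file are not yet exercised by the loop above. As a hedged sketch of how the per-frame detections could feed them, assuming the CentroidTracker API from the pyimagesearch tutorials these files follow (an update(rects) method that takes (startX, startY, endX, endY) boxes and returns an objectID -> centroid mapping; the track helper itself is hypothetical):

# hypothetical wiring of contour detections into the imported tracker classes
from pyimagesearch.centroidtracker import CentroidTracker
from pyimagesearch.trackableobject import TrackableObject

ct = CentroidTracker()
trackableObjects = {}

def track(rects):
    # rects: (startX, startY, endX, endY) boxes, i.e. (x, y, x + w, y + h)
    # from the contour loop above
    objects = ct.update(rects)
    for (objectID, centroid) in objects.items():
        to = trackableObjects.get(objectID, None)
        if to is None:
            # first sighting of this ID: start a new trackable object
            to = TrackableObject(objectID, centroid)
        else:
            # known object: extend its centroid history
            to.centroids.append(centroid)
        trackableObjects[objectID] = to
    return objects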

(deleted file, filename not shown)

@@ -1,10 +0,0 @@
class TrackableObject:
    def __init__(self, objectID, centroid):
        # store the object ID, then initialize a list of centroids
        # using the current centroid
        self.objectID = objectID
        self.centroids = [centroid]

        # initialize a boolean used to indicate if the object has
        # already been counted or not
        self.counted = False
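
For context, the counted flag of this (now deleted) class is what a counting loop uses to avoid tallying the same object twice: the centroid history gives a direction of motion, and once the object crosses a counting line it is marked counted. A minimal sketch of that pattern, with the horizontal mid-line and the counts dict as illustrative assumptions:

# sketch: count an object once when it crosses a horizontal mid-line
import numpy as np

def maybe_count(to, centroid, frame_height, counts):
    # direction: negative means moving up, positive means moving down
    direction = centroid[1] - np.mean([c[1] for c in to.centroids])
    if not to.counted:
        if direction < 0 and centroid[1] < frame_height // 2:
            counts["up"] += 1
            to.counted = True
        elif direction > 0 and centroid[1] > frame_height // 2:
            counts["down"] += 1
            to.counted = True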