diff --git a/camera-test.py b/camera-test.py
deleted file mode 100644
index 3a33cf8..0000000
--- a/camera-test.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# import numpy as np
-import cv2
-
-cap = cv2.VideoCapture('recordings/test1.webm')
-
-ret, frame1 = cap.read()
-ret, frame2 = cap.read()
-
-while(cap.isOpened()):
-    diff = cv2.absdiff(frame1, frame2)
-    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
-    blur = cv2.GaussianBlur(gray, (5,5), 0)
-    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
-    dilated = cv2.dilate(thresh, None, iterations=3)
-    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
-
-    for contour in contours:
-        (x, y, w, h) = cv2.boundingRect(contour)
-
-        if cv2.contourArea(contour) < 9000:
-            continue
-        cv2.rectangle(frame1, (x, y), (x+w, y+h), (0, 255, 0), 2)
-        cv2.putText(frame1, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
-
-    cv2.imshow("feed", frame1)
-    frame1 = frame2
-    ret, frame2 = cap.read()
-
-    if cv2.waitKey(25) & 0xFF == ord('q'):
-        break
-
-# When everything done, release the capture
-cap.release()
-cv2.destroyAllWindows()
-
-
-
-
-# import numpy as np
-# import cv2
-
-# cap = cv2.VideoCapture('recordings/test1.webm')
-
-# while(cap.isOpened()):
-#     # Capture frame-by-frame
-#     ret, frame = cap.read()
-
-#     # Our operations on the frame come here
-#     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-
-#     # Display the resulting frame
-#     cv2.imshow('frame',gray)
-#     if cv2.waitKey(25) & 0xFF == ord('q'):
-#         break
-
-# # When everything done, release the capture
-# cap.release()
-# cv2.destroyAllWindows()
-
-
-
-
-# from imageai.Detection import VideoObjectDetection
-# import os
-
-# execution_path = os.getcwd()
-
-# detector = VideoObjectDetection()
-# detector.setModelTypeAsYOLOv3()
-# detector.setModelPath( os.path.join(execution_path , "yolo.h5"))
-# detector.loadModel()
-
-# video_path = detector.detectObjectsFromVideo(input_file_path=os.path.join( execution_path, "recordings/traffic-mini.mp4"),
-# output_file_path=os.path.join(execution_path, "outputs/traffic_mini_detected_1")
-# , frames_per_second=29, log_progress=True)
-# print(video_path)
diff --git a/project.py b/project.py
new file mode 100644
index 0000000..1a6aced
--- /dev/null
+++ b/project.py
@@ -0,0 +1,92 @@
+from pyimagesearch.centroidtracker import CentroidTracker
+from pyimagesearch.trackableobject import TrackableObject
+from imutils.video import VideoStream
+from imutils.video import FPS
+import numpy as np
+import argparse
+import imutils
+import time
+import dlib
+import cv2
+
+
+
+
+
+# import numpy as np
+# import cv2
+
+# cap = cv2.VideoCapture('recordings/test1.webm')
+
+# ret, frame1 = cap.read()
+# ret, frame2 = cap.read()
+
+# while(cap.isOpened()):
+#     diff = cv2.absdiff(frame1, frame2)
+#     gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
+#     blur = cv2.GaussianBlur(gray, (5,5), 0)
+#     _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
+#     dilated = cv2.dilate(thresh, None, iterations=3)
+#     contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+
+#     for contour in contours:
+#         (x, y, w, h) = cv2.boundingRect(contour)
+
+#         if cv2.contourArea(contour) < 9000:
+#             continue
+#         cv2.rectangle(frame1, (x, y), (x+w, y+h), (0, 255, 0), 2)
+#         cv2.putText(frame1, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+
+#     cv2.imshow("feed", frame1)
+#     frame1 = frame2
+#     ret, frame2 = cap.read()
+
+#     if cv2.waitKey(25) & 0xFF == ord('q'):
+#         break
+
+# cap.release()
+# cv2.destroyAllWindows()
+
+
+
+
+# import numpy as np
+# import cv2
+
+# cap = cv2.VideoCapture('recordings/test1.webm')
+
+# while(cap.isOpened()):
+#     # Capture frame-by-frame
+#     ret, frame = cap.read()
+
+#     # Our operations on the frame come here
+#     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+#     # Display the resulting frame
+#     cv2.imshow('frame',gray)
+#     if cv2.waitKey(25) & 0xFF == ord('q'):
+#         break
+
+# # When everything done, release the capture
+# cap.release()
+# cv2.destroyAllWindows()
+
+
+
+
+
+# from imageai.Detection import VideoObjectDetection
+# import os
+
+# execution_path = os.getcwd()
+
+# detector = VideoObjectDetection()
+# detector.setModelTypeAsYOLOv3()
+# detector.setModelPath( os.path.join(execution_path , "yolo.h5"))
+# detector.loadModel()
+
+# video_path = detector.detectObjectsFromVideo(input_file_path=os.path.join( execution_path, "recordings/traffic-mini.mp4"),
+# output_file_path=os.path.join(execution_path, "outputs/traffic_mini_detected_1")
+# , frames_per_second=29, log_progress=True)
+# print(video_path)
diff --git a/trackableobject.py b/trackableobject.py
new file mode 100644
index 0000000..6f4137e
--- /dev/null
+++ b/trackableobject.py
@@ -0,0 +1,10 @@
+class TrackableObject:
+    def __init__(self, objectID, centroid):
+        # store the object ID, then initialize a list of centroids
+        # using the current centroid
+        self.objectID = objectID
+        self.centroids = [centroid]
+
+        # initialize a boolean used to indicate if the object has
+        # already been counted or not
+        self.counted = False
\ No newline at end of file
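project.py at this stage only wires up its imports; the tracking loop itself is not part of this diff. Note also that project.py imports TrackableObject from pyimagesearch.trackableobject while the commit adds trackableobject.py at the repository root, so one of the two paths will have to move. For orientation, below is a minimal sketch, not part of the commit, of how TrackableObject is typically paired with the imported CentroidTracker to count objects. The update_counts helper, the rects list of (startX, startY, endX, endY) detection boxes, and the maxDisappeared value are assumptions; CentroidTracker.update(), which returns a mapping of object IDs to centroids, follows the pyimagesearch convention the imports point to.

from pyimagesearch.centroidtracker import CentroidTracker
from trackableobject import TrackableObject

ct = CentroidTracker(maxDisappeared=40)  # frames a lost ID survives (assumed value)
trackableObjects = {}                    # objectID -> TrackableObject
totalCount = 0

def update_counts(rects):
    # rects: hypothetical list of (startX, startY, endX, endY) boxes
    # produced by whatever detector the finished project.py ends up using
    global totalCount

    # associate this frame's detections with persistent object IDs
    objects = ct.update(rects)

    for (objectID, centroid) in objects.items():
        to = trackableObjects.get(objectID, None)

        if to is None:
            # first sighting of this ID: start a centroid history for it
            to = TrackableObject(objectID, centroid)
        else:
            # known ID: extend its centroid history
            to.centroids.append(centroid)

            if not to.counted:
                # the counted flag guarantees each ID is counted once,
                # not once per frame; a finished counter might instead
                # count only IDs whose centroid crosses a reference line
                to.counted = True
                totalCount += 1

        trackableObjects[objectID] = to

    return totalCount

The centroids list kept by TrackableObject is what makes direction-of-travel logic possible later: comparing the current centroid against the mean of the stored history tells you which way an object is moving before you decide whether to count it.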