diff --git a/camera-test.py b/camera-test.py
index 6688b30..3a33cf8 100644
--- a/camera-test.py
+++ b/camera-test.py
@@ -1,38 +1,77 @@
-import numpy as np
+# import numpy as np
 import cv2
 
-cap = cv2.VideoCapture('recordings/test.webm')
+cap = cv2.VideoCapture('recordings/test1.webm')
+
+ret, frame1 = cap.read()
+ret, frame2 = cap.read()
 
 while(cap.isOpened()):
-    ret, frame = cap.read()
+    diff = cv2.absdiff(frame1, frame2)
+    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
+    blur = cv2.GaussianBlur(gray, (5,5), 0)
+    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
+    dilated = cv2.dilate(thresh, None, iterations=3)
+    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 
-    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    for contour in contours:
+        (x, y, w, h) = cv2.boundingRect(contour)
+
+        if cv2.contourArea(contour) < 9000:
+            continue
+        cv2.rectangle(frame1, (x, y), (x+w, y+h), (0, 255, 0), 2)
+        cv2.putText(frame1, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+
+    cv2.imshow("feed", frame1)
+    frame1 = frame2
+    ret, frame2 = cap.read()
 
-    cv2.imshow('frame',gray)
     if cv2.waitKey(25) & 0xFF == ord('q'):
         break
 
+# When everything done, release the capture
cap.release()
 cv2.destroyAllWindows()
+
+
+
 
 # import numpy as np
 # import cv2
 
-# cap = cv2.VideoCapture(0)
+# cap = cv2.VideoCapture('recordings/test1.webm')
 
-# while(True):
+# while(cap.isOpened()):
 #     # Capture frame-by-frame
 #     ret, frame = cap.read()
 
 #     # Our operations on the frame come here
 #     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-    
+
 #     # Display the resulting frame
-#     cv2.imshow('frame',frame)
-#     cv2.imshow('gray',gray)
-#     if cv2.waitKey(20) & 0xFF == ord('q'):
+#     cv2.imshow('frame',gray)
+#     if cv2.waitKey(25) & 0xFF == ord('q'):
 #         break
 
 # # When everything done, release the capture
 # cap.release()
-# cv2.destroyAllWindows()
\ No newline at end of file
+# cv2.destroyAllWindows()
+
+
+
+
+
+# from imageai.Detection import VideoObjectDetection
+# import os
+
+# execution_path = os.getcwd()
+
+# detector = VideoObjectDetection()
+# detector.setModelTypeAsYOLOv3()
+# detector.setModelPath( os.path.join(execution_path , "yolo.h5"))
+# detector.loadModel()
+
+# video_path = detector.detectObjectsFromVideo(input_file_path=os.path.join( execution_path, "recordings/traffic-mini.mp4"),
+#                 output_file_path=os.path.join(execution_path, "outputs/traffic_mini_detected_1")
+#                 , frames_per_second=29, log_progress=True)
+# print(video_path)
diff --git a/camera-test2.py b/camera-test2.py
deleted file mode 100644
index 2d3f3f6..0000000
--- a/camera-test2.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import numpy as np
-import cv2
-
-cap = cv2.VideoCapture(0)
-
-while(True):
-    # Capture frame-by-frame
-    ret, frame = cap.read()
-
-    # Our operations on the frame come here
-    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-
-    # Display the resulting frame
-    cv2.imshow('frame',frame)
-    cv2.imshow('gray',gray)
-    if cv2.waitKey(20) & 0xFF == ord('q'):
-        break
-
-# When everything done, release the capture
-cap.release()
-cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/recordings/test.webm b/recordings/test1.webm
similarity index 100%
rename from recordings/test.webm
rename to recordings/test1.webm
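
A caveat on the new loop in camera-test.py: neither cap.read() result is checked, so once test1.webm runs out of frames frame2 becomes None and cv2.absdiff(frame1, frame2) raises an error. Below is a minimal sketch of the same frame-differencing loop with an end-of-stream guard, assuming OpenCV 4.x (matching the two-value cv2.findContours call in the patch) and the same file layout; the ret check is the only substantive change.

import cv2

cap = cv2.VideoCapture('recordings/test1.webm')

# Prime the differencing pair; ret is False if the file is missing or empty.
ret, frame1 = cap.read()
ret, frame2 = cap.read()

while cap.isOpened() and ret:
    # Motion = per-pixel difference between two consecutive frames,
    # cleaned up with grayscale -> blur -> threshold -> dilate.
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    for contour in contours:
        if cv2.contourArea(contour) < 9000:
            continue  # same noise threshold as the patch
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame1, "Status: Movement", (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    cv2.imshow("feed", frame1)

    # Slide the window: ret goes False at end of stream, ending the loop
    # instead of passing None into cv2.absdiff on the next iteration.
    frame1 = frame2
    ret, frame2 = cap.read()

    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

Differencing consecutive frames keeps the detector cheap, but any global change (a lighting shift, camera shake) can also trip the 9000-pixel area threshold; cv2.createBackgroundSubtractorMOG2() is the usual next step if that becomes a problem.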