Add object detection in the scene

Katarzyna Kaczyńska 2020-01-29 14:23:32 +01:00
parent 5c71bfcd83
commit faa940ce78
3 changed files with 51 additions and 33 deletions


@@ -1,38 +1,77 @@
import numpy as np
# import numpy as np
import cv2

cap = cv2.VideoCapture('recordings/test.webm')
cap = cv2.VideoCapture('recordings/test1.webm')

# Read two consecutive frames so their difference can be used to detect motion
ret, frame1 = cap.read()
ret, frame2 = cap.read()

while(cap.isOpened()):
    ret, frame = cap.read()
    # Pixels that changed between the two frames show up in the difference image
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5,5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for contour in contours:
        (x, y, w, h) = cv2.boundingRect(contour)
        # Skip small contours so noise is not reported as movement
        if cv2.contourArea(contour) < 9000:
            continue
        cv2.rectangle(frame1, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.putText(frame1, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("feed", frame1)
    # Slide the frame pair forward by one frame
    frame1 = frame2
    ret, frame2 = cap.read()
    cv2.imshow('frame', gray)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
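
A caveat with the loop above, outside the scope of this commit: for a file capture, cap.read() starts returning ret=False and a None frame once the recording is exhausted, while isOpened() stays True, so absdiff/cvtColor will eventually be handed None and raise. A minimal guarded variant of the read loop, with the per-frame processing elided, could look like this:

import cv2

cap = cv2.VideoCapture('recordings/test1.webm')
ret, frame1 = cap.read()
ret, frame2 = cap.read()

while cap.isOpened() and ret and frame2 is not None:
    diff = cv2.absdiff(frame1, frame2)
    # ... thresholding, dilation and contour drawing as in the script above ...
    cv2.imshow("feed", frame1)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()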
# import numpy as np
# import cv2
# cap = cv2.VideoCapture(0)
# cap = cv2.VideoCapture('recordings/test1.webm')
# while(True):
# while(cap.isOpened()):
#     # Capture frame-by-frame
#     ret, frame = cap.read()
#     # Our operations on the frame come here
#     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#     # Display the resulting frame
#     cv2.imshow('frame',frame)
#     cv2.imshow('gray',gray)
#     if cv2.waitKey(20) & 0xFF == ord('q'):
#     cv2.imshow('frame',gray)
#     if cv2.waitKey(25) & 0xFF == ord('q'):
#         break
# # When everything done, release the capture
# cap.release()
# cv2.destroyAllWindows()
# cv2.destroyAllWindows()
# from imageai.Detection import VideoObjectDetection
# import os
# execution_path = os.getcwd()
# detector = VideoObjectDetection()
# detector.setModelTypeAsYOLOv3()
# detector.setModelPath( os.path.join(execution_path , "yolo.h5"))
# detector.loadModel()
# video_path = detector.detectObjectsFromVideo(input_file_path=os.path.join(execution_path, "recordings/traffic-mini.mp4"),
#                                               output_file_path=os.path.join(execution_path, "outputs/traffic_mini_detected_1"),
#                                               frames_per_second=29, log_progress=True)
# print(video_path)
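
The commented-out block above is the ImageAI detection path referred to in the commit title. Uncommented, and assuming imageai and its TensorFlow backend are installed and the pretrained yolo.h5 weights file sits in the working directory, a minimal runnable sketch of the same pipeline would be:

from imageai.Detection import VideoObjectDetection
import os

execution_path = os.getcwd()

# Load the pretrained YOLOv3 weights used for detection
detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
detector.loadModel()

# Detect objects frame by frame and save an annotated copy of the video
video_path = detector.detectObjectsFromVideo(
    input_file_path=os.path.join(execution_path, "recordings/traffic-mini.mp4"),
    output_file_path=os.path.join(execution_path, "outputs/traffic_mini_detected_1"),
    frames_per_second=29,
    log_progress=True,
)
print(video_path)

detectObjectsFromVideo writes the annotated video to the given output_file_path and returns the path of the saved file.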


@@ -1,21 +0,0 @@
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv2.imshow('frame',frame)
    cv2.imshow('gray',gray)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()