# Pracownia-programowania/project.py

import numpy as np
import cv2
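
# Motion detection by frame differencing: compare two consecutive frames,
# threshold the difference, and mark any sufficiently large changed region
# as movement.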
cap = cv2.VideoCapture('recordings/test1.webm')
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
    # Stop cleanly once the video runs out of frames.
    if frame1 is None or frame2 is None:
        break

    # Pixels that changed between the two frames show up in the difference.
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    for contour in contours:
        # Skip small contours: anything under 9000 px of area is treated as noise.
        if cv2.contourArea(contour) < 9000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame1, "Status: {}".format('Movement'), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    cv2.imshow("feed", frame1)

    # Slide the frame pair forward by one frame.
    frame1 = frame2
    ret, frame2 = cap.read()

    # ~25 ms per frame; press 'q' to quit.
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
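
# Kept for reference: a minimal playback loop that just shows the recording
# in grayscale, without any motion detection.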
# import numpy as np
# import cv2
#
# cap = cv2.VideoCapture('recordings/test1.webm')
# while cap.isOpened():
#     # Capture frame-by-frame
#     ret, frame = cap.read()
#     # Our operations on the frame come here
#     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#     # Display the resulting frame
#     cv2.imshow('frame', gray)
#     if cv2.waitKey(25) & 0xFF == ord('q'):
#         break
#
# # When everything done, release the capture
# cap.release()
# cv2.destroyAllWindows()
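
# Kept for reference: object detection on a recording with ImageAI's YOLOv3
# video detector (expects the pretrained "yolo.h5" weights in the working
# directory).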
# from imageai.Detection import VideoObjectDetection
# import os
#
# execution_path = os.getcwd()
#
# detector = VideoObjectDetection()
# detector.setModelTypeAsYOLOv3()
# detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
# detector.loadModel()
#
# video_path = detector.detectObjectsFromVideo(
#     input_file_path=os.path.join(execution_path, "recordings/traffic-mini.mp4"),
#     output_file_path=os.path.join(execution_path, "outputs/traffic_mini_detected_1"),
#     frames_per_second=29,
#     log_progress=True)
# print(video_path)
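
# A possible alternative (a sketch, not part of the original script): OpenCV's
# built-in MOG2 background subtractor instead of raw frame differencing; it
# adapts to gradual lighting changes. The history/varThreshold values and the
# 9000 px minimum area below are assumptions, not tuned for these recordings.
# import cv2
# cap = cv2.VideoCapture('recordings/test1.webm')
# subtractor = cv2.createBackgroundSubtractorMOG2(history=120, varThreshold=40)
# while cap.isOpened():
#     ret, frame = cap.read()
#     if not ret:
#         break
#     mask = subtractor.apply(frame)  # foreground mask; shadows come out grey (127)
#     _, mask = cv2.threshold(mask, 200, 255, cv2.THRESH_BINARY)  # keep only sure foreground
#     contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#     for contour in contours:
#         if cv2.contourArea(contour) < 9000:
#             continue
#         (x, y, w, h) = cv2.boundingRect(contour)
#         cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
#     cv2.imshow('mog2', frame)
#     if cv2.waitKey(25) & 0xFF == ord('q'):
#         break
# cap.release()
# cv2.destroyAllWindows()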