import cv2 as cv
import argparse
import numpy as np
import os.path
import sys
import random

# Initialize parameters
confThreshold = 0.5   # confidence threshold for detections
maskThreshold = 0.3   # threshold for binarizing the predicted mask

# Parse command-line arguments (an optional image path, video path, or neither for webcam input)
parser = argparse.ArgumentParser(description='Mask R-CNN object detection and segmentation with OpenCV')
parser.add_argument('--image', help='Path to image file.')
parser.add_argument('--video', help='Path to video file.')
args = parser.parse_args()

# Draw the bounding box around the detected animal, colour it, and overlay its mask
def drawBox(frame, classId, conf, left, top, right, bottom, classMask):
    # bounding box
    cv.rectangle(frame, (left, top), (right, bottom), (255, 178, 50), 3)

    # object label
    label = '%.2f' % conf
    if classes:
        assert(classId < len(classes))
        label = '%s:%s' % (classes[classId], label)

    # display the label
    labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    top = max(top, labelSize[1])
    cv.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine), (255, 255, 255), cv.FILLED)
    cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1)

    # resize the mask to the box size and overlay it on the object
    classMask = cv.resize(classMask, (right - left + 1, bottom - top + 1))
    mask = (classMask > maskThreshold)
    roi = frame[top:bottom+1, left:right+1][mask]

    colorIndex = random.randint(0, len(colors) - 1)
    color = colors[colorIndex]

    frame[top:bottom+1, left:right+1][mask] = ([0.3*color[0], 0.3*color[1], 0.3*color[2]] + 0.7 * roi).astype(np.uint8)

    # draw the mask contours on the image
    mask = mask.astype(np.uint8)
    # OpenCV 4.x returns (contours, hierarchy); OpenCV 3.x returns (image, contours, hierarchy),
    # so take the last two values to work with either version
    contours, hierarchy = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)[-2:]
    cv.drawContours(frame[top:bottom+1, left:right+1], contours, -1, color, 3, cv.LINE_8, hierarchy, 100)

# For each frame, extract the detections and apply the masks to the image
def postprocess(boxes, masks):
    # masks has shape N x C x H x W, where
    # N - number of detected boxes
    # C - number of classes
    # H, W - mask height and width
    numClasses = masks.shape[1]
    numDetections = boxes.shape[2]

    frameH = frame.shape[0]
    frameW = frame.shape[1]

    for i in range(numDetections):
        box = boxes[0, 0, i]
        mask = masks[i]
        score = box[2]
        if score > confThreshold:
            classId = int(box[1])

            # extract the bounding box coordinates
            left = int(frameW * box[3])
            top = int(frameH * box[4])
            right = int(frameW * box[5])
            bottom = int(frameH * box[6])

            left = max(0, min(left, frameW - 1))
            top = max(0, min(top, frameH - 1))
            right = max(0, min(right, frameW - 1))
            bottom = max(0, min(bottom, frameH - 1))

            # the mask for the detected class
            classMask = mask[classId]

            # draw everything on the frame
            drawBox(frame, classId, score, left, top, right, bottom, classMask)

# Load the class names
classesFile = "mscoco_labels.names"
classes = None
with open(classesFile, 'rt') as f:
    classes = f.read().rstrip('\n').split('\n')

# Give the textGraph and weight files for the model
textGraph = "./mask.pbtxt"
modelWeights = "./mask/frozen_inference_graph.pb"

# Load the network
net = cv.dnn.readNetFromTensorflow(modelWeights, textGraph)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

# Load the colors used for the mask overlays
colorsFile = "colors.txt"
with open(colorsFile, 'rt') as f:
    colorsStr = f.read().rstrip('\n').split('\n')
colors = []
for i in range(len(colorsStr)):
    rgb = colorsStr[i].split(' ')
    color = np.array([float(rgb[0]), float(rgb[1]), float(rgb[2])])
    colors.append(color)

winName = 'Mask-RCNN Object detection and Segmentation in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)

outputFile = "mask_rcnn_out_py.avi"
if args.image:
    # Open the image file
    if not os.path.isfile(args.image):
        print("Input image file ", args.image, " doesn't exist")
        sys.exit(1)
    cap = cv.VideoCapture(args.image)
    outputFile = args.image[:-4] + '_mask_rcnn_out_py.jpg'
elif args.video:
    # Open the video file
    if not os.path.isfile(args.video):
        print("Input video file ", args.video, " doesn't exist")
        sys.exit(1)
    cap = cv.VideoCapture(args.video)
    outputFile = args.video[:-4] + '_mask_rcnn_out_py.avi'
else:
    # Webcam input
    cap = cv.VideoCapture(0)

# Get the video writer initialized to save the output video
if not args.image:
    vid_writer = cv.VideoWriter(outputFile, cv.VideoWriter_fourcc('M', 'J', 'P', 'G'), 28,
                                (round(cap.get(cv.CAP_PROP_FRAME_WIDTH)), round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))

while cv.waitKey(1) < 0:
    # Get a frame from the video
    hasFrame, frame = cap.read()

    # Stop the program if we reached the end of the video
    if not hasFrame:
        print("Done processing !!!")
        print("Output file is stored as ", outputFile)
        cv.waitKey(3000)
        break

    # Create a 4D blob from the frame
    blob = cv.dnn.blobFromImage(frame, swapRB=True, crop=False)

    # Set the input to the network
    net.setInput(blob)

    # Run the forward pass to get output from the output layers
    boxes, masks = net.forward(['detection_out_final', 'detection_masks'])

    # Extract the bounding box and mask for each of the detected objects
    postprocess(boxes, masks)

    # Put efficiency information
    t, _ = net.getPerfProfile()
    label = 'Mask-RCNN on 2.5 GHz Intel Core i7 CPU, Inference time for a frame : %0.0f ms' % abs(t * 1000.0 / cv.getTickFrequency())
    cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

    # Write the frame with the detection boxes and masks
    if args.image:
        cv.imwrite(outputFile, frame.astype(np.uint8))
    else:
        vid_writer.write(frame.astype(np.uint8))

    cv.imshow(winName, frame)