{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import cv2\n", "import matplotlib.pyplot as plt\n", "import keras\n", "import numpy as np\n", "import threading\n", "import tensorflow as tf" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "def wrap_frozen_graph(graph_def, inputs, outputs, print_graph=False):\n", " def _imports_graph_def():\n", " tf.compat.v1.import_graph_def(graph_def, name=\"\")\n", "\n", " wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])\n", " import_graph = wrapped_import.graph\n", "\n", " if print_graph == True:\n", " print(\"-\" * 50)\n", " print(\"Frozen model layers: \")\n", " layers = [op.name for op in import_graph.get_operations()]\n", " for layer in layers:\n", " print(layer)\n", " print(\"-\" * 50)\n", "\n", " return wrapped_import.prune(\n", " tf.nest.map_structure(import_graph.as_graph_element, inputs),\n", " tf.nest.map_structure(import_graph.as_graph_element, outputs))" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "--------------------------------------------------\n", "Frozen model layers: \n", "x\n", "sequential/conv2d/Conv2D/ReadVariableOp/resource\n", "sequential/conv2d/Conv2D/ReadVariableOp\n", "sequential/conv2d/Conv2D\n", "sequential/conv2d/BiasAdd/ReadVariableOp/resource\n", "sequential/conv2d/BiasAdd/ReadVariableOp\n", "sequential/conv2d/BiasAdd\n", "sequential/conv2d/Relu\n", "sequential/batch_normalization/ReadVariableOp/resource\n", "sequential/batch_normalization/ReadVariableOp\n", "sequential/batch_normalization/ReadVariableOp_1/resource\n", "sequential/batch_normalization/ReadVariableOp_1\n", "sequential/batch_normalization/FusedBatchNormV3/ReadVariableOp/resource\n", "sequential/batch_normalization/FusedBatchNormV3/ReadVariableOp\n", "sequential/batch_normalization/FusedBatchNormV3/ReadVariableOp_1/resource\n", "sequential/batch_normalization/FusedBatchNormV3/ReadVariableOp_1\n", "sequential/batch_normalization/FusedBatchNormV3\n", "sequential/max_pooling2d/MaxPool\n", "sequential/conv2d_1/Conv2D/ReadVariableOp/resource\n", "sequential/conv2d_1/Conv2D/ReadVariableOp\n", "sequential/conv2d_1/Conv2D\n", "sequential/conv2d_1/BiasAdd/ReadVariableOp/resource\n", "sequential/conv2d_1/BiasAdd/ReadVariableOp\n", "sequential/conv2d_1/BiasAdd\n", "sequential/conv2d_1/Relu\n", "sequential/batch_normalization_1/ReadVariableOp/resource\n", "sequential/batch_normalization_1/ReadVariableOp\n", "sequential/batch_normalization_1/ReadVariableOp_1/resource\n", "sequential/batch_normalization_1/ReadVariableOp_1\n", "sequential/batch_normalization_1/FusedBatchNormV3/ReadVariableOp/resource\n", "sequential/batch_normalization_1/FusedBatchNormV3/ReadVariableOp\n", "sequential/batch_normalization_1/FusedBatchNormV3/ReadVariableOp_1/resource\n", "sequential/batch_normalization_1/FusedBatchNormV3/ReadVariableOp_1\n", "sequential/batch_normalization_1/FusedBatchNormV3\n", "sequential/max_pooling2d_1/MaxPool\n", "sequential/conv2d_2/Conv2D/ReadVariableOp/resource\n", "sequential/conv2d_2/Conv2D/ReadVariableOp\n", "sequential/conv2d_2/Conv2D\n", "sequential/conv2d_2/BiasAdd/ReadVariableOp/resource\n", "sequential/conv2d_2/BiasAdd/ReadVariableOp\n", "sequential/conv2d_2/BiasAdd\n", "sequential/conv2d_2/Relu\n", "sequential/batch_normalization_2/ReadVariableOp/resource\n", "sequential/batch_normalization_2/ReadVariableOp\n", "sequential/batch_normalization_2/ReadVariableOp_1/resource\n", "sequential/batch_normalization_2/ReadVariableOp_1\n", "sequential/batch_normalization_2/FusedBatchNormV3/ReadVariableOp/resource\n", "sequential/batch_normalization_2/FusedBatchNormV3/ReadVariableOp\n", "sequential/batch_normalization_2/FusedBatchNormV3/ReadVariableOp_1/resource\n", "sequential/batch_normalization_2/FusedBatchNormV3/ReadVariableOp_1\n", "sequential/batch_normalization_2/FusedBatchNormV3\n", "sequential/conv2d_3/Conv2D/ReadVariableOp/resource\n", "sequential/conv2d_3/Conv2D/ReadVariableOp\n", "sequential/conv2d_3/Conv2D\n", "sequential/conv2d_3/BiasAdd/ReadVariableOp/resource\n", "sequential/conv2d_3/BiasAdd/ReadVariableOp\n", "sequential/conv2d_3/BiasAdd\n", "sequential/conv2d_3/Relu\n", "sequential/batch_normalization_3/ReadVariableOp/resource\n", "sequential/batch_normalization_3/ReadVariableOp\n", "sequential/batch_normalization_3/ReadVariableOp_1/resource\n", "sequential/batch_normalization_3/ReadVariableOp_1\n", "sequential/batch_normalization_3/FusedBatchNormV3/ReadVariableOp/resource\n", "sequential/batch_normalization_3/FusedBatchNormV3/ReadVariableOp\n", "sequential/batch_normalization_3/FusedBatchNormV3/ReadVariableOp_1/resource\n", "sequential/batch_normalization_3/FusedBatchNormV3/ReadVariableOp_1\n", "sequential/batch_normalization_3/FusedBatchNormV3\n", "sequential/conv2d_4/Conv2D/ReadVariableOp/resource\n", "sequential/conv2d_4/Conv2D/ReadVariableOp\n", "sequential/conv2d_4/Conv2D\n", "sequential/conv2d_4/BiasAdd/ReadVariableOp/resource\n", "sequential/conv2d_4/BiasAdd/ReadVariableOp\n", "sequential/conv2d_4/BiasAdd\n", "sequential/conv2d_4/Relu\n", "sequential/batch_normalization_4/ReadVariableOp/resource\n", "sequential/batch_normalization_4/ReadVariableOp\n", "sequential/batch_normalization_4/ReadVariableOp_1/resource\n", "sequential/batch_normalization_4/ReadVariableOp_1\n", "sequential/batch_normalization_4/FusedBatchNormV3/ReadVariableOp/resource\n", "sequential/batch_normalization_4/FusedBatchNormV3/ReadVariableOp\n", "sequential/batch_normalization_4/FusedBatchNormV3/ReadVariableOp_1/resource\n", "sequential/batch_normalization_4/FusedBatchNormV3/ReadVariableOp_1\n", "sequential/batch_normalization_4/FusedBatchNormV3\n", "sequential/max_pooling2d_2/MaxPool\n", "sequential/flatten/Const\n", "sequential/flatten/Reshape\n", "sequential/dense/MatMul/ReadVariableOp/resource\n", "sequential/dense/MatMul/ReadVariableOp\n", "sequential/dense/MatMul\n", "sequential/dense/BiasAdd/ReadVariableOp/resource\n", "sequential/dense/BiasAdd/ReadVariableOp\n", "sequential/dense/BiasAdd\n", "sequential/dense/Relu\n", "sequential/dense_1/MatMul/ReadVariableOp/resource\n", "sequential/dense_1/MatMul/ReadVariableOp\n", "sequential/dense_1/MatMul\n", "sequential/dense_1/BiasAdd/ReadVariableOp/resource\n", "sequential/dense_1/BiasAdd/ReadVariableOp\n", "sequential/dense_1/BiasAdd\n", "sequential/dense_1/Relu\n", "sequential/dense_2/MatMul/ReadVariableOp/resource\n", "sequential/dense_2/MatMul/ReadVariableOp\n", "sequential/dense_2/MatMul\n", "sequential/dense_2/BiasAdd/ReadVariableOp/resource\n", "sequential/dense_2/BiasAdd/ReadVariableOp\n", "sequential/dense_2/BiasAdd\n", "sequential/dense_2/Softmax\n", "NoOp\n", "Identity\n", "--------------------------------------------------\n" ] } ], "source": [ " # Load frozen graph using TensorFlow 1.x functions\n", "with tf.io.gfile.GFile(\"./frozen_models/frozen_graph2.pb\", \"rb\") as f:\n", " graph_def = tf.compat.v1.GraphDef()\n", " loaded = graph_def.ParseFromString(f.read())\n", "\n", "# Wrap frozen graph to ConcreteFunctions\n", "frozen_func = wrap_frozen_graph(graph_def=graph_def,\n", " inputs=[\"x:0\"],\n", " outputs=[\"Identity:0\"],\n", " print_graph=False)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "cap = cv2.VideoCapture(\"rybki.mp4\")\n", "cap.set(cv2.CAP_PROP_FPS, 60)\n", "\n", "class_names=sorted(['Fish', \"Jellyfish\", 'Penguin', 'Puffin', 'Shark', 'Starfish', 'Stingray'])\n", "object_detector = cv2.createBackgroundSubtractorMOG2(history=100, varThreshold=50)\n", "\n", "\n", "width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) \n", "height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n", "fps = cap.get(cv2.CAP_PROP_FPS)\n", "out = cv2.VideoWriter('track_fish.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height))\n", "\n", "while True:\n", " ret, frame = cap.read()\n", " if(frame is not None):\n", " roi = frame[100: 900,330:1900]\n", " mask = object_detector.apply(roi)\n", " _, mask = cv2.threshold(mask,254,255, cv2.THRESH_BINARY)\n", " conturs, _ =cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n", "\n", " images = []\n", "\n", " for cnt in conturs:\n", " area = cv2.contourArea(cnt)\n", " if area > 200:\n", " #cv2.drawContours(roi,[cnt],-1,(0,255,0),2)\n", " x,y,w,h = cv2.boundingRect(cnt)\n", " rectangle = cv2.rectangle(roi,(x,y),(x+w,y+h),(0,255,0),3)\n", " image_to_predict = roi[y:y+h,x:x+w]\n", " image_to_predict = cv2.resize(image_to_predict,(227,227))\n", " # images.append((x,y,rectangle,np.expand_dims(image_to_predict,axis=0)))\n", " \n", " pred = frozen_func(x=tf.convert_to_tensor(image_to_predict[None, :], dtype='float32'))\n", " label = class_names[np.argmax(pred)]\n", " cv2.putText(rectangle, label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36,255,12), 1)\n", " # if images:\n", " # pred = model.predict(np.vstack([image[3] for image in images]))\n", " # labels = [class_names[np.argmax(pre)] for pre in pred]\n", " # for i,image in enumerate(images):\n", " roi = cv2.resize(roi, (960, 540)) \n", " cv2.imshow(\"roi\", roi)\n", "\n", " key = cv2.waitKey(30)\n", " if key == 27:\n", " break\n", "\n", " #out.write(frame)\n", " else:\n", " break\n", "\n", "\n", "#out.release()\n", "cap.release()\n", "cv2.destroyAllWindows()" ] }, { "cell_type": "code", "execution_count": 59, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "um", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.15" }, "orig_nbformat": 4, "vscode": { "interpreter": { "hash": "876e189cbbe99a9a838ece62aae1013186c4bb7e0254a10cfa2f9b2381853efb" } } }, "nbformat": 4, "nbformat_minor": 2 }