widzenie-komputerowe-projekt/rybki.ipynb

import cv2
import numpy as np
import tensorflow as tf
def wrap_frozen_graph(graph_def, inputs, outputs, print_graph=False):
    """Wrap a frozen TF1 GraphDef into a callable TF2 ConcreteFunction."""
    def _imports_graph_def():
        tf.compat.v1.import_graph_def(graph_def, name="")

    wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
    import_graph = wrapped_import.graph

    if print_graph:
        print("-" * 50)
        print("Frozen model layers:")
        layers = [op.name for op in import_graph.get_operations()]
        for layer in layers:
            print(layer)
        print("-" * 50)

    # Prune the imported graph down to a function mapping the requested
    # input tensors to the requested output tensors.
    return wrapped_import.prune(
        tf.nest.map_structure(import_graph.as_graph_element, inputs),
        tf.nest.map_structure(import_graph.as_graph_element, outputs))
# Load the frozen graph using TensorFlow 1.x compatibility functions
with tf.io.gfile.GFile("./frozen_models/frozen_graph_best_vgg.pb", "rb") as f:
    graph_def = tf.compat.v1.GraphDef()
    loaded = graph_def.ParseFromString(f.read())

# Wrap frozen graph to ConcreteFunctions
frozen_func = wrap_frozen_graph(graph_def=graph_def,
                                inputs=["x:0"],
                                outputs=["Identity:0"],
                                print_graph=False)
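The tensor names "x:0" and "Identity:0" must match the names the graph was exported with; if they do not, prune raises an error. A small optional check of the wrapped function's signature, using standard ConcreteFunction attributes (added here as a sketch, not part of the original notebook):

# Optional: confirm the input/output tensors of the wrapped function.
print("Inputs: ", [t.name for t in frozen_func.inputs])
print("Outputs:", [t.name for t in frozen_func.outputs])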
class_names = sorted(['Fish', 'Jellyfish', 'Lionfish', 'Shark', 'Stingray', 'Turtle'])
# Sanity-check the wrapped model on a single test image
img = cv2.imread('test.PNG')
# img.shape
img = cv2.resize(img, (227, 227))
pred = frozen_func(x=tf.convert_to_tensor(img[None, :], dtype='float32'))
label = class_names[np.argmax(pred)]
label
'Jellyfish'
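np.argmax only reports the top class; the sketch below also lists per-class scores. It assumes the frozen VGG head returns raw logits; if the exported model already ends in a softmax, the extra tf.nn.softmax is unnecessary (the ranking is unchanged either way).

# Sketch: list per-class scores for the same test image.
# Assumes the model outputs logits; if it already outputs probabilities,
# drop the tf.nn.softmax call.
probs = tf.nn.softmax(pred[0], axis=-1).numpy().ravel()
for name, p in sorted(zip(class_names, probs), key=lambda t: -t[1]):
    print(f"{name}: {p:.3f}")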
cap = cv2.VideoCapture("rybki4.mp4")
# cap.set(cv2.CAP_PROP_FPS, 60)

class_names = sorted(['Fish', 'Jellyfish', 'Lionfish', 'Shark', 'Stingray', 'Turtle'])
object_detector = cv2.createBackgroundSubtractorMOG2(history=100, varThreshold=50)


# width  = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) 
# height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# fps = cap.get(cv2.CAP_PROP_FPS)
# out = cv2.VideoWriter('track_fish.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height))

while True:
    ret, frame = cap.read()
    if ret:
        # Crop the region of interest and detect moving objects via background subtraction
        roi = frame[100:900, 330:1900]
        mask = object_detector.apply(roi)
        _, mask = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        images = []
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area > 300:
                # cv2.drawContours(roi, [cnt], -1, (0, 255, 0), 2)
                x, y, w, h = cv2.boundingRect(cnt)
                rectangle = cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 255, 0), 3)
                # image_to_predict = roi[y:y+h, x:x+w]
                # image_to_predict = cv2.resize(image_to_predict, (227, 227))
                # images.append((x, y, rectangle, np.expand_dims(image_to_predict, axis=0)))
                # pred = frozen_func(x=tf.convert_to_tensor(image_to_predict[None, :], dtype='float32'))
                # label = class_names[np.argmax(pred)]
                # cv2.putText(rectangle, label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 1)

        # if images:
        #     pred = model.predict(np.vstack([image[3] for image in images]))
        #     labels = [class_names[np.argmax(pre)] for pre in pred]
        #     for i, image in enumerate(images):
        roi = cv2.resize(roi, (960, 540))
        cv2.imshow("roi", roi)

        key = cv2.waitKey(30)
        if key == 27:  # ESC stops playback
            break

        # out.write(frame)
    else:
        break


# out.release()
cap.release()
cv2.destroyAllWindows()
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
Cell In[51], line 42
     39 roi = cv2.resize(roi, (960, 540)) 
     40 cv2.imshow("roi", roi)
---> 42 key = cv2.waitKey(30)
     43 if key == 27:
     44     break

KeyboardInterrupt:
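
The commented-out lines inside the contour loop outline per-detection classification. Below is a standalone sketch of that idea, kept separate so the tracking loop above stays as-is; classify_crop is a helper name introduced here, and it assumes the same 227x227 input size used in the single-image test.

# Sketch: classify one detected crop with the frozen model.
# classify_crop is introduced here for illustration; it assumes the model
# expects 227x227 BGR crops, matching the single-image test above.
def classify_crop(crop):
    crop = cv2.resize(crop, (227, 227))
    pred = frozen_func(x=tf.convert_to_tensor(crop[None, :], dtype='float32'))
    return class_names[np.argmax(pred)]

# Inside the contour loop this would replace the commented-out block, e.g.:
#     label = classify_crop(roi[y:y+h, x:x+w])
#     cv2.putText(rectangle, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 1)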