SMART-62: first version of the project view - capturing video from the camera and displaying finger info

s460930 2021-01-07 20:25:38 +01:00
parent 1116f84ca0
commit 1fc537a36c
8 changed files with 225 additions and 71 deletions


@@ -0,0 +1,52 @@
import cv2
import mediapipe as mp

import gestures.simple_gestures_lib as sgest


class MyVideoCapture:
    def __init__(self):
        # Open the video source (device 0) and request a small frame size
        self.vid = cv2.VideoCapture(0)
        self.vid.set(3, 300)  # property 3 = CAP_PROP_FRAME_WIDTH
        self.vid.set(4, 150)  # property 4 = CAP_PROP_FRAME_HEIGHT
        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_hands = mp.solutions.hands
        self.hands = self.mp_hands.Hands(
            min_detection_confidence=0.5, min_tracking_confidence=0.5)
        if not self.vid.isOpened():
            raise ValueError("Unable to open video source", 0)
        # Get video source width and height
        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)

    def get_frame(self):
        if self.vid.isOpened():
            success, image = self.vid.read()
            # Mirror the frame and convert BGR -> RGB for MediaPipe
            image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
            image.flags.writeable = False
            results = self.hands.process(image)
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # back to BGR for OpenCV drawing
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    self.mp_drawing.draw_landmarks(
                        image, hand_landmarks, self.mp_hands.HAND_CONNECTIONS)
            # if cv2.waitKey(33) == ord('s'):
            fingers = {'index': '', 'middle': '', 'ring': '', 'pinky': ''}
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    fingers['index'] = sgest.check_index_finger(hand_landmarks)
                    fingers['middle'] = sgest.check_middle_finger(hand_landmarks)
                    fingers['ring'] = sgest.check_ring_finger(hand_landmarks)
                    fingers['pinky'] = sgest.check_pinky_finger(hand_landmarks)
            return fingers, image, success

    # Release the video source when the view is left
    def release(self):
        print('My video capture delete')
        if self.vid.isOpened():
            self.vid.release()
            self.hands.close()
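
Note: the simple_gestures_lib helpers imported above are not part of this diff, so their exact return values are not visible here; the labels in ProjectView simply display whatever strings they produce. Below is a minimal sketch of what one such check might look like, assuming a per-finger status string and MediaPipe's landmark indices; check_index_finger here is a hypothetical reimplementation for illustration, not the project's actual code.

import mediapipe as mp

HAND = mp.solutions.hands.HandLandmark

def check_index_finger(hand_landmarks):
    # Assumption: the finger counts as "straight" when its tip is above
    # (smaller y in image coordinates) its PIP joint, otherwise "bent".
    tip = hand_landmarks.landmark[HAND.INDEX_FINGER_TIP]
    pip = hand_landmarks.landmark[HAND.INDEX_FINGER_PIP]
    return 'index straight' if tip.y < pip.y else 'index bent'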

client/test.py (new file, +78 lines)

@@ -0,0 +1,78 @@
import PIL.Image
import PIL.ImageTk
import cv2
import time
import tkinter


class App:
    def __init__(self, window, window_title, video_source=0):
        self.window = window
        self.window.title(window_title)
        self.video_source = video_source
        # open video source (by default this will try to open the computer webcam)
        self.vid = MyVideoCapture(self.video_source)
        # Create a canvas that can fit the above video source size
        self.canvas = tkinter.Canvas(window, width=self.vid.width, height=self.vid.height)
        self.canvas.pack()
        # Button that lets the user take a snapshot
        self.btn_snapshot = tkinter.Button(window, text="Snapshot", width=50, command=self.snapshot)
        self.btn_snapshot.pack(anchor=tkinter.CENTER, expand=True)
        # After it is called once, the update method will be automatically called every delay milliseconds
        self.delay = 15
        self.update()
        self.window.mainloop()

    def snapshot(self):
        # Get a frame from the video source
        ret, frame = self.vid.get_frame()
        if ret:
            cv2.imwrite("frame-" + time.strftime("%d-%m-%Y-%H-%M-%S") + ".jpg",
                        cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))

    def update(self):
        # Get a frame from the video source
        ret, frame = self.vid.get_frame()
        if ret:
            # keep a reference on self so the PhotoImage is not garbage-collected
            self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)
        self.window.after(self.delay, self.update)


class MyVideoCapture:
    def __init__(self, video_source=0):
        # Open the video source
        self.vid = cv2.VideoCapture(video_source)
        if not self.vid.isOpened():
            raise ValueError("Unable to open video source", video_source)
        # Get video source width and height
        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)

    def get_frame(self):
        if self.vid.isOpened():
            ret, frame = self.vid.read()
            if ret:
                # Return a boolean success flag and the current frame converted to RGB
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            else:
                return (ret, None)
        else:
            return (False, None)  # capture not opened, nothing to read

    # Release the video source when the object is destroyed
    def __del__(self):
        if self.vid.isOpened():
            self.vid.release()


App(tkinter.Tk(), "Tkinter and OpenCV")

client/test2.py (new file, +33 lines)

@@ -0,0 +1,33 @@
import cv2
import tkinter as tk
from PIL import Image, ImageTk


class MainWindow():
    def __init__(self, window, cap):
        self.window = window
        self.cap = cap
        self.width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        self.interval = 20  # Interval in ms to get the latest frame

        # Create canvas for image
        self.canvas = tk.Canvas(self.window, width=self.width, height=self.height)
        self.canvas.grid(row=0, column=0)

        # Update image on canvas
        self.update_image()

    def update_image(self):
        # Get the latest frame and convert image format
        self.image = cv2.cvtColor(self.cap.read()[1], cv2.COLOR_BGR2RGB)  # to RGB
        self.image = Image.fromarray(self.image)  # to PIL format
        self.image = ImageTk.PhotoImage(self.image)  # to ImageTk format

        # Update image
        self.canvas.create_image(0, 0, anchor=tk.NW, image=self.image)

        # Repeat every 'interval' ms
        self.window.after(self.interval, self.update_image)


if __name__ == "__main__":
    root = tk.Tk()
    MainWindow(root, cv2.VideoCapture(0))
    root.mainloop()
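
Note: neither test script releases its cv2.VideoCapture when the Tk window is closed (test.py relies on __del__, test2.py never releases at all). Below is a minimal sketch of an explicit shutdown hook, assuming the MainWindow class from client/test2.py above; the on_close helper and the test2 import path are assumptions for illustration, not part of the commit.

import cv2
import tkinter as tk

from test2 import MainWindow  # assumes this sketch sits next to client/test2.py

if __name__ == "__main__":
    root = tk.Tk()
    cap = cv2.VideoCapture(0)
    MainWindow(root, cap)

    def on_close():
        # Release the camera before tearing down the Tk main loop
        cap.release()
        root.destroy()

    root.protocol("WM_DELETE_WINDOW", on_close)
    root.mainloop()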


@@ -16,12 +16,14 @@ class LoginView(tk.Frame, AbstractView):
        label1.pack()
        input1 = tk.Entry(self)
        input1.insert(0, 'test@test.pl')
        input1.pack()
        label2 = tk.Label(self, text='Password:', font=FONT)
        label2.pack()
        input2 = tk.Entry(self, show="*")
        input2.insert(0, 'test123')
        input2.pack()
        button = tk.Button(self, text="Login", font=FONT,


@@ -1,6 +1,9 @@
import PIL.Image
import PIL.ImageTk
import tkinter as tk
from constants import PROJECT_VIEW_NAME
from constants import PROJECT_VIEW_NAME, FONT, PROJECTS_VIEW_NAME
from gestures.gesture_recognition import MyVideoCapture
from views.abstract_view import AbstractView
@@ -9,42 +12,66 @@ class ProjectView(tk.Frame, AbstractView):
    def __init__(self, parent, controller, main_view_controller):
        tk.Frame.__init__(self, parent)
        self.token = ''
        # label0 = tk.Label(self, text='Project name:', font=FONT)
        # label0.pack()
        #
        # input0 = tk.Entry(self)
        # input0.pack()
        #
        # button_add = tk.Button(self, text="Confirm and add", font=FONT,
        #                        command=lambda: self.add_project(main_view_controller, input0.get()))
        # button_add.pack()
        #
        # button_back = tk.Button(self, text="Back", font=FONT,
        #                         command=lambda: main_view_controller.show_frame(ProjectsView))
        # button_back.pack()
        self.window = controller
        self.main_view_controller = main_view_controller
        self.delay = 20  # refresh interval in ms
        self.vid = None
        self.canvas = None
        self.index_label = None
        self.middle_label = None
        self.ring_label = None
        self.pinky_label = None
        self.back_button = None

    @staticmethod
    def get_view_name() -> str:
        return PROJECT_VIEW_NAME

    def start(self):
        # fetching the project info goes here
        print("ok")
        self.vid = MyVideoCapture()
        # def add_project(self, controller, project_name):
        #     headers = {'Authorization': 'Bearer ' + self.token}
        #     data = {
        #         "name": str(project_name)
        #     }
        #     print(data)
        #     response = requests.post(URL_PROJECTS, json=data, headers=headers)
        #     print(response)
        #     if response.status_code == 201:
        #         response = response.json()
        #         controller.show_frame(ProjectsView)
        #     else:
        #         print("sth wrong")
        #         bad_pass_label = tk.Label(self, text='Something went wrong!', font=FONT)
        #         bad_pass_label.pack()
        #         return ()
        self.canvas = tk.Canvas(self, width=self.vid.width, height=self.vid.height)
        self.canvas.pack()
        self.index_label = tk.Label(self, font=FONT)
        self.index_label.pack(anchor=tk.CENTER, expand=True)
        self.middle_label = tk.Label(self, font=FONT)
        self.middle_label.pack(anchor=tk.CENTER, expand=True)
        self.ring_label = tk.Label(self, font=FONT)
        self.ring_label.pack(anchor=tk.CENTER, expand=True)
        self.pinky_label = tk.Label(self, font=FONT)
        self.pinky_label.pack(anchor=tk.CENTER, expand=True)
        self.back_button = tk.Button(self, text="Back", font=FONT, command=lambda: self.back_to_projects_view())
        self.back_button.pack()
        self.update()

    def update(self):
        # Get a frame from the video source and refresh the finger labels
        if self.vid is not None:
            fingers, frame, success = self.vid.get_frame()
            self.index_label['text'] = fingers['index']
            self.middle_label['text'] = fingers['middle']
            self.ring_label['text'] = fingers['ring']
            self.pinky_label['text'] = fingers['pinky']
            if success:
                self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
                self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)
        self.window.after(self.delay, self.update)

    def back_to_projects_view(self):
        self.main_view_controller.show_frame(PROJECTS_VIEW_NAME)
        self.vid.release()
        self.destroy_components()

    def destroy_components(self):
        self.vid = None
        self.canvas.destroy()
        self.index_label.destroy()
        self.middle_label.destroy()
        self.ring_label.destroy()
        self.pinky_label.destroy()
        self.back_button.destroy()
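
Note: the new view never stops the update loop it starts. back_to_projects_view releases the camera and clears self.vid, but the callback scheduled with self.window.after keeps firing (guarded only by the vid check), and re-entering the view via start() would schedule a second loop. Below is a minimal sketch of cancelling the pending callback when leaving the view, assuming the structure above; the after_id attribute and ProjectViewSketch class are illustrative additions, not part of the commit.

import tkinter as tk

class ProjectViewSketch(tk.Frame):
    """Illustrative fragment: shows cancelling the scheduled update callback."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.window = controller
        self.delay = 20
        self.after_id = None  # hypothetical attribute, not in the commit

    def update(self):
        # ... refresh the canvas and finger labels here ...
        # keep the id so the loop can be stopped when leaving the view
        self.after_id = self.window.after(self.delay, self.update)

    def back_to_projects_view(self):
        if self.after_id is not None:
            self.window.after_cancel(self.after_id)
            self.after_id = None
        # ... release the capture and destroy widgets as in the real view ...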


@@ -1,38 +0,0 @@
import cv2
import mediapipe as mp
import simple_gestures_lib as sgest

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
from math import sqrt

hands = mp_hands.Hands(
    min_detection_confidence=0.5, min_tracking_confidence=0.5)
cap = cv2.VideoCapture(0)
while cap.isOpened():
    success, image = cap.read()
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
    image.flags.writeable = False
    results = hands.process(image)
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(
                image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
    if cv2.waitKey(33) == ord('s'):
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                print(sgest.check_index_finger(hand_landmarks))
                print(sgest.check_middle_finger(hand_landmarks))
                print(sgest.check_ring_finger(hand_landmarks))
                print(sgest.check_pinky_finger(hand_landmarks))
    cv2.imshow('MediaPipe Hands', image)
    if cv2.waitKey(5) & 0xFF == 27:
        break
hands.close()
cap.release()

Binary file not shown.