"""Streamlit app: recognize sign-language alphabet letters from an uploaded video.

Pipeline per uploaded .mp4: sample frames at a fixed rate, crop the hand
region with cvzone's HandDetector, resize to the classifier's input size,
and display each cropped frame with its predicted letter.
"""

import os
import tempfile
from io import StringIO  # NOTE(review): unused here — kept in case another chunk relies on it

import cv2 as cv
import numpy as np
import streamlit as st
import tensorflow as tf
from cvzone.HandTrackingModule import HandDetector

from crop_hand_skeleton import crop_hand
from process_video import classify, segment_video

if __name__ == "__main__":
    # Single-hand detector; mode=True treats every frame as a static image,
    # which suits isolated frames sampled from a video.
    detector = HandDetector(maxHands=1, mode=True, detectionCon=0.7, minTrackCon=0.8)
    # VGG16-based character classifier (saved-model directory).
    model = tf.keras.models.load_model('model_pred/VGG16_sign_char_detection_model')

    st.set_page_config(
        page_title="Projekt widzenie"
    )
    st.title("Projekt rozpoznawanie liter z alfabetu znaków migowych z wideo")
    st.write('Załaduj film')

    upload_movie = st.file_uploader("Wybierz film", type=["mp4"])
    if upload_movie:
        st.write("Film się ładuje.....")
        # cv.VideoCapture needs a real filesystem path, so spill the upload to a
        # named temp file. delete=False lets the file be reopened by path
        # (required on Windows, where an open NamedTemporaryFile cannot be
        # opened a second time); we remove it ourselves below.
        tfile = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
        try:
            tfile.write(upload_movie.read())
            # fix: close (and thereby flush) before OpenCV reads the file —
            # otherwise buffered bytes may not be on disk yet and decoding
            # can fail or truncate the video.
            tfile.close()

            video_cap = cv.VideoCapture(tfile.name)
            try:
                result, num = segment_video(video_cap, fps=10)
            finally:
                # fix: release the capture handle instead of leaking it.
                video_cap.release()

            st.write(f"Załadowano {num} klatek")
            for img in result:
                img_skeleton = crop_hand(img, detector)
                # The classifier expects 224x224 input (VGG16 default).
                img2 = cv.resize(img_skeleton, dsize=(224, 224))
                img_np = np.asarray(img2)
                # [:, :, ::-1] flips OpenCV's BGR channel order to RGB for
                # both the classifier and Streamlit's image display.
                classification = classify(img_np[:, :, ::-1], model)
                st.image(img_skeleton[:, :, ::-1])
                st.write(classification)
        finally:
            # fix: remove the temp file; delete=False would otherwise leak
            # one file per upload (Streamlit reruns this script on each event).
            os.unlink(tfile.name)