# projekt_widzenie/training_model/projekt_widzenie.py

# %% [markdown]
# # Import the required libraries
# %%
import pandas as pd
import numpy as np
import tensorflow as tf
# %% [markdown]
# # Preprocessing the image data
# %%
# Use a train/validation split of 4:1 (validation_split=0.2)
# %%
batch_size = 32
img_height = 256
img_width = 256
train_ds = tf.keras.utils.image_dataset_from_directory(
    "asl_alphabet_train/asl_alphabet_train/",
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
# %%
test_ds = tf.keras.utils.image_dataset_from_directory(
    "asl_alphabet_train/asl_alphabet_train/",
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
# %%
class_names = train_ds.class_names
print("Class names:", class_names)
print("Total classes:", len(class_names))
# %%
# Display a batch of sample images from the training data
# %%
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(29):
        ax = plt.subplot(6, 5, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
# %% [markdown]
# # Modelling and training
# %%
# Build a simple CNN: three Conv2D/MaxPooling2D blocks followed by dense layers
from tensorflow.keras import Sequential
from tensorflow.keras import layers

model = Sequential([
    layers.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(29, activation='softmax')  # one output unit per class (29 classes)
])
# %%
model.summary()
# %%
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
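# %% [markdown]
# Optionally (not part of the original training run), an EarlyStopping callback can stop
# training once the validation loss stops improving; pass `callbacks=[early_stop]` to
# `model.fit` below to use it. A minimal sketch with an assumed patience of 3 epochs:
# %%
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=3, restore_best_weights=True)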
# %%
# batch_size/validation_batch_size must not be passed here: train_ds and test_ds are
# already batched tf.data.Dataset objects (batch_size=32 above)
model.fit(train_ds, validation_data=test_ds, epochs=20)
model.save('sign_car_detection_model')
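# %% [markdown]
# Optional sanity check: the saved model can be reloaded from the same path and reused
# for inference without retraining. A minimal sketch using the directory written above.
# %%
reloaded_model = tf.keras.models.load_model('sign_car_detection_model')
reloaded_model.summary()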
# %% [markdown]
# # Prediction on the test data
# %%
import os

# List the test image files; each filename encodes its label before the first underscore
path = "asl_alphabet_test/asl_alphabet_test/"
dir_list = os.listdir(path)
print(dir_list)
# %%
actual = []
pred = []
for i in dir_list:
    # The label is the part of the filename before the first underscore
    actual.append(i.split('_')[0])
    test_image = tf.keras.utils.load_img(path + i, target_size=(256, 256))
    test_image = tf.keras.utils.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = model.predict(test_image)
    pred.append(class_names[np.argmax(result)])
# %%
from sklearn.metrics import accuracy_score, classification_report

# Pass the true labels first, then the predictions
print("Test accuracy =", accuracy_score(actual, pred))
print("Classification report:\n", classification_report(actual, pred))
# %%