332 KiB
332 KiB
! pip install -q kaggle
from google.colab import files
# run and paste your kaggle.json file (api key)
files.upload()
! mkdir ~/.kaggle
! cp kaggle.json ~/.kaggle/
! chmod 600 ~/.kaggle/kaggle.json
! kaggle datasets list
! kaggle datasets download -d jonathanoheix/face-expression-recognition-dataset
Downloading face-expression-recognition-dataset.zip to /content 93% 112M/121M [00:00<00:00, 217MB/s] 100% 121M/121M [00:00<00:00, 217MB/s]
Load Data
# Extract the downloaded dataset archive into the working directory.
from zipfile import ZipFile

# Path of the archive created by the Kaggle CLI in the cell above.
file_name = "/content/face-expression-recognition-dataset.zip"

# Open the archive in READ mode; named `archive` to avoid shadowing the
# builtin `zip`.
with ZipFile(file_name, 'r') as archive:
    # Print all the contents of the zip file.
    archive.printdir()
    # Extract all the files.
    print('Extracting all the files now...')
    archive.extractall()
    print('Done!')
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.utils import load_img, img_to_array
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Input, Dropout, GlobalAveragePooling2D
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import MaxPooling2D
from keras.models import Model, Sequential
#from tensorflow.python.keras.optimizer_v2.adam import Adam
#from keras.optimizers import adam_v2
from tensorflow.keras.optimizers import RMSprop
#from keras.optimizers import rmsprop
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.optimizers import Adam
#, SGD,RMSpro
# Image side length (pixels) used everywhere below, and dataset root.
picture_size = 64
folder_path = "/content/images/"

# Preview a 3x3 grid of training images for one expression class.
expression = 'happy'
plt.figure(figsize=(12, 12))
# List the class directory once instead of once per subplot iteration.
sample_files = os.listdir(os.path.join(folder_path, "train", expression))
# Indices 1..9 (index 0 is skipped, matching the original selection).
for i in range(1, 10):
    plt.subplot(3, 3, i)
    img = load_img(os.path.join(folder_path, "train", expression, sample_files[i]),
                   target_size=(picture_size, picture_size))
    plt.imshow(img)
plt.show()
Data loading with keras ImageDataGenerator (note: no augmentation transforms are actually configured below)
batch_size = 128

# Generators stream grayscale images from the class-subfolder layout.
# NOTE(review): neither generator configures augmentation or rescaling,
# despite the section heading — confirm this is intentional.
datagen_train = ImageDataGenerator()
datagen_val = ImageDataGenerator()

# Options shared by the train and validation flows.
_flow_kwargs = dict(
    target_size=(picture_size, picture_size),
    color_mode="grayscale",
    batch_size=batch_size,
    class_mode='categorical',
)

train_set = datagen_train.flow_from_directory(
    folder_path + "train", shuffle=True, **_flow_kwargs)
test_set = datagen_val.flow_from_directory(
    folder_path + "validation", shuffle=False, **_flow_kwargs)
Found 28821 images belonging to 7 classes. Found 7066 images belonging to 7 classes.
Define model
# Build a 4-conv-block + 2-dense-block CNN classifier over the 7 expressions.
no_of_classes = 7


def _add_conv_block(model, filters, kernel_size):
    """Append Conv2D -> BatchNorm -> ReLU -> MaxPool(2x2) -> Dropout(0.25)."""
    model.add(Conv2D(filters, kernel_size, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))


def _add_dense_block(model, units):
    """Append Dense -> BatchNorm -> ReLU -> Dropout(0.25)."""
    model.add(Dense(units))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.25))


model = Sequential()

# 1st CNN layer declares the grayscale input shape explicitly.
model.add(Conv2D(64, (3, 3), padding='same',
                 input_shape=(picture_size, picture_size, 1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# 2nd, 3rd, and 4th CNN layers (same pattern, different filters/kernels).
_add_conv_block(model, 128, (5, 5))
_add_conv_block(model, 512, (3, 3))
_add_conv_block(model, 512, (3, 3))

model.add(Flatten())

# Fully connected 1st and 2nd layers.
_add_dense_block(model, 256)
_add_dense_block(model, 512)

# Softmax output over the 7 expression classes.
model.add(Dense(no_of_classes, activation='softmax'))

# `learning_rate` replaces the deprecated `lr` keyword (removed in newer
# TF/Keras releases).
opt = Adam(learning_rate=0.0001)
model.compile(optimizer=opt, loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
Model: "sequential_9" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_36 (Conv2D) (None, 64, 64, 64) 640 batch_normalization_54 (Bat (None, 64, 64, 64) 256 chNormalization) activation_54 (Activation) (None, 64, 64, 64) 0 max_pooling2d_36 (MaxPoolin (None, 32, 32, 64) 0 g2D) dropout_54 (Dropout) (None, 32, 32, 64) 0 conv2d_37 (Conv2D) (None, 32, 32, 128) 204928 batch_normalization_55 (Bat (None, 32, 32, 128) 512 chNormalization) activation_55 (Activation) (None, 32, 32, 128) 0 max_pooling2d_37 (MaxPoolin (None, 16, 16, 128) 0 g2D) dropout_55 (Dropout) (None, 16, 16, 128) 0 conv2d_38 (Conv2D) (None, 16, 16, 512) 590336 batch_normalization_56 (Bat (None, 16, 16, 512) 2048 chNormalization) activation_56 (Activation) (None, 16, 16, 512) 0 max_pooling2d_38 (MaxPoolin (None, 8, 8, 512) 0 g2D) dropout_56 (Dropout) (None, 8, 8, 512) 0 conv2d_39 (Conv2D) (None, 8, 8, 512) 2359808 batch_normalization_57 (Bat (None, 8, 8, 512) 2048 chNormalization) activation_57 (Activation) (None, 8, 8, 512) 0 max_pooling2d_39 (MaxPoolin (None, 4, 4, 512) 0 g2D) dropout_57 (Dropout) (None, 4, 4, 512) 0 flatten_9 (Flatten) (None, 8192) 0 dense_27 (Dense) (None, 256) 2097408 batch_normalization_58 (Bat (None, 256) 1024 chNormalization) activation_58 (Activation) (None, 256) 0 dropout_58 (Dropout) (None, 256) 0 dense_28 (Dense) (None, 512) 131584 batch_normalization_59 (Bat (None, 512) 2048 chNormalization) activation_59 (Activation) (None, 512) 0 dropout_59 (Dropout) (None, 512) 0 dense_29 (Dense) (None, 7) 3591 ================================================================= Total params: 5,396,231 Trainable params: 5,392,263 Non-trainable params: 3,968 _________________________________________________________________
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau

# Persist only the best-so-far weights, judged by validation accuracy.
checkpoint = ModelCheckpoint(
    "./model_weights.h5",
    monitor='val_accuracy',
    verbose=1,
    save_best_only=True,
    mode='max',
)

# Shrink the learning rate by 5x when val_loss plateaus for 3 epochs.
reduce_learningrate = ReduceLROnPlateau(
    monitor='val_loss',
    factor=0.2,
    patience=3,
    verbose=1,
    min_delta=0.0001,
)

callbacks_list = [checkpoint, reduce_learningrate]
epochs = 25

# Re-compile with a larger starting learning rate for the training run.
# `learning_rate` replaces the deprecated `lr` keyword (removed in newer
# TF/Keras releases).
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(learning_rate=0.001),
              metrics=['accuracy'])
Train model
# One step consumes one batch, so floor-divide sample counts by the batch
# size to cover (almost) the full set each epoch.
train_steps = train_set.n // train_set.batch_size
val_steps = test_set.n // test_set.batch_size

history = model.fit(
    train_set,
    steps_per_epoch=train_steps,
    epochs=epochs,
    validation_data=test_set,
    validation_steps=val_steps,
    callbacks=callbacks_list,
)
Epoch 1/25 225/225 [==============================] - ETA: 0s - loss: 1.7601 - accuracy: 0.3258 Epoch 1: val_accuracy improved from -inf to 0.38281, saving model to ./model_weights.h5 225/225 [==============================] - 22s 94ms/step - loss: 1.7601 - accuracy: 0.3258 - val_loss: 1.6382 - val_accuracy: 0.3828 - lr: 0.0010 Epoch 2/25 225/225 [==============================] - ETA: 0s - loss: 1.4146 - accuracy: 0.4591 Epoch 2: val_accuracy improved from 0.38281 to 0.45355, saving model to ./model_weights.h5 225/225 [==============================] - 20s 91ms/step - loss: 1.4146 - accuracy: 0.4591 - val_loss: 1.4391 - val_accuracy: 0.4536 - lr: 0.0010 Epoch 3/25 225/225 [==============================] - ETA: 0s - loss: 1.2636 - accuracy: 0.5179 Epoch 3: val_accuracy improved from 0.45355 to 0.50767, saving model to ./model_weights.h5 225/225 [==============================] - 21s 93ms/step - loss: 1.2636 - accuracy: 0.5179 - val_loss: 1.3042 - val_accuracy: 0.5077 - lr: 0.0010 Epoch 4/25 225/225 [==============================] - ETA: 0s - loss: 1.1674 - accuracy: 0.5561 Epoch 4: val_accuracy did not improve from 0.50767 225/225 [==============================] - 20s 90ms/step - loss: 1.1674 - accuracy: 0.5561 - val_loss: 1.3320 - val_accuracy: 0.4825 - lr: 0.0010 Epoch 5/25 225/225 [==============================] - ETA: 0s - loss: 1.0966 - accuracy: 0.5830 Epoch 5: val_accuracy improved from 0.50767 to 0.53991, saving model to ./model_weights.h5 225/225 [==============================] - 21s 93ms/step - loss: 1.0966 - accuracy: 0.5830 - val_loss: 1.2159 - val_accuracy: 0.5399 - lr: 0.0010 Epoch 6/25 225/225 [==============================] - ETA: 0s - loss: 1.0419 - accuracy: 0.6049 Epoch 6: val_accuracy improved from 0.53991 to 0.58281, saving model to ./model_weights.h5 225/225 [==============================] - 20s 90ms/step - loss: 1.0419 - accuracy: 0.6049 - val_loss: 1.1275 - val_accuracy: 0.5828 - lr: 0.0010 Epoch 7/25 225/225 
[==============================] - ETA: 0s - loss: 0.9855 - accuracy: 0.6304 Epoch 7: val_accuracy did not improve from 0.58281 225/225 [==============================] - 20s 89ms/step - loss: 0.9855 - accuracy: 0.6304 - val_loss: 1.1953 - val_accuracy: 0.5558 - lr: 0.0010 Epoch 8/25 225/225 [==============================] - ETA: 0s - loss: 0.9394 - accuracy: 0.6445 Epoch 8: val_accuracy improved from 0.58281 to 0.58750, saving model to ./model_weights.h5 225/225 [==============================] - 20s 90ms/step - loss: 0.9394 - accuracy: 0.6445 - val_loss: 1.1160 - val_accuracy: 0.5875 - lr: 0.0010 Epoch 9/25 225/225 [==============================] - ETA: 0s - loss: 0.8893 - accuracy: 0.6663 Epoch 9: val_accuracy did not improve from 0.58750 225/225 [==============================] - 20s 89ms/step - loss: 0.8893 - accuracy: 0.6663 - val_loss: 1.2381 - val_accuracy: 0.5601 - lr: 0.0010 Epoch 10/25 225/225 [==============================] - ETA: 0s - loss: 0.8335 - accuracy: 0.6852 Epoch 10: val_accuracy improved from 0.58750 to 0.60099, saving model to ./model_weights.h5 225/225 [==============================] - 20s 90ms/step - loss: 0.8335 - accuracy: 0.6852 - val_loss: 1.1159 - val_accuracy: 0.6010 - lr: 0.0010 Epoch 11/25 225/225 [==============================] - ETA: 0s - loss: 0.7846 - accuracy: 0.7061 Epoch 11: val_accuracy did not improve from 0.60099 Epoch 11: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026. 
225/225 [==============================] - 20s 89ms/step - loss: 0.7846 - accuracy: 0.7061 - val_loss: 1.2025 - val_accuracy: 0.5655 - lr: 0.0010 Epoch 12/25 225/225 [==============================] - ETA: 0s - loss: 0.6298 - accuracy: 0.7674 Epoch 12: val_accuracy improved from 0.60099 to 0.63878, saving model to ./model_weights.h5 225/225 [==============================] - 20s 90ms/step - loss: 0.6298 - accuracy: 0.7674 - val_loss: 1.0266 - val_accuracy: 0.6388 - lr: 2.0000e-04 Epoch 13/25 225/225 [==============================] - ETA: 0s - loss: 0.5706 - accuracy: 0.7908 Epoch 13: val_accuracy did not improve from 0.63878 225/225 [==============================] - 20s 89ms/step - loss: 0.5706 - accuracy: 0.7908 - val_loss: 1.0355 - val_accuracy: 0.6385 - lr: 2.0000e-04 Epoch 14/25 225/225 [==============================] - ETA: 0s - loss: 0.5303 - accuracy: 0.8050 Epoch 14: val_accuracy did not improve from 0.63878 225/225 [==============================] - 20s 90ms/step - loss: 0.5303 - accuracy: 0.8050 - val_loss: 1.0725 - val_accuracy: 0.6388 - lr: 2.0000e-04 Epoch 15/25 225/225 [==============================] - ETA: 0s - loss: 0.5019 - accuracy: 0.8157 Epoch 15: val_accuracy improved from 0.63878 to 0.64645, saving model to ./model_weights.h5 Epoch 15: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05. 
225/225 [==============================] - 20s 90ms/step - loss: 0.5019 - accuracy: 0.8157 - val_loss: 1.0776 - val_accuracy: 0.6464 - lr: 2.0000e-04 Epoch 16/25 225/225 [==============================] - ETA: 0s - loss: 0.4565 - accuracy: 0.8337 Epoch 16: val_accuracy improved from 0.64645 to 0.65227, saving model to ./model_weights.h5 225/225 [==============================] - 20s 90ms/step - loss: 0.4565 - accuracy: 0.8337 - val_loss: 1.0730 - val_accuracy: 0.6523 - lr: 4.0000e-05 Epoch 17/25 225/225 [==============================] - ETA: 0s - loss: 0.4429 - accuracy: 0.8410 Epoch 17: val_accuracy did not improve from 0.65227 225/225 [==============================] - 20s 89ms/step - loss: 0.4429 - accuracy: 0.8410 - val_loss: 1.0827 - val_accuracy: 0.6517 - lr: 4.0000e-05 Epoch 18/25 225/225 [==============================] - ETA: 0s - loss: 0.4390 - accuracy: 0.8398 Epoch 18: val_accuracy improved from 0.65227 to 0.65469, saving model to ./model_weights.h5 Epoch 18: ReduceLROnPlateau reducing learning rate to 8.000000525498762e-06. 
225/225 [==============================] - 20s 90ms/step - loss: 0.4390 - accuracy: 0.8398 - val_loss: 1.0853 - val_accuracy: 0.6547 - lr: 4.0000e-05 Epoch 19/25 225/225 [==============================] - ETA: 0s - loss: 0.4266 - accuracy: 0.8454 Epoch 19: val_accuracy did not improve from 0.65469 225/225 [==============================] - 20s 89ms/step - loss: 0.4266 - accuracy: 0.8454 - val_loss: 1.0830 - val_accuracy: 0.6530 - lr: 8.0000e-06 Epoch 20/25 225/225 [==============================] - ETA: 0s - loss: 0.4197 - accuracy: 0.8485 Epoch 20: val_accuracy did not improve from 0.65469 225/225 [==============================] - 20s 89ms/step - loss: 0.4197 - accuracy: 0.8485 - val_loss: 1.0833 - val_accuracy: 0.6533 - lr: 8.0000e-06 Epoch 21/25 225/225 [==============================] - ETA: 0s - loss: 0.4251 - accuracy: 0.8440 Epoch 21: val_accuracy did not improve from 0.65469 Epoch 21: ReduceLROnPlateau reducing learning rate to 1.6000001778593287e-06. 225/225 [==============================] - 20s 89ms/step - loss: 0.4251 - accuracy: 0.8440 - val_loss: 1.0864 - val_accuracy: 0.6523 - lr: 8.0000e-06 Epoch 22/25 225/225 [==============================] - ETA: 0s - loss: 0.4230 - accuracy: 0.8463 Epoch 22: val_accuracy did not improve from 0.65469 225/225 [==============================] - 20s 89ms/step - loss: 0.4230 - accuracy: 0.8463 - val_loss: 1.0885 - val_accuracy: 0.6528 - lr: 1.6000e-06 Epoch 23/25 225/225 [==============================] - ETA: 0s - loss: 0.4205 - accuracy: 0.8486 Epoch 23: val_accuracy did not improve from 0.65469 225/225 [==============================] - 21s 92ms/step - loss: 0.4205 - accuracy: 0.8486 - val_loss: 1.0864 - val_accuracy: 0.6527 - lr: 1.6000e-06 Epoch 24/25 225/225 [==============================] - ETA: 0s - loss: 0.4184 - accuracy: 0.8478 Epoch 24: val_accuracy did not improve from 0.65469 Epoch 24: ReduceLROnPlateau reducing learning rate to 3.200000264769187e-07. 
225/225 [==============================] - 20s 90ms/step - loss: 0.4184 - accuracy: 0.8478 - val_loss: 1.0869 - val_accuracy: 0.6530 - lr: 1.6000e-06 Epoch 25/25 225/225 [==============================] - ETA: 0s - loss: 0.4186 - accuracy: 0.8486 Epoch 25: val_accuracy did not improve from 0.65469 225/225 [==============================] - 20s 90ms/step - loss: 0.4186 - accuracy: 0.8486 - val_loss: 1.0870 - val_accuracy: 0.6521 - lr: 3.2000e-07
# Plot the training curves side by side: loss (left) and accuracy (right).
plt.figure(figsize=(20, 10))

plt.subplot(1, 2, 1)
plt.suptitle('Optimizer : Adam', fontsize=10)
plt.ylabel('Loss', fontsize=16)
for metric, label in (('loss', 'Training Loss'),
                      ('val_loss', 'Validation Loss')):
    plt.plot(history.history[metric], label=label)
plt.legend(loc='upper right')

plt.subplot(1, 2, 2)
plt.ylabel('Accuracy', fontsize=16)
for metric, label in (('accuracy', 'Training Accuracy'),
                      ('val_accuracy', 'Validation Accuracy')):
    plt.plot(history.history[metric], label=label)
plt.legend(loc='lower right')

plt.show()
# Serialize the model architecture to JSON. The weights themselves are
# checkpointed to model_weights.h5 by the ModelCheckpoint callback.
with open("/content/model.json", "w") as json_file:
    json_file.write(model.to_json())
print("Saved model to disk")
Saved model to disk
from tensorflow.keras.models import model_from_json

# Rebuild the architecture from JSON, then load the best checkpointed weights.
model_json_file = '/content/model.json'
model_weights_file = '/content/model_weights.h5'
with open(model_json_file, "r") as json_file:
    loaded_model = model_from_json(json_file.read())
loaded_model.load_weights(model_weights_file)
import cv2

# One hand-picked validation image filename per expression class, used for
# the spot-check predictions below.
images_predict = {
    "angry": "8969.jpg",
    "disgust": "14954.jpg",
    "fear": "10409.jpg",
    "happy": "10019.jpg",
    "neutral": "10033.jpg",
    "sad": "10004.jpg",
    "surprise": "1033.jpg",
}
Test predictions
# NOTE(review): earlier cells use "/content/images/"; the extra "images/"
# level here suggests the archive nests a second directory — confirm on disk.
images_path = "/content/images/images/validation/"

# Mapping of class name -> class index learned by the generator.
label_map = train_set.class_indices
print(label_map)
# Invert it once for O(1) index -> name lookups, instead of scanning the
# dict for every prediction.
index_to_label = {v: k for k, v in label_map.items()}

for expected_label, file_name in images_predict.items():
    image_sample = cv2.imread(images_path + expected_label + "/" + file_name)
    # Convert to grayscale and resize to the model's input resolution.
    gray = cv2.cvtColor(image_sample, cv2.COLOR_BGR2GRAY)
    roi = cv2.resize(gray, (picture_size, picture_size))
    # Add batch and channel axes: (H, W) -> (1, H, W, 1).
    y_probs = loaded_model.predict(roi[np.newaxis, :, :, np.newaxis])
    val = np.argmax(y_probs, axis=-1)[0]
    print("Predicted: {}, Should be: {}".format(index_to_label[val],
                                                expected_label))
{'angry': 0, 'disgust': 1, 'fear': 2, 'happy': 3, 'neutral': 4, 'sad': 5, 'surprise': 6} 1/1 [==============================] - 0s 136ms/step Predicted: fear, Should be: angry 1/1 [==============================] - 0s 17ms/step Predicted: neutral, Should be: disgust 1/1 [==============================] - 0s 20ms/step Predicted: happy, Should be: fear 1/1 [==============================] - 0s 15ms/step Predicted: happy, Should be: happy 1/1 [==============================] - 0s 14ms/step Predicted: neutral, Should be: neutral 1/1 [==============================] - 0s 14ms/step Predicted: angry, Should be: sad 1/1 [==============================] - 0s 19ms/step Predicted: surprise, Should be: surprise