Load Data
# importing required modules
from zipfile import ZipFile

# specifying the zip file name
file_name = "/content/face-expression-recognition-dataset.zip"

# opening the zip file in READ mode
with ZipFile(file_name, 'r') as zip:
    # printing all the contents of the zip file
    zip.printdir()
    # extracting all the files
    print('Extracting all the files now...')
    zip.extractall()
    print('Done!')
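Before building the generators, it helps to confirm the layout the rest of the notebook assumes: /content/images/ containing train/ and validation/, each with one folder per expression. A quick check, using the same paths as below (adjust if the archive unpacks differently):
# Sanity check of the extracted directory layout (paths are assumptions taken
# from the cells below; adjust them if the archive extracts elsewhere).
import os

for split in ("train", "validation"):
    split_dir = os.path.join("/content/images", split)
    if os.path.isdir(split_dir):
        classes = sorted(os.listdir(split_dir))
        counts = {c: len(os.listdir(os.path.join(split_dir, c))) for c in classes}
        print(split, counts)
    else:
        print(split_dir, "not found - adjust the path to match the extracted layout")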
import numpy as np   # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import matplotlib.pyplot as plt

from tensorflow.keras.utils import load_img, img_to_array
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import (Dense, Input, Dropout, GlobalAveragePooling2D,
                                     Flatten, Conv2D, BatchNormalization,
                                     Activation, MaxPooling2D)
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
picture_size = 64
folder_path = "/content/images/"

expression = 'happy'
plt.figure(figsize=(12, 12))
for i in range(1, 10):
    plt.subplot(3, 3, i)
    img = load_img(folder_path + "train/" + expression + "/" +
                   os.listdir(folder_path + "train/" + expression)[i],
                   target_size=(picture_size, picture_size))
    plt.imshow(img)
plt.show()
Image augmentation using the Keras ImageDataGenerator
batch_size = 128

datagen_train = ImageDataGenerator()
datagen_val = ImageDataGenerator()

train_set = datagen_train.flow_from_directory(folder_path + "train",
                                              target_size=(picture_size, picture_size),
                                              color_mode="grayscale",
                                              batch_size=batch_size,
                                              class_mode='categorical',
                                              shuffle=True)

test_set = datagen_val.flow_from_directory(folder_path + "validation",
                                           target_size=(picture_size, picture_size),
                                           color_mode="grayscale",
                                           batch_size=batch_size,
                                           class_mode='categorical',
                                           shuffle=False)
Found 28821 images belonging to 7 classes. Found 7066 images belonging to 7 classes.
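Note that both generators above are created without any augmentation arguments, so they only stream the grayscale images from disk. If augmentation is wanted for training, ImageDataGenerator accepts the usual transforms; a minimal sketch with illustrative, untuned values:
# Illustrative augmentation settings for the training generator only
# (the parameter values are assumptions, not tuned for this dataset):
datagen_train_aug = ImageDataGenerator(rotation_range=10,
                                       width_shift_range=0.1,
                                       height_shift_range=0.1,
                                       zoom_range=0.1,
                                       horizontal_flip=True)
# The validation generator should stay transform-free so metrics remain comparable.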
Define model
no_of_classes = 7

model = Sequential()

# 1st CNN layer
model.add(Conv2D(64, (3, 3), padding='same', input_shape=(picture_size, picture_size, 1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# 2nd CNN layer
model.add(Conv2D(128, (5, 5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# 3rd CNN layer
model.add(Conv2D(512, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# 4th CNN layer
model.add(Conv2D(512, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())

# Fully connected 1st layer
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))

# Fully connected 2nd layer
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))

# Output layer
model.add(Dense(no_of_classes, activation='softmax'))
model.summary()
Model: "sequential_9" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_36 (Conv2D) (None, 64, 64, 64) 640 batch_normalization_54 (Bat (None, 64, 64, 64) 256 chNormalization) activation_54 (Activation) (None, 64, 64, 64) 0 max_pooling2d_36 (MaxPoolin (None, 32, 32, 64) 0 g2D) dropout_54 (Dropout) (None, 32, 32, 64) 0 conv2d_37 (Conv2D) (None, 32, 32, 128) 204928 batch_normalization_55 (Bat (None, 32, 32, 128) 512 chNormalization) activation_55 (Activation) (None, 32, 32, 128) 0 max_pooling2d_37 (MaxPoolin (None, 16, 16, 128) 0 g2D) dropout_55 (Dropout) (None, 16, 16, 128) 0 conv2d_38 (Conv2D) (None, 16, 16, 512) 590336 batch_normalization_56 (Bat (None, 16, 16, 512) 2048 chNormalization) activation_56 (Activation) (None, 16, 16, 512) 0 max_pooling2d_38 (MaxPoolin (None, 8, 8, 512) 0 g2D) dropout_56 (Dropout) (None, 8, 8, 512) 0 conv2d_39 (Conv2D) (None, 8, 8, 512) 2359808 batch_normalization_57 (Bat (None, 8, 8, 512) 2048 chNormalization) activation_57 (Activation) (None, 8, 8, 512) 0 max_pooling2d_39 (MaxPoolin (None, 4, 4, 512) 0 g2D) dropout_57 (Dropout) (None, 4, 4, 512) 0 flatten_9 (Flatten) (None, 8192) 0 dense_27 (Dense) (None, 256) 2097408 batch_normalization_58 (Bat (None, 256) 1024 chNormalization) activation_58 (Activation) (None, 256) 0 dropout_58 (Dropout) (None, 256) 0 dense_28 (Dense) (None, 512) 131584 batch_normalization_59 (Bat (None, 512) 2048 chNormalization) activation_59 (Activation) (None, 512) 0 dropout_59 (Dropout) (None, 512) 0 dense_29 (Dense) (None, 7) 3591 ================================================================= Total params: 5,396,231 Trainable params: 5,392,263 Non-trainable params: 3,968 _________________________________________________________________
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
import tensorflow as tf

checkpoint = ModelCheckpoint("./model_weights.h5",
                             monitor='val_accuracy',
                             verbose=1,
                             save_best_only=True,
                             mode='max')

reduce_learningrate = ReduceLROnPlateau(monitor='val_loss',
                                        factor=0.2,
                                        patience=3,
                                        verbose=1,
                                        min_delta=0.0001)

early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=5, verbose=1, mode='auto')

callbacks_list = [checkpoint, reduce_learningrate, early]

epochs = 100

model.compile(loss='categorical_crossentropy',
              optimizer=Adam(learning_rate=0.001),
              metrics=['accuracy', tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
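In this setup the best weights are persisted to disk by ModelCheckpoint while EarlyStopping only halts training. If keeping the best weights in memory is preferred, EarlyStopping also supports restore_best_weights; a hedged variant, not the configuration that was run above:
# Hedged variant: stop on val_accuracy and roll back to the best epoch in memory.
from tensorflow.keras.callbacks import EarlyStopping

early_alt = EarlyStopping(monitor='val_accuracy',
                          patience=5,
                          verbose=1,
                          mode='auto',
                          restore_best_weights=True)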
Train model
history = model.fit(train_set,
                    steps_per_epoch=train_set.n // train_set.batch_size,
                    epochs=epochs,
                    validation_data=test_set,
                    validation_steps=test_set.n // test_set.batch_size,
                    callbacks=callbacks_list)
Epoch 1/100 225/225 [==============================] - ETA: 0s - loss: 1.7420 - accuracy: 0.3358 - precision: 0.5071 - recall: 0.1418 Epoch 1: val_accuracy improved from -inf to 0.41293, saving model to ./model_weights.h5 225/225 [==============================] - 31s 90ms/step - loss: 1.7420 - accuracy: 0.3358 - precision: 0.5071 - recall: 0.1418 - val_loss: 1.5676 - val_accuracy: 0.4129 - val_precision: 0.5814 - val_recall: 0.2881 - lr: 0.0010 Epoch 2/100 225/225 [==============================] - ETA: 0s - loss: 1.4073 - accuracy: 0.4584 - precision: 0.6749 - recall: 0.2597 Epoch 2: val_accuracy improved from 0.41293 to 0.46293, saving model to ./model_weights.h5 225/225 [==============================] - 20s 88ms/step - loss: 1.4073 - accuracy: 0.4584 - precision: 0.6749 - recall: 0.2597 - val_loss: 1.5090 - val_accuracy: 0.4629 - val_precision: 0.5566 - val_recall: 0.3661 - lr: 0.0010 Epoch 3/100 225/225 [==============================] - ETA: 0s - loss: 1.2546 - accuracy: 0.5188 - precision: 0.7175 - recall: 0.3240 Epoch 3: val_accuracy improved from 0.46293 to 0.52088, saving model to ./model_weights.h5 225/225 [==============================] - 20s 89ms/step - loss: 1.2546 - accuracy: 0.5188 - precision: 0.7175 - recall: 0.3240 - val_loss: 1.2621 - val_accuracy: 0.5209 - val_precision: 0.7545 - val_recall: 0.2872 - lr: 0.0010 Epoch 4/100 225/225 [==============================] - ETA: 0s - loss: 1.1689 - accuracy: 0.5555 - precision: 0.7336 - recall: 0.3686 Epoch 4: val_accuracy did not improve from 0.52088 225/225 [==============================] - 19s 86ms/step - loss: 1.1689 - accuracy: 0.5555 - precision: 0.7336 - recall: 0.3686 - val_loss: 1.2827 - val_accuracy: 0.5192 - val_precision: 0.7136 - val_recall: 0.3149 - lr: 0.0010 Epoch 5/100 225/225 [==============================] - ETA: 0s - loss: 1.1016 - accuracy: 0.5805 - precision: 0.7429 - recall: 0.4076 Epoch 5: val_accuracy improved from 0.52088 to 0.57514, saving model to ./model_weights.h5 225/225 [==============================] - 20s 87ms/step - loss: 1.1016 - accuracy: 0.5805 - precision: 0.7429 - recall: 0.4076 - val_loss: 1.1303 - val_accuracy: 0.5751 - val_precision: 0.7255 - val_recall: 0.4307 - lr: 0.0010 Epoch 6/100 225/225 [==============================] - ETA: 0s - loss: 1.0435 - accuracy: 0.6037 - precision: 0.7507 - recall: 0.4417 Epoch 6: val_accuracy did not improve from 0.57514 225/225 [==============================] - 19s 86ms/step - loss: 1.0435 - accuracy: 0.6037 - precision: 0.7507 - recall: 0.4417 - val_loss: 1.2345 - val_accuracy: 0.5409 - val_precision: 0.6644 - val_recall: 0.4257 - lr: 0.0010 Epoch 7/100 225/225 [==============================] - ETA: 0s - loss: 0.9941 - accuracy: 0.6260 - precision: 0.7590 - recall: 0.4753 Epoch 7: val_accuracy did not improve from 0.57514 225/225 [==============================] - 20s 87ms/step - loss: 0.9941 - accuracy: 0.6260 - precision: 0.7590 - recall: 0.4753 - val_loss: 1.2263 - val_accuracy: 0.5273 - val_precision: 0.6728 - val_recall: 0.3714 - lr: 0.0010 Epoch 8/100 225/225 [==============================] - ETA: 0s - loss: 0.9451 - accuracy: 0.6442 - precision: 0.7697 - recall: 0.5077 Epoch 8: val_accuracy did not improve from 0.57514 Epoch 8: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026. 
225/225 [==============================] - 20s 87ms/step - loss: 0.9451 - accuracy: 0.6442 - precision: 0.7697 - recall: 0.5077 - val_loss: 1.1971 - val_accuracy: 0.5574 - val_precision: 0.6877 - val_recall: 0.4463 - lr: 0.0010 Epoch 9/100 225/225 [==============================] - ETA: 0s - loss: 0.8071 - accuracy: 0.7006 - precision: 0.8094 - recall: 0.5765 Epoch 9: val_accuracy improved from 0.57514 to 0.62713, saving model to ./model_weights.h5 225/225 [==============================] - 20s 88ms/step - loss: 0.8071 - accuracy: 0.7006 - precision: 0.8094 - recall: 0.5765 - val_loss: 1.0264 - val_accuracy: 0.6271 - val_precision: 0.7245 - val_recall: 0.5330 - lr: 2.0000e-04 Epoch 10/100 225/225 [==============================] - ETA: 0s - loss: 0.7597 - accuracy: 0.7165 - precision: 0.8103 - recall: 0.6141 Epoch 10: val_accuracy improved from 0.62713 to 0.63778, saving model to ./model_weights.h5 225/225 [==============================] - 20s 87ms/step - loss: 0.7597 - accuracy: 0.7165 - precision: 0.8103 - recall: 0.6141 - val_loss: 1.0111 - val_accuracy: 0.6378 - val_precision: 0.7350 - val_recall: 0.5436 - lr: 2.0000e-04 Epoch 11/100 225/225 [==============================] - ETA: 0s - loss: 0.7127 - accuracy: 0.7345 - precision: 0.8185 - recall: 0.6381 Epoch 11: val_accuracy did not improve from 0.63778 225/225 [==============================] - 19s 87ms/step - loss: 0.7127 - accuracy: 0.7345 - precision: 0.8185 - recall: 0.6381 - val_loss: 1.0742 - val_accuracy: 0.6089 - val_precision: 0.6938 - val_recall: 0.5284 - lr: 2.0000e-04 Epoch 12/100 225/225 [==============================] - ETA: 0s - loss: 0.6812 - accuracy: 0.7474 - precision: 0.8247 - recall: 0.6629 Epoch 12: val_accuracy did not improve from 0.63778 225/225 [==============================] - 20s 87ms/step - loss: 0.6812 - accuracy: 0.7474 - precision: 0.8247 - recall: 0.6629 - val_loss: 1.0393 - val_accuracy: 0.6374 - val_precision: 0.7161 - val_recall: 0.5658 - lr: 2.0000e-04 Epoch 13/100 225/225 [==============================] - ETA: 0s - loss: 0.6441 - accuracy: 0.7623 - precision: 0.8317 - recall: 0.6828 Epoch 13: val_accuracy did not improve from 0.63778 Epoch 13: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05. 
225/225 [==============================] - 19s 86ms/step - loss: 0.6441 - accuracy: 0.7623 - precision: 0.8317 - recall: 0.6828 - val_loss: 1.0473 - val_accuracy: 0.6325 - val_precision: 0.7094 - val_recall: 0.5625 - lr: 2.0000e-04 Epoch 14/100 225/225 [==============================] - ETA: 0s - loss: 0.5993 - accuracy: 0.7806 - precision: 0.8453 - recall: 0.7078 Epoch 14: val_accuracy improved from 0.63778 to 0.64986, saving model to ./model_weights.h5 225/225 [==============================] - 20s 87ms/step - loss: 0.5993 - accuracy: 0.7806 - precision: 0.8453 - recall: 0.7078 - val_loss: 1.0256 - val_accuracy: 0.6499 - val_precision: 0.7211 - val_recall: 0.5791 - lr: 4.0000e-05 Epoch 15/100 225/225 [==============================] - ETA: 0s - loss: 0.5814 - accuracy: 0.7874 - precision: 0.8508 - recall: 0.7183 Epoch 15: val_accuracy did not improve from 0.64986 225/225 [==============================] - 19s 87ms/step - loss: 0.5814 - accuracy: 0.7874 - precision: 0.8508 - recall: 0.7183 - val_loss: 1.0313 - val_accuracy: 0.6473 - val_precision: 0.7133 - val_recall: 0.5827 - lr: 4.0000e-05 Epoch 16/100 225/225 [==============================] - ETA: 0s - loss: 0.5639 - accuracy: 0.7922 - precision: 0.8535 - recall: 0.7272 Epoch 16: val_accuracy improved from 0.64986 to 0.65142, saving model to ./model_weights.h5 Epoch 16: ReduceLROnPlateau reducing learning rate to 8.000000525498762e-06. 225/225 [==============================] - 20s 87ms/step - loss: 0.5639 - accuracy: 0.7922 - precision: 0.8535 - recall: 0.7272 - val_loss: 1.0401 - val_accuracy: 0.6514 - val_precision: 0.7128 - val_recall: 0.5865 - lr: 4.0000e-05 Epoch 17/100 225/225 [==============================] - ETA: 0s - loss: 0.5567 - accuracy: 0.7941 - precision: 0.8535 - recall: 0.7311 Epoch 17: val_accuracy did not improve from 0.65142 225/225 [==============================] - 20s 87ms/step - loss: 0.5567 - accuracy: 0.7941 - precision: 0.8535 - recall: 0.7311 - val_loss: 1.0323 - val_accuracy: 0.6514 - val_precision: 0.7176 - val_recall: 0.5898 - lr: 8.0000e-06 Epoch 18/100 225/225 [==============================] - ETA: 0s - loss: 0.5549 - accuracy: 0.7951 - precision: 0.8538 - recall: 0.7285 Epoch 18: val_accuracy improved from 0.65142 to 0.65213, saving model to ./model_weights.h5 225/225 [==============================] - 20s 87ms/step - loss: 0.5549 - accuracy: 0.7951 - precision: 0.8538 - recall: 0.7285 - val_loss: 1.0329 - val_accuracy: 0.6521 - val_precision: 0.7159 - val_recall: 0.5892 - lr: 8.0000e-06 Epoch 19/100 225/225 [==============================] - ETA: 0s - loss: 0.5445 - accuracy: 0.7993 - precision: 0.8575 - recall: 0.7373 Epoch 19: val_accuracy improved from 0.65213 to 0.65256, saving model to ./model_weights.h5 Epoch 19: ReduceLROnPlateau reducing learning rate to 1.6000001778593287e-06. 
225/225 [==============================] - 20s 87ms/step - loss: 0.5445 - accuracy: 0.7993 - precision: 0.8575 - recall: 0.7373 - val_loss: 1.0337 - val_accuracy: 0.6526 - val_precision: 0.7167 - val_recall: 0.5899 - lr: 8.0000e-06 Epoch 20/100 225/225 [==============================] - ETA: 0s - loss: 0.5475 - accuracy: 0.7987 - precision: 0.8584 - recall: 0.7367 Epoch 20: val_accuracy improved from 0.65256 to 0.65270, saving model to ./model_weights.h5 225/225 [==============================] - 20s 87ms/step - loss: 0.5475 - accuracy: 0.7987 - precision: 0.8584 - recall: 0.7367 - val_loss: 1.0340 - val_accuracy: 0.6527 - val_precision: 0.7180 - val_recall: 0.5905 - lr: 1.6000e-06 Epoch 21/100 225/225 [==============================] - ETA: 0s - loss: 0.5481 - accuracy: 0.7980 - precision: 0.8559 - recall: 0.7342 Epoch 21: val_accuracy did not improve from 0.65270 225/225 [==============================] - 20s 87ms/step - loss: 0.5481 - accuracy: 0.7980 - precision: 0.8559 - recall: 0.7342 - val_loss: 1.0343 - val_accuracy: 0.6524 - val_precision: 0.7179 - val_recall: 0.5906 - lr: 1.6000e-06 Epoch 22/100 225/225 [==============================] - ETA: 0s - loss: 0.5465 - accuracy: 0.8001 - precision: 0.8579 - recall: 0.7357 Epoch 22: val_accuracy did not improve from 0.65270 Epoch 22: ReduceLROnPlateau reducing learning rate to 3.200000264769187e-07. 225/225 [==============================] - 19s 87ms/step - loss: 0.5465 - accuracy: 0.8001 - precision: 0.8579 - recall: 0.7357 - val_loss: 1.0348 - val_accuracy: 0.6518 - val_precision: 0.7165 - val_recall: 0.5911 - lr: 1.6000e-06 Epoch 23/100 225/225 [==============================] - ETA: 0s - loss: 0.5509 - accuracy: 0.7977 - precision: 0.8551 - recall: 0.7329 Epoch 23: val_accuracy improved from 0.65270 to 0.65298, saving model to ./model_weights.h5 225/225 [==============================] - 20s 88ms/step - loss: 0.5509 - accuracy: 0.7977 - precision: 0.8551 - recall: 0.7329 - val_loss: 1.0352 - val_accuracy: 0.6530 - val_precision: 0.7173 - val_recall: 0.5906 - lr: 3.2000e-07 Epoch 24/100 225/225 [==============================] - ETA: 0s - loss: 0.5495 - accuracy: 0.7991 - precision: 0.8578 - recall: 0.7371 Epoch 24: val_accuracy did not improve from 0.65298 225/225 [==============================] - 19s 86ms/step - loss: 0.5495 - accuracy: 0.7991 - precision: 0.8578 - recall: 0.7371 - val_loss: 1.0349 - val_accuracy: 0.6530 - val_precision: 0.7172 - val_recall: 0.5909 - lr: 3.2000e-07 Epoch 25/100 225/225 [==============================] - ETA: 0s - loss: 0.5462 - accuracy: 0.7996 - precision: 0.8584 - recall: 0.7371 Epoch 25: val_accuracy improved from 0.65298 to 0.65312, saving model to ./model_weights.h5 Epoch 25: ReduceLROnPlateau reducing learning rate to 6.400000529538374e-08. 
225/225 [==============================] - 20s 87ms/step - loss: 0.5462 - accuracy: 0.7996 - precision: 0.8584 - recall: 0.7371 - val_loss: 1.0344 - val_accuracy: 0.6531 - val_precision: 0.7175 - val_recall: 0.5903 - lr: 3.2000e-07 Epoch 26/100 225/225 [==============================] - ETA: 0s - loss: 0.5404 - accuracy: 0.8024 - precision: 0.8617 - recall: 0.7379 Epoch 26: val_accuracy did not improve from 0.65312 225/225 [==============================] - 19s 86ms/step - loss: 0.5404 - accuracy: 0.8024 - precision: 0.8617 - recall: 0.7379 - val_loss: 1.0357 - val_accuracy: 0.6524 - val_precision: 0.7160 - val_recall: 0.5906 - lr: 6.4000e-08 Epoch 27/100 225/225 [==============================] - ETA: 0s - loss: 0.5482 - accuracy: 0.7965 - precision: 0.8563 - recall: 0.7341 Epoch 27: val_accuracy did not improve from 0.65312 225/225 [==============================] - 19s 86ms/step - loss: 0.5482 - accuracy: 0.7965 - precision: 0.8563 - recall: 0.7341 - val_loss: 1.0346 - val_accuracy: 0.6531 - val_precision: 0.7173 - val_recall: 0.5905 - lr: 6.4000e-08 Epoch 28/100 225/225 [==============================] - ETA: 0s - loss: 0.5436 - accuracy: 0.8013 - precision: 0.8621 - recall: 0.7372 Epoch 28: val_accuracy improved from 0.65312 to 0.65327, saving model to ./model_weights.h5 Epoch 28: ReduceLROnPlateau reducing learning rate to 1.2800001059076749e-08. 225/225 [==============================] - 20s 87ms/step - loss: 0.5436 - accuracy: 0.8013 - precision: 0.8621 - recall: 0.7372 - val_loss: 1.0353 - val_accuracy: 0.6533 - val_precision: 0.7171 - val_recall: 0.5908 - lr: 6.4000e-08 Epoch 29/100 225/225 [==============================] - ETA: 0s - loss: 0.5509 - accuracy: 0.7975 - precision: 0.8563 - recall: 0.7354 Epoch 29: val_accuracy did not improve from 0.65327 225/225 [==============================] - 20s 88ms/step - loss: 0.5509 - accuracy: 0.7975 - precision: 0.8563 - recall: 0.7354 - val_loss: 1.0347 - val_accuracy: 0.6527 - val_precision: 0.7175 - val_recall: 0.5919 - lr: 1.2800e-08 Epoch 30/100 225/225 [==============================] - ETA: 0s - loss: 0.5535 - accuracy: 0.7965 - precision: 0.8578 - recall: 0.7320 Epoch 30: val_accuracy did not improve from 0.65327 225/225 [==============================] - 19s 86ms/step - loss: 0.5535 - accuracy: 0.7965 - precision: 0.8578 - recall: 0.7320 - val_loss: 1.0349 - val_accuracy: 0.6526 - val_precision: 0.7172 - val_recall: 0.5915 - lr: 1.2800e-08 Epoch 31/100 225/225 [==============================] - ETA: 0s - loss: 0.5521 - accuracy: 0.7971 - precision: 0.8559 - recall: 0.7335 Epoch 31: val_accuracy did not improve from 0.65327 Epoch 31: ReduceLROnPlateau reducing learning rate to 2.5600002118153498e-09. 
225/225 [==============================] - 19s 87ms/step - loss: 0.5521 - accuracy: 0.7971 - precision: 0.8559 - recall: 0.7335 - val_loss: 1.0362 - val_accuracy: 0.6528 - val_precision: 0.7168 - val_recall: 0.5922 - lr: 1.2800e-08 Epoch 32/100 225/225 [==============================] - ETA: 0s - loss: 0.5469 - accuracy: 0.7996 - precision: 0.8587 - recall: 0.7378 Epoch 32: val_accuracy did not improve from 0.65327 225/225 [==============================] - 19s 86ms/step - loss: 0.5469 - accuracy: 0.7996 - precision: 0.8587 - recall: 0.7378 - val_loss: 1.0347 - val_accuracy: 0.6527 - val_precision: 0.7173 - val_recall: 0.5912 - lr: 2.5600e-09 Epoch 33/100 225/225 [==============================] - ETA: 0s - loss: 0.5545 - accuracy: 0.7952 - precision: 0.8534 - recall: 0.7318 Epoch 33: val_accuracy did not improve from 0.65327 225/225 [==============================] - 19s 86ms/step - loss: 0.5545 - accuracy: 0.7952 - precision: 0.8534 - recall: 0.7318 - val_loss: 1.0355 - val_accuracy: 0.6524 - val_precision: 0.7167 - val_recall: 0.5915 - lr: 2.5600e-09 Epoch 33: early stopping
plt.figure(figsize=(20,10))
plt.subplot(1, 2, 1)
plt.suptitle('Optimizer : Adam', fontsize=10)
plt.ylabel('Loss', fontsize=16)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.legend(loc='upper right')
plt.subplot(1, 2, 2)
plt.ylabel('Accuracy', fontsize=16)
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.legend(loc='lower right')
plt.show()
plt.figure(figsize=(20,10))
plt.subplot(1, 2, 1)
plt.suptitle('Optimizer : Adam', fontsize=10)
plt.ylabel('Recall', fontsize=16)
plt.plot(history.history['recall'], label='Training Recall')
plt.plot(history.history['val_recall'], label='Validation Recall')
plt.legend(loc='upper right')
plt.subplot(1, 2, 2)
plt.ylabel('Precision', fontsize=16)
plt.plot(history.history['precision'], label='Training Precision')
plt.plot(history.history['val_precision'], label='Validation Precision')
plt.legend(loc='lower right')
plt.show()
# serialize model architecture to JSON
model_json = model.to_json()
with open("/content/model.json", "w") as json_file:
    json_file.write(model_json)

# weights are already saved to HDF5 by ModelCheckpoint (model_weights.h5)
# model.save_weights("model.h5")
print("Saved model to disk")
Saved model to disk
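Splitting the architecture (JSON) and the weights (HDF5 from the checkpoint) works, but Keras can also persist both in a single file; a hedged alternative, with an illustrative file name, not the path used elsewhere in this notebook:
# Hedged alternative: save architecture + weights together and reload in one call.
from tensorflow.keras.models import load_model

model.save("/content/expression_model.h5")            # illustrative file name
restored = load_model("/content/expression_model.h5")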
from tensorflow.keras.models import model_from_json

model_json_file = '/content/model.json'
model_weights_file = '/content/model_weights.h5'

with open(model_json_file, "r") as json_file:
    loaded_model_json = json_file.read()

loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(model_weights_file)
images_predict = {"angry": "8969.jpg", "disgust": "14954.jpg", "fear": "10409.jpg", "happy": "10019.jpg", "neutral": "10033.jpg", "sad": "10004.jpg", "surprise": "1033.jpg"}
import cv2
Test predictions
images_path = "/content/images/images/validation/"

label_map = train_set.class_indices
print(label_map)

for key in images_predict:
    image_sample = cv2.imread(images_path + key + "/" + images_predict[key])
    gray = cv2.cvtColor(image_sample, cv2.COLOR_BGR2GRAY)
    roi = cv2.resize(gray, (picture_size, picture_size))
    y_probs = loaded_model.predict(roi[np.newaxis, :, :, np.newaxis])
    val = np.argmax(y_probs, axis=-1)[0]
    for k, v in label_map.items():
        if val == v:
            print("Predicted: {}, Should be: {}".format(k, key))
{'angry': 0, 'disgust': 1, 'fear': 2, 'happy': 3, 'neutral': 4, 'sad': 5, 'surprise': 6}
1/1 [==============================] - 0s 136ms/step
Predicted: fear, Should be: angry
1/1 [==============================] - 0s 17ms/step
Predicted: neutral, Should be: disgust
1/1 [==============================] - 0s 20ms/step
Predicted: happy, Should be: fear
1/1 [==============================] - 0s 15ms/step
Predicted: happy, Should be: happy
1/1 [==============================] - 0s 14ms/step
Predicted: neutral, Should be: neutral
1/1 [==============================] - 0s 14ms/step
Predicted: angry, Should be: sad
1/1 [==============================] - 0s 19ms/step
Predicted: surprise, Should be: surprise
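Four of the seven hand-picked images are classified correctly, which is only a rough spot check. A fuller picture comes from scoring the entire validation generator; a hedged sketch using scikit-learn, assuming the test_set defined above (built with shuffle=False so labels align with predictions):
# Hedged sketch: full validation-set evaluation with a confusion matrix.
# Relies on test_set being created with shuffle=False.
from sklearn.metrics import classification_report, confusion_matrix

y_prob = loaded_model.predict(test_set, steps=int(np.ceil(test_set.n / test_set.batch_size)))
y_pred = np.argmax(y_prob, axis=1)
y_true = test_set.classes[:len(y_pred)]

class_names = list(test_set.class_indices.keys())
print(confusion_matrix(y_true, y_pred))
print(classification_report(y_true, y_pred, target_names=class_names))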