342 KiB
342 KiB
Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop
Zestaw 9-10/zadanie2 - AlexNet, VGG16, ResNet on the PlantVillage dataset
Przygotowanie danych
from IPython.display import Image, display
import sys
import subprocess
import pkg_resources
import numpy as np

# Install any required third-party packages that are not already present
# in the active environment.
required = {'scikit-image'}
installed = {pkg.key for pkg in pkg_resources.working_set}
missing = required - installed
if missing:
    # Use the running interpreter so pip targets the active (virtual) env.
    subprocess.check_call(
        [sys.executable, '-m', 'pip', 'install', *missing],
        stdout=subprocess.DEVNULL,
    )
def load_data(input_dir, img_size):
    """Load every image under ``input_dir`` (one sub-directory per class).

    Each image is reduced to 3 channels (a possible alpha channel is
    dropped; grayscale images are replicated to 3 channels), resized to
    ``img_size`` with area interpolation, and scaled to the [0, 1] range.

    Parameters:
        input_dir -- root directory whose sub-directories are the classes
        img_size  -- (width, height) target size passed to cv2.resize

    Returns a dict with:
        values           -- ndarray of shape (n, H, W, 3)
        categories_name  -- list of class (sub-directory) names
        categories_count -- number of images loaded per class
        labels           -- class name for every loaded image
    """
    import numpy as np
    from pathlib import Path
    from skimage.io import imread
    import cv2 as cv

    image_dir = Path(input_dir)
    # One sub-directory per category (replaces the original duplicated
    # os.listdir + iterdir scans).
    folders = [d for d in image_dir.iterdir() if d.is_dir()]
    categories_name = [d.name for d in folders]

    ds_img = []
    categories_count = []
    labels = []
    for direc in folders:
        count = 0
        for obj in direc.iterdir():
            # Skip nested directories and Windows folder metadata files.
            if not obj.is_file() or obj.name == 'desktop.ini':
                continue
            img = imread(str(obj))  # ndarray: H x W [x channels]
            if img.ndim == 2:
                # Grayscale image: the original code crashed here on a
                # 2-D array; replicate the channel to get H x W x 3.
                img = np.stack([img] * 3, axis=-1)
            img = img[:, :, :3]  # drop a possible alpha channel
            img = cv.resize(img, img_size, interpolation=cv.INTER_AREA)
            img = img / 255  # normalize to [0, 1]
            ds_img.append(img)
            labels.append(direc.name)
            count += 1
        categories_count.append(count)

    return {
        "values": np.array(ds_img),
        "categories_name": categories_name,
        "categories_count": categories_count,
        "labels": labels,
    }
def get_run_logdir(root_logdir):
    """Return a unique, timestamped run directory path under *root_logdir*."""
    import os
    import time

    timestamp = time.strftime("run_%Y_%m_%d-%H_%M_%S")
    return os.path.join(root_logdir, timestamp)
def diagram_setup(model_name):
    """Create a TensorBoard callback logging under logs/fit/<model_name>/.

    Returns the callback so callers can pass it to ``model.fit`` — the
    original version discarded it, which made the function a no-op.
    """
    from tensorflow import keras
    import os

    # os.path.join keeps the path portable; the original hard-coded
    # escaped backslashes only worked on Windows.
    root_logdir = os.path.join(os.curdir, "logs", "fit", model_name)
    run_logdir = get_run_logdir(root_logdir)
    return keras.callbacks.TensorBoard(run_logdir)
def prepare_data(path, img_size, test_size, val_size):
    """Load the image dataset and split it into batched tf.data Datasets.

    Parameters:
        path      -- root directory with one sub-directory per class
        img_size  -- (width, height) forwarded to load_data
        test_size -- fraction of all samples held out for testing
        val_size  -- fraction of the remaining train samples used for validation

    Returns (train_ds, test_ds, validation_ds), each shuffled and batched
    with batch_size=32 and drop_remainder=True.
    """
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import LabelEncoder
    import tensorflow as tf

    data = load_data(path, img_size)
    values = data['values']
    labels = data['labels']

    X_train, X_test, y_train, y_test = train_test_split(
        values, labels, test_size=test_size, random_state=42)
    X_train, X_validate, y_train, y_validate = train_test_split(
        X_train, y_train, test_size=val_size, random_state=42)

    # Fit the encoder once on the training labels and reuse that mapping
    # for validation/test.  The original called fit_transform on every
    # split, which can assign different integers to the same class if a
    # class is missing from one split.
    class_le = LabelEncoder()
    y_train_enc = class_le.fit_transform(y_train)
    y_validate_enc = class_le.transform(y_validate)
    y_test_enc = class_le.transform(y_test)

    train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))
    validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))
    test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))

    train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()
    test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()
    validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()

    # Split sizes
    print("Training:", train_ds_size)
    print("Test:", test_ds_size)
    print("Validation:", validation_ds_size)

    # Shuffle each split with a buffer covering that whole split (the
    # original reused the training size as the buffer for every split).
    train_ds = (train_ds.shuffle(buffer_size=train_ds_size)
                        .batch(batch_size=32, drop_remainder=True))
    test_ds = (test_ds.shuffle(buffer_size=test_ds_size)
                      .batch(batch_size=32, drop_remainder=True))
    validation_ds = (validation_ds.shuffle(buffer_size=validation_ds_size)
                                  .batch(batch_size=32, drop_remainder=True))
    return train_ds, test_ds, validation_ds
AlexNet
from tensorflow import keras
import tensorflow as tf
import os
import time

# AlexNet-style CNN: 227x227 RGB input, 12-way softmax output.
model = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11, 11), strides=(4, 4),
                        activation='relu', input_shape=(227, 227, 3)),
    keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2)),
    keras.layers.Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1),
                        activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2)),
    keras.layers.Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1),
                        activation='relu', padding="same"),
    keras.layers.Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1),
                        activation='relu', padding="same"),
    keras.layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),
                        activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(12, activation='softmax'),
])

# `lr` is deprecated in tf.optimizers.SGD (the run log warned about it);
# `learning_rate` is the supported keyword.
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=tf.optimizers.SGD(learning_rate=.001),
              metrics=['accuracy'])
model.summary()
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 55, 55, 96) 34944 max_pooling2d (MaxPooling2D (None, 27, 27, 96) 0 ) conv2d_1 (Conv2D) (None, 27, 27, 256) 614656 max_pooling2d_1 (MaxPooling (None, 13, 13, 256) 0 2D) conv2d_2 (Conv2D) (None, 13, 13, 384) 885120 conv2d_3 (Conv2D) (None, 13, 13, 384) 1327488 conv2d_4 (Conv2D) (None, 13, 13, 256) 884992 max_pooling2d_2 (MaxPooling (None, 6, 6, 256) 0 2D) flatten (Flatten) (None, 9216) 0 dense (Dense) (None, 4096) 37752832 dense_1 (Dense) (None, 4096) 16781312 dense_2 (Dense) (None, 12) 49164 ================================================================= Total params: 58,330,508 Trainable params: 58,330,508 Non-trainable params: 0 _________________________________________________________________
# Build 227x227 datasets for AlexNet: 20% test, 20% of the rest for validation.
train_ds_a, test_ds_a, val_ds_a = prepare_data("./plantvillage/color", (227, 227), 0.2, 0.2)
Training: 2990 Test: 935 Validation: 748
from keras.callbacks import ModelCheckpoint, EarlyStopping

# Save the best model (by validation accuracy) each epoch.  `period` is
# deprecated (the run log warned); the default save_freq='epoch' already
# checks once per epoch.
checkpoint = ModelCheckpoint("alex_2.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=False,
                             mode='auto')
# Stop early if validation accuracy has not improved for 20 epochs.
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20,
                      verbose=1, mode='auto')

# Model.fit_generator is deprecated; Model.fit accepts tf.data datasets.
alex = model.fit(
    train_ds_a,
    validation_data=val_ds_a,
    steps_per_epoch=len(train_ds_a),
    validation_steps=len(val_ds_a),
    epochs=25,
    callbacks=[checkpoint, early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. /var/folders/3r/c8tg1h051m18qhsdccdysrt40000gn/T/ipykernel_14470/2397086753.py:6: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators. alex = model.fit_generator(
Epoch 1/25
2023-01-09 18:33:27.636772: W tensorflow/tsl/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz
93/93 [==============================] - ETA: 0s - loss: 1.5758 - accuracy: 0.3474 Epoch 1: val_accuracy improved from -inf to 0.38179, saving model to alex_2.h5 93/93 [==============================] - 95s 1s/step - loss: 1.5758 - accuracy: 0.3474 - val_loss: 1.4164 - val_accuracy: 0.3818 Epoch 2/25 93/93 [==============================] - ETA: 0s - loss: 1.4061 - accuracy: 0.3609 Epoch 2: val_accuracy did not improve from 0.38179 93/93 [==============================] - 100s 1s/step - loss: 1.4061 - accuracy: 0.3609 - val_loss: 1.4139 - val_accuracy: 0.3098 Epoch 3/25 93/93 [==============================] - ETA: 0s - loss: 1.3158 - accuracy: 0.3999 Epoch 3: val_accuracy improved from 0.38179 to 0.38995, saving model to alex_2.h5 93/93 [==============================] - 102s 1s/step - loss: 1.3158 - accuracy: 0.3999 - val_loss: 1.2847 - val_accuracy: 0.3899 Epoch 4/25 93/93 [==============================] - ETA: 0s - loss: 1.2229 - accuracy: 0.4792 Epoch 4: val_accuracy improved from 0.38995 to 0.57201, saving model to alex_2.h5 93/93 [==============================] - 102s 1s/step - loss: 1.2229 - accuracy: 0.4792 - val_loss: 1.1064 - val_accuracy: 0.5720 Epoch 5/25 93/93 [==============================] - ETA: 0s - loss: 1.0983 - accuracy: 0.5625 Epoch 5: val_accuracy improved from 0.57201 to 0.64946, saving model to alex_2.h5 93/93 [==============================] - 104s 1s/step - loss: 1.0983 - accuracy: 0.5625 - val_loss: 0.9796 - val_accuracy: 0.6495 Epoch 6/25 93/93 [==============================] - ETA: 0s - loss: 0.9776 - accuracy: 0.6253 Epoch 6: val_accuracy did not improve from 0.64946 93/93 [==============================] - 105s 1s/step - loss: 0.9776 - accuracy: 0.6253 - val_loss: 1.1308 - val_accuracy: 0.5476 Epoch 7/25 93/93 [==============================] - ETA: 0s - loss: 0.8467 - accuracy: 0.6969 Epoch 7: val_accuracy improved from 0.64946 to 0.67663, saving model to alex_2.h5 93/93 [==============================] - 105s 1s/step - loss: 
0.8467 - accuracy: 0.6969 - val_loss: 0.9045 - val_accuracy: 0.6766 Epoch 8/25 93/93 [==============================] - ETA: 0s - loss: 0.7437 - accuracy: 0.7312 Epoch 8: val_accuracy improved from 0.67663 to 0.77853, saving model to alex_2.h5 93/93 [==============================] - 105s 1s/step - loss: 0.7437 - accuracy: 0.7312 - val_loss: 0.5997 - val_accuracy: 0.7785 Epoch 9/25 93/93 [==============================] - ETA: 0s - loss: 0.6769 - accuracy: 0.7638 Epoch 9: val_accuracy improved from 0.77853 to 0.80978, saving model to alex_2.h5 93/93 [==============================] - 105s 1s/step - loss: 0.6769 - accuracy: 0.7638 - val_loss: 0.5234 - val_accuracy: 0.8098 Epoch 10/25 93/93 [==============================] - ETA: 0s - loss: 0.5742 - accuracy: 0.7950 Epoch 10: val_accuracy did not improve from 0.80978 93/93 [==============================] - 106s 1s/step - loss: 0.5742 - accuracy: 0.7950 - val_loss: 1.3374 - val_accuracy: 0.5068 Epoch 11/25 93/93 [==============================] - ETA: 0s - loss: 0.5694 - accuracy: 0.8041 Epoch 11: val_accuracy improved from 0.80978 to 0.84375, saving model to alex_2.h5 93/93 [==============================] - 107s 1s/step - loss: 0.5694 - accuracy: 0.8041 - val_loss: 0.5118 - val_accuracy: 0.8438 Epoch 12/25 93/93 [==============================] - ETA: 0s - loss: 0.4730 - accuracy: 0.8347 Epoch 12: val_accuracy did not improve from 0.84375 93/93 [==============================] - 106s 1s/step - loss: 0.4730 - accuracy: 0.8347 - val_loss: 0.6001 - val_accuracy: 0.7826 Epoch 13/25 93/93 [==============================] - ETA: 0s - loss: 0.4713 - accuracy: 0.8364 Epoch 13: val_accuracy did not improve from 0.84375 93/93 [==============================] - 106s 1s/step - loss: 0.4713 - accuracy: 0.8364 - val_loss: 0.5150 - val_accuracy: 0.8125 Epoch 14/25 93/93 [==============================] - ETA: 0s - loss: 0.3892 - accuracy: 0.8646 Epoch 14: val_accuracy improved from 0.84375 to 0.86821, saving model to alex_2.h5 
93/93 [==============================] - 110s 1s/step - loss: 0.3892 - accuracy: 0.8646 - val_loss: 0.3537 - val_accuracy: 0.8682 Epoch 15/25 93/93 [==============================] - ETA: 0s - loss: 0.3787 - accuracy: 0.8632 Epoch 15: val_accuracy did not improve from 0.86821 93/93 [==============================] - 109s 1s/step - loss: 0.3787 - accuracy: 0.8632 - val_loss: 0.5223 - val_accuracy: 0.7880 Epoch 16/25 93/93 [==============================] - ETA: 0s - loss: 0.3409 - accuracy: 0.8770 Epoch 16: val_accuracy did not improve from 0.86821 93/93 [==============================] - 110s 1s/step - loss: 0.3409 - accuracy: 0.8770 - val_loss: 0.3797 - val_accuracy: 0.8451 Epoch 17/25 93/93 [==============================] - ETA: 0s - loss: 0.4428 - accuracy: 0.8508 Epoch 17: val_accuracy did not improve from 0.86821 93/93 [==============================] - 108s 1s/step - loss: 0.4428 - accuracy: 0.8508 - val_loss: 0.9765 - val_accuracy: 0.6304 Epoch 18/25 93/93 [==============================] - ETA: 0s - loss: 0.3638 - accuracy: 0.8740 Epoch 18: val_accuracy improved from 0.86821 to 0.88451, saving model to alex_2.h5 93/93 [==============================] - 108s 1s/step - loss: 0.3638 - accuracy: 0.8740 - val_loss: 0.2889 - val_accuracy: 0.8845 Epoch 19/25 93/93 [==============================] - ETA: 0s - loss: 0.2869 - accuracy: 0.8942 Epoch 19: val_accuracy improved from 0.88451 to 0.89674, saving model to alex_2.h5 93/93 [==============================] - 109s 1s/step - loss: 0.2869 - accuracy: 0.8942 - val_loss: 0.2879 - val_accuracy: 0.8967 Epoch 20/25 93/93 [==============================] - ETA: 0s - loss: 0.2724 - accuracy: 0.9015 Epoch 20: val_accuracy improved from 0.89674 to 0.91168, saving model to alex_2.h5 93/93 [==============================] - 108s 1s/step - loss: 0.2724 - accuracy: 0.9015 - val_loss: 0.2781 - val_accuracy: 0.9117 Epoch 21/25 93/93 [==============================] - ETA: 0s - loss: 0.5926 - accuracy: 0.8021 Epoch 21: 
val_accuracy did not improve from 0.91168 93/93 [==============================] - 107s 1s/step - loss: 0.5926 - accuracy: 0.8021 - val_loss: 0.3587 - val_accuracy: 0.8709 Epoch 22/25 93/93 [==============================] - ETA: 0s - loss: 0.2875 - accuracy: 0.8978 Epoch 22: val_accuracy did not improve from 0.91168 93/93 [==============================] - 108s 1s/step - loss: 0.2875 - accuracy: 0.8978 - val_loss: 0.2895 - val_accuracy: 0.9035 Epoch 23/25 93/93 [==============================] - ETA: 0s - loss: 0.2233 - accuracy: 0.9267 Epoch 23: val_accuracy did not improve from 0.91168 93/93 [==============================] - 108s 1s/step - loss: 0.2233 - accuracy: 0.9267 - val_loss: 0.3617 - val_accuracy: 0.8723 Epoch 24/25 93/93 [==============================] - ETA: 0s - loss: 0.2837 - accuracy: 0.9005 Epoch 24: val_accuracy did not improve from 0.91168 93/93 [==============================] - 107s 1s/step - loss: 0.2837 - accuracy: 0.9005 - val_loss: 0.3122 - val_accuracy: 0.8981 Epoch 25/25 93/93 [==============================] - ETA: 0s - loss: 0.2049 - accuracy: 0.9368 Epoch 25: val_accuracy did not improve from 0.91168 93/93 [==============================] - 109s 1s/step - loss: 0.2049 - accuracy: 0.9368 - val_loss: 0.3776 - val_accuracy: 0.8750
import matplotlib.pyplot as plt

# AlexNet training curves: accuracy and loss for train and validation sets.
for series in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(alex.history[series])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy", "Validation Accuracy", "Loss", "Validation Loss"])
plt.show()

# Final held-out evaluation.
model.evaluate(test_ds_a)
29/29 [==============================] - 10s 306ms/step - loss: 0.3675 - accuracy: 0.8631
[0.367510586977005, 0.8631465435028076]
VGG16
# Build 224x224 datasets for VGG16; same 20%/20% split as before.
train_ds_v, test_ds_v, val_ds_v = prepare_data('./plantvillage/color', (224, 224), 0.2, 0.2)
Training: 2990 Test: 935 Validation: 748
import os
from glob import glob

import keras
import numpy as np
import matplotlib.pyplot as plt
from keras.applications import VGG16
from keras.layers import Input, Lambda, Dense, Flatten, Conv2D, MaxPool2D
from keras.models import Model, Sequential
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator

import ssl
# HACK: disables TLS certificate verification process-wide so the
# pretrained weights can download behind a broken certificate chain.
# This is insecure -- prefer fixing the local certificate store.
ssl._create_default_https_context = ssl._create_unverified_context

IMAGE_SIZE = [224, 224]

# VGG16 backbone pretrained on ImageNet, without its classification head.
vgg2 = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# Freeze the pretrained weights; only the new head is trained.
for layer in vgg2.layers:
    layer.trainable = False

# Number of target classes.
classes = 5

# New classification head on top of the frozen backbone.
# (The original hard-coded Dense(5) instead of using `classes`.)
x = Flatten()(vgg2.output)
prediction = Dense(classes, activation='softmax')(x)

model = Model(inputs=vgg2.input, outputs=prediction)
model.summary()

model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)

# Model.fit_generator is deprecated (the run log warned); Model.fit
# accepts tf.data datasets directly.
vggr = model.fit(
    train_ds_v,
    validation_data=val_ds_v,
    epochs=25,
    steps_per_epoch=len(train_ds_v),
    validation_steps=len(val_ds_v))
Model: "model_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_2 (InputLayer) [(None, 224, 224, 3)] 0 block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 block2_conv1 (Conv2D) (None, 112, 112, 128) 73856 block2_conv2 (Conv2D) (None, 112, 112, 128) 147584 block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 block3_conv1 (Conv2D) (None, 56, 56, 256) 295168 block3_conv2 (Conv2D) (None, 56, 56, 256) 590080 block3_conv3 (Conv2D) (None, 56, 56, 256) 590080 block3_pool (MaxPooling2D) (None, 28, 28, 256) 0 block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808 block4_pool (MaxPooling2D) (None, 14, 14, 512) 0 block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808 block5_pool (MaxPooling2D) (None, 7, 7, 512) 0 flatten_2 (Flatten) (None, 25088) 0 dense_4 (Dense) (None, 5) 125445 ================================================================= Total params: 14,840,133 Trainable params: 125,445 Non-trainable params: 14,714,688 _________________________________________________________________
/var/folders/3r/c8tg1h051m18qhsdccdysrt40000gn/T/ipykernel_14470/2199093522.py:50: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators. vggr = model.fit_generator(
Epoch 1/25 93/93 [==============================] - 389s 4s/step - loss: 0.3938 - accuracy: 0.8753 - val_loss: 0.1230 - val_accuracy: 0.9647 Epoch 2/25 93/93 [==============================] - 417s 4s/step - loss: 0.0512 - accuracy: 0.9909 - val_loss: 0.0867 - val_accuracy: 0.9715 Epoch 3/25 93/93 [==============================] - 424s 5s/step - loss: 0.0243 - accuracy: 0.9990 - val_loss: 0.0692 - val_accuracy: 0.9769 Epoch 4/25 93/93 [==============================] - 431s 5s/step - loss: 0.0148 - accuracy: 1.0000 - val_loss: 0.0614 - val_accuracy: 0.9769 Epoch 5/25 93/93 [==============================] - 439s 5s/step - loss: 0.0107 - accuracy: 1.0000 - val_loss: 0.0607 - val_accuracy: 0.9810 Epoch 6/25 93/93 [==============================] - 445s 5s/step - loss: 0.0073 - accuracy: 1.0000 - val_loss: 0.0670 - val_accuracy: 0.9755 Epoch 7/25 93/93 [==============================] - 448s 5s/step - loss: 0.0058 - accuracy: 1.0000 - val_loss: 0.0559 - val_accuracy: 0.9783 Epoch 8/25 93/93 [==============================] - 451s 5s/step - loss: 0.0046 - accuracy: 1.0000 - val_loss: 0.0530 - val_accuracy: 0.9796 Epoch 9/25 93/93 [==============================] - 482s 5s/step - loss: 0.0038 - accuracy: 1.0000 - val_loss: 0.0538 - val_accuracy: 0.9783 Epoch 10/25 93/93 [==============================] - 488s 5s/step - loss: 0.0032 - accuracy: 1.0000 - val_loss: 0.0494 - val_accuracy: 0.9810 Epoch 11/25 93/93 [==============================] - 494s 5s/step - loss: 0.0028 - accuracy: 1.0000 - val_loss: 0.0502 - val_accuracy: 0.9796 Epoch 12/25 93/93 [==============================] - 491s 5s/step - loss: 0.0024 - accuracy: 1.0000 - val_loss: 0.0503 - val_accuracy: 0.9837 Epoch 13/25 93/93 [==============================] - 494s 5s/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 0.0485 - val_accuracy: 0.9810 Epoch 14/25 93/93 [==============================] - 486s 5s/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 0.0448 - val_accuracy: 0.9851 Epoch 15/25 93/93 
[==============================] - 485s 5s/step - loss: 0.0017 - accuracy: 1.0000 - val_loss: 0.0474 - val_accuracy: 0.9810 Epoch 16/25 93/93 [==============================] - 503s 5s/step - loss: 0.0015 - accuracy: 1.0000 - val_loss: 0.0430 - val_accuracy: 0.9823 Epoch 17/25 93/93 [==============================] - 472s 5s/step - loss: 0.0013 - accuracy: 1.0000 - val_loss: 0.0481 - val_accuracy: 0.9796 Epoch 18/25 93/93 [==============================] - 474s 5s/step - loss: 0.0012 - accuracy: 1.0000 - val_loss: 0.0503 - val_accuracy: 0.9783 Epoch 19/25 93/93 [==============================] - 9356s 102s/step - loss: 0.0011 - accuracy: 1.0000 - val_loss: 0.0496 - val_accuracy: 0.9783 Epoch 20/25 93/93 [==============================] - 10544s 115s/step - loss: 0.0010 - accuracy: 1.0000 - val_loss: 0.0466 - val_accuracy: 0.9837 Epoch 21/25 93/93 [==============================] - 10648s 116s/step - loss: 9.2169e-04 - accuracy: 1.0000 - val_loss: 0.0457 - val_accuracy: 0.9837 Epoch 22/25 93/93 [==============================] - 11629s 116s/step - loss: 8.5353e-04 - accuracy: 1.0000 - val_loss: 0.0462 - val_accuracy: 0.9837 Epoch 23/25 93/93 [==============================] - 4931s 54s/step - loss: 7.7390e-04 - accuracy: 1.0000 - val_loss: 0.0466 - val_accuracy: 0.9837 Epoch 24/25 93/93 [==============================] - 419s 5s/step - loss: 7.1216e-04 - accuracy: 1.0000 - val_loss: 0.0456 - val_accuracy: 0.9823 Epoch 25/25 93/93 [==============================] - 444s 5s/step - loss: 6.6600e-04 - accuracy: 1.0000 - val_loss: 0.0463 - val_accuracy: 0.9837
import matplotlib.pyplot as plt

# VGG16 training curves: accuracy and loss for train and validation sets.
history = vggr.history
for key in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(history[key])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy", "Validation Accuracy", "Loss", "Validation Loss"])
plt.show()

# Final held-out evaluation.
model.evaluate(test_ds_v)
29/29 [==============================] - 112s 4s/step - loss: 0.0430 - accuracy: 0.9860
[0.043045952916145325, 0.985991358757019]
ResNet101V2
from glob import glob

import numpy as np
import matplotlib.pyplot as plt
from keras.applications import ResNet101V2
from keras.layers import Input, Lambda, Dense, Flatten
from keras.models import Model, Sequential
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator

import ssl
# HACK: disables TLS certificate verification process-wide for the
# pretrained-weight download.  Insecure -- prefer fixing the local
# certificate store.
ssl._create_default_https_context = ssl._create_unverified_context

# Re-size all images to this resolution.
IMAGE_SIZE = [224, 224]

# ResNet101V2 backbone pretrained on ImageNet, without its top classifier.
resnet = ResNet101V2(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# Freeze the pretrained weights; only the new head is trained.
for layer in resnet.layers:
    layer.trainable = False

# Number of target classes.
classes = 5

# New classification head on top of the frozen backbone.
# (The original hard-coded Dense(5) instead of using `classes`.)
x = Flatten()(resnet.output)
prediction = Dense(classes, activation='softmax')(x)

model = Model(inputs=resnet.input, outputs=prediction)
model.summary()
Model: "model_2" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_3 (InputLayer) [(None, 224, 224, 3 0 [] )] conv1_pad (ZeroPadding2D) (None, 230, 230, 3) 0 ['input_3[0][0]'] conv1_conv (Conv2D) (None, 112, 112, 64 9472 ['conv1_pad[0][0]'] ) pool1_pad (ZeroPadding2D) (None, 114, 114, 64 0 ['conv1_conv[0][0]'] ) pool1_pool (MaxPooling2D) (None, 56, 56, 64) 0 ['pool1_pad[0][0]'] conv2_block1_preact_bn (BatchN (None, 56, 56, 64) 256 ['pool1_pool[0][0]'] ormalization) conv2_block1_preact_relu (Acti (None, 56, 56, 64) 0 ['conv2_block1_preact_bn[0][0]'] vation) conv2_block1_1_conv (Conv2D) (None, 56, 56, 64) 4096 ['conv2_block1_preact_relu[0][0]' ] conv2_block1_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_1_conv[0][0]'] ization) conv2_block1_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_1_bn[0][0]'] n) conv2_block1_2_pad (ZeroPaddin (None, 58, 58, 64) 0 ['conv2_block1_1_relu[0][0]'] g2D) conv2_block1_2_conv (Conv2D) (None, 56, 56, 64) 36864 ['conv2_block1_2_pad[0][0]'] conv2_block1_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_2_conv[0][0]'] ization) conv2_block1_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_2_bn[0][0]'] n) conv2_block1_0_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block1_preact_relu[0][0]' ] conv2_block1_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block1_2_relu[0][0]'] conv2_block1_out (Add) (None, 56, 56, 256) 0 ['conv2_block1_0_conv[0][0]', 'conv2_block1_3_conv[0][0]'] conv2_block2_preact_bn (BatchN (None, 56, 56, 256) 1024 ['conv2_block1_out[0][0]'] ormalization) conv2_block2_preact_relu (Acti (None, 56, 56, 256) 0 ['conv2_block2_preact_bn[0][0]'] vation) conv2_block2_1_conv (Conv2D) (None, 56, 56, 64) 16384 ['conv2_block2_preact_relu[0][0]' ] conv2_block2_1_bn (BatchNormal (None, 56, 56, 64) 256 
['conv2_block2_1_conv[0][0]'] ization) conv2_block2_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_1_bn[0][0]'] n) conv2_block2_2_pad (ZeroPaddin (None, 58, 58, 64) 0 ['conv2_block2_1_relu[0][0]'] g2D) conv2_block2_2_conv (Conv2D) (None, 56, 56, 64) 36864 ['conv2_block2_2_pad[0][0]'] conv2_block2_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_2_conv[0][0]'] ization) conv2_block2_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_2_bn[0][0]'] n) conv2_block2_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block2_2_relu[0][0]'] conv2_block2_out (Add) (None, 56, 56, 256) 0 ['conv2_block1_out[0][0]', 'conv2_block2_3_conv[0][0]'] conv2_block3_preact_bn (BatchN (None, 56, 56, 256) 1024 ['conv2_block2_out[0][0]'] ormalization) conv2_block3_preact_relu (Acti (None, 56, 56, 256) 0 ['conv2_block3_preact_bn[0][0]'] vation) conv2_block3_1_conv (Conv2D) (None, 56, 56, 64) 16384 ['conv2_block3_preact_relu[0][0]' ] conv2_block3_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_1_conv[0][0]'] ization) conv2_block3_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_1_bn[0][0]'] n) conv2_block3_2_pad (ZeroPaddin (None, 58, 58, 64) 0 ['conv2_block3_1_relu[0][0]'] g2D) conv2_block3_2_conv (Conv2D) (None, 28, 28, 64) 36864 ['conv2_block3_2_pad[0][0]'] conv2_block3_2_bn (BatchNormal (None, 28, 28, 64) 256 ['conv2_block3_2_conv[0][0]'] ization) conv2_block3_2_relu (Activatio (None, 28, 28, 64) 0 ['conv2_block3_2_bn[0][0]'] n) max_pooling2d_3 (MaxPooling2D) (None, 28, 28, 256) 0 ['conv2_block2_out[0][0]'] conv2_block3_3_conv (Conv2D) (None, 28, 28, 256) 16640 ['conv2_block3_2_relu[0][0]'] conv2_block3_out (Add) (None, 28, 28, 256) 0 ['max_pooling2d_3[0][0]', 'conv2_block3_3_conv[0][0]'] conv3_block1_preact_bn (BatchN (None, 28, 28, 256) 1024 ['conv2_block3_out[0][0]'] ormalization) conv3_block1_preact_relu (Acti (None, 28, 28, 256) 0 ['conv3_block1_preact_bn[0][0]'] vation) conv3_block1_1_conv (Conv2D) (None, 28, 28, 128) 32768 
['conv3_block1_preact_relu[0][0]' ] conv3_block1_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_1_conv[0][0]'] ization) conv3_block1_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_1_bn[0][0]'] n) conv3_block1_2_pad (ZeroPaddin (None, 30, 30, 128) 0 ['conv3_block1_1_relu[0][0]'] g2D) conv3_block1_2_conv (Conv2D) (None, 28, 28, 128) 147456 ['conv3_block1_2_pad[0][0]'] conv3_block1_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_2_conv[0][0]'] ization) conv3_block1_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_2_bn[0][0]'] n) conv3_block1_0_conv (Conv2D) (None, 28, 28, 512) 131584 ['conv3_block1_preact_relu[0][0]' ] conv3_block1_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block1_2_relu[0][0]'] conv3_block1_out (Add) (None, 28, 28, 512) 0 ['conv3_block1_0_conv[0][0]', 'conv3_block1_3_conv[0][0]'] conv3_block2_preact_bn (BatchN (None, 28, 28, 512) 2048 ['conv3_block1_out[0][0]'] ormalization) conv3_block2_preact_relu (Acti (None, 28, 28, 512) 0 ['conv3_block2_preact_bn[0][0]'] vation) conv3_block2_1_conv (Conv2D) (None, 28, 28, 128) 65536 ['conv3_block2_preact_relu[0][0]' ] conv3_block2_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_1_conv[0][0]'] ization) conv3_block2_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_1_bn[0][0]'] n) conv3_block2_2_pad (ZeroPaddin (None, 30, 30, 128) 0 ['conv3_block2_1_relu[0][0]'] g2D) conv3_block2_2_conv (Conv2D) (None, 28, 28, 128) 147456 ['conv3_block2_2_pad[0][0]'] conv3_block2_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_2_conv[0][0]'] ization) conv3_block2_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_2_bn[0][0]'] n) conv3_block2_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block2_2_relu[0][0]'] conv3_block2_out (Add) (None, 28, 28, 512) 0 ['conv3_block1_out[0][0]', 'conv3_block2_3_conv[0][0]'] conv3_block3_preact_bn (BatchN (None, 28, 28, 512) 2048 ['conv3_block2_out[0][0]'] ormalization) conv3_block3_preact_relu (Acti (None, 28, 28, 512) 0 
['conv3_block3_preact_bn[0][0]'] vation) conv3_block3_1_conv (Conv2D) (None, 28, 28, 128) 65536 ['conv3_block3_preact_relu[0][0]' ] conv3_block3_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_1_conv[0][0]'] ization) conv3_block3_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_1_bn[0][0]'] n) conv3_block3_2_pad (ZeroPaddin (None, 30, 30, 128) 0 ['conv3_block3_1_relu[0][0]'] g2D) conv3_block3_2_conv (Conv2D) (None, 28, 28, 128) 147456 ['conv3_block3_2_pad[0][0]'] conv3_block3_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_2_conv[0][0]'] ization) conv3_block3_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_2_bn[0][0]'] n) conv3_block3_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block3_2_relu[0][0]'] conv3_block3_out (Add) (None, 28, 28, 512) 0 ['conv3_block2_out[0][0]', 'conv3_block3_3_conv[0][0]'] conv3_block4_preact_bn (BatchN (None, 28, 28, 512) 2048 ['conv3_block3_out[0][0]'] ormalization) conv3_block4_preact_relu (Acti (None, 28, 28, 512) 0 ['conv3_block4_preact_bn[0][0]'] vation) conv3_block4_1_conv (Conv2D) (None, 28, 28, 128) 65536 ['conv3_block4_preact_relu[0][0]' ] conv3_block4_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_1_conv[0][0]'] ization) conv3_block4_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_1_bn[0][0]'] n) conv3_block4_2_pad (ZeroPaddin (None, 30, 30, 128) 0 ['conv3_block4_1_relu[0][0]'] g2D) conv3_block4_2_conv (Conv2D) (None, 14, 14, 128) 147456 ['conv3_block4_2_pad[0][0]'] conv3_block4_2_bn (BatchNormal (None, 14, 14, 128) 512 ['conv3_block4_2_conv[0][0]'] ization) conv3_block4_2_relu (Activatio (None, 14, 14, 128) 0 ['conv3_block4_2_bn[0][0]'] n) max_pooling2d_4 (MaxPooling2D) (None, 14, 14, 512) 0 ['conv3_block3_out[0][0]'] conv3_block4_3_conv (Conv2D) (None, 14, 14, 512) 66048 ['conv3_block4_2_relu[0][0]'] conv3_block4_out (Add) (None, 14, 14, 512) 0 ['max_pooling2d_4[0][0]', 'conv3_block4_3_conv[0][0]'] conv4_block1_preact_bn (BatchN (None, 14, 14, 512) 2048 
['conv3_block4_out[0][0]'] ormalization) conv4_block1_preact_relu (Acti (None, 14, 14, 512) 0 ['conv4_block1_preact_bn[0][0]'] vation) conv4_block1_1_conv (Conv2D) (None, 14, 14, 256) 131072 ['conv4_block1_preact_relu[0][0]' ] conv4_block1_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_1_conv[0][0]'] ization) conv4_block1_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_1_bn[0][0]'] n) conv4_block1_2_pad (ZeroPaddin (None, 16, 16, 256) 0 ['conv4_block1_1_relu[0][0]'] g2D) conv4_block1_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block1_2_pad[0][0]'] conv4_block1_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_2_conv[0][0]'] ization) conv4_block1_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_2_bn[0][0]'] n) conv4_block1_0_conv (Conv2D) (None, 14, 14, 1024 525312 ['conv4_block1_preact_relu[0][0]' ) ] conv4_block1_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block1_2_relu[0][0]'] ) conv4_block1_out (Add) (None, 14, 14, 1024 0 ['conv4_block1_0_conv[0][0]', ) 'conv4_block1_3_conv[0][0]'] conv4_block2_preact_bn (BatchN (None, 14, 14, 1024 4096 ['conv4_block1_out[0][0]'] ormalization) ) conv4_block2_preact_relu (Acti (None, 14, 14, 1024 0 ['conv4_block2_preact_bn[0][0]'] vation) ) conv4_block2_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block2_preact_relu[0][0]' ] conv4_block2_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_1_conv[0][0]'] ization) conv4_block2_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_1_bn[0][0]'] n) conv4_block2_2_pad (ZeroPaddin (None, 16, 16, 256) 0 ['conv4_block2_1_relu[0][0]'] g2D) conv4_block2_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block2_2_pad[0][0]'] conv4_block2_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_2_conv[0][0]'] ization) conv4_block2_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_2_bn[0][0]'] n) conv4_block2_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block2_2_relu[0][0]'] ) conv4_block2_out (Add) (None, 14, 14, 1024 0 
['conv4_block1_out[0][0]', ) 'conv4_block2_3_conv[0][0]'] conv4_block3_preact_bn (BatchN (None, 14, 14, 1024 4096 ['conv4_block2_out[0][0]'] ormalization) ) conv4_block3_preact_relu (Acti (None, 14, 14, 1024 0 ['conv4_block3_preact_bn[0][0]'] vation) ) conv4_block3_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block3_preact_relu[0][0]' ] conv4_block3_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_1_conv[0][0]'] ization) conv4_block3_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_1_bn[0][0]'] n) conv4_block3_2_pad (ZeroPaddin (None, 16, 16, 256) 0 ['conv4_block3_1_relu[0][0]'] g2D) conv4_block3_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block3_2_pad[0][0]'] conv4_block3_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_2_conv[0][0]'] ization) conv4_block3_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_2_bn[0][0]'] n) conv4_block3_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block3_2_relu[0][0]'] ) conv4_block3_out (Add) (None, 14, 14, 1024 0 ['conv4_block2_out[0][0]', ) 'conv4_block3_3_conv[0][0]'] conv4_block4_preact_bn (BatchN (None, 14, 14, 1024 4096 ['conv4_block3_out[0][0]'] ormalization) ) conv4_block4_preact_relu (Acti (None, 14, 14, 1024 0 ['conv4_block4_preact_bn[0][0]'] vation) ) conv4_block4_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block4_preact_relu[0][0]' ] conv4_block4_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_1_conv[0][0]'] ization) conv4_block4_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_1_bn[0][0]'] n) conv4_block4_2_pad (ZeroPaddin (None, 16, 16, 256) 0 ['conv4_block4_1_relu[0][0]'] g2D) conv4_block4_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block4_2_pad[0][0]'] conv4_block4_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_2_conv[0][0]'] ization) conv4_block4_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_2_bn[0][0]'] n) conv4_block4_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block4_2_relu[0][0]'] ) conv4_block4_out (Add) 
(None, 14, 14, 1024 0 ['conv4_block3_out[0][0]', ) 'conv4_block4_3_conv[0][0]'] conv4_block5_preact_bn (BatchN (None, 14, 14, 1024 4096 ['conv4_block4_out[0][0]'] ormalization) ) conv4_block5_preact_relu (Acti (None, 14, 14, 1024 0 ['conv4_block5_preact_bn[0][0]'] vation) ) conv4_block5_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block5_preact_relu[0][0]' ] conv4_block5_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_1_conv[0][0]'] ization) conv4_block5_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_1_bn[0][0]'] n) conv4_block5_2_pad (ZeroPaddin (None, 16, 16, 256) 0 ['conv4_block5_1_relu[0][0]'] g2D) conv4_block5_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block5_2_pad[0][0]'] conv4_block5_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_2_conv[0][0]'] ization) conv4_block5_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_2_bn[0][0]'] n) conv4_block5_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block5_2_relu[0][0]'] ) conv4_block5_out (Add) (None, 14, 14, 1024 0 ['conv4_block4_out[0][0]', ) 'conv4_block5_3_conv[0][0]'] conv4_block6_preact_bn (BatchN (None, 14, 14, 1024 4096 ['conv4_block5_out[0][0]'] ormalization) ) conv4_block6_preact_relu (Acti (None, 14, 14, 1024 0 ['conv4_block6_preact_bn[0][0]'] vation) ) conv4_block6_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block6_preact_relu[0][0]' ] conv4_block6_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_1_conv[0][0]'] ization) conv4_block6_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_1_bn[0][0]'] n) conv4_block6_2_pad (ZeroPaddin (None, 16, 16, 256) 0 ['conv4_block6_1_relu[0][0]'] g2D) conv4_block6_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block6_2_pad[0][0]'] conv4_block6_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_2_conv[0][0]'] ization) conv4_block6_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_2_bn[0][0]'] n) conv4_block6_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block6_2_relu[0][0]'] ) 
conv4_block6_out (Add) (None, 14, 14, 1024 0 ['conv4_block5_out[0][0]', ) 'conv4_block6_3_conv[0][0]'] conv4_block7_preact_bn (BatchN (None, 14, 14, 1024 4096 ['conv4_block6_out[0][0]'] ormalization) ) conv4_block7_preact_relu (Acti (None, 14, 14, 1024 0 ['conv4_block7_preact_bn[0][0]'] vation) ) conv4_block7_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block7_preact_relu[0][0]' ] conv4_block7_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block7_1_conv[0][0]'] ization) conv4_block7_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block7_1_bn[0][0]'] n) conv4_block7_2_pad (ZeroPaddin (None, 16, 16, 256) 0 ['conv4_block7_1_relu[0][0]'] g2D) conv4_block7_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block7_2_pad[0][0]'] conv4_block7_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block7_2_conv[0][0]'] ization) conv4_block7_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block7_2_bn[0][0]'] n) conv4_block7_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block7_2_relu[0][0]'] ) conv4_block7_out (Add) (None, 14, 14, 1024 0 ['conv4_block6_out[0][0]', ) 'conv4_block7_3_conv[0][0]'] conv4_block8_preact_bn (BatchN (None, 14, 14, 1024 4096 ['conv4_block7_out[0][0]'] ormalization) ) conv4_block8_preact_relu (Acti (None, 14, 14, 1024 0 ['conv4_block8_preact_bn[0][0]'] vation) ) conv4_block8_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block8_preact_relu[0][0]' ] conv4_block8_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block8_1_conv[0][0]'] ization) conv4_block8_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block8_1_bn[0][0]'] n) conv4_block8_2_pad (ZeroPaddin (None, 16, 16, 256) 0 ['conv4_block8_1_relu[0][0]'] g2D) conv4_block8_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block8_2_pad[0][0]'] conv4_block8_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block8_2_conv[0][0]'] ization) conv4_block8_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block8_2_bn[0][0]'] n) conv4_block8_3_conv (Conv2D) (None, 14, 14, 1024 263168 
['conv4_block8_2_relu[0][0]'] ) conv4_block8_out (Add) (None, 14, 14, 1024 0 ['conv4_block7_out[0][0]', ) 'conv4_block8_3_conv[0][0]'] conv4_block9_preact_bn (BatchN (None, 14, 14, 1024 4096 ['conv4_block8_out[0][0]'] ormalization) ) conv4_block9_preact_relu (Acti (None, 14, 14, 1024 0 ['conv4_block9_preact_bn[0][0]'] vation) ) conv4_block9_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block9_preact_relu[0][0]' ] conv4_block9_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block9_1_conv[0][0]'] ization) conv4_block9_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block9_1_bn[0][0]'] n) conv4_block9_2_pad (ZeroPaddin (None, 16, 16, 256) 0 ['conv4_block9_1_relu[0][0]'] g2D) conv4_block9_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block9_2_pad[0][0]'] conv4_block9_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block9_2_conv[0][0]'] ization) conv4_block9_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block9_2_bn[0][0]'] n) conv4_block9_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block9_2_relu[0][0]'] ) conv4_block9_out (Add) (None, 14, 14, 1024 0 ['conv4_block8_out[0][0]', ) 'conv4_block9_3_conv[0][0]'] conv4_block10_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block9_out[0][0]'] Normalization) ) conv4_block10_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block10_preact_bn[0][0]'] ivation) ) conv4_block10_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block10_preact_relu[0][0] '] conv4_block10_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block10_1_conv[0][0]'] lization) conv4_block10_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block10_1_bn[0][0]'] on) conv4_block10_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block10_1_relu[0][0]'] ng2D) conv4_block10_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block10_2_pad[0][0]'] conv4_block10_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block10_2_conv[0][0]'] lization) conv4_block10_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block10_2_bn[0][0]'] on) conv4_block10_3_conv 
(Conv2D) (None, 14, 14, 1024 263168 ['conv4_block10_2_relu[0][0]'] ) conv4_block10_out (Add) (None, 14, 14, 1024 0 ['conv4_block9_out[0][0]', ) 'conv4_block10_3_conv[0][0]'] conv4_block11_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block10_out[0][0]'] Normalization) ) conv4_block11_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block11_preact_bn[0][0]'] ivation) ) conv4_block11_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block11_preact_relu[0][0] '] conv4_block11_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block11_1_conv[0][0]'] lization) conv4_block11_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block11_1_bn[0][0]'] on) conv4_block11_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block11_1_relu[0][0]'] ng2D) conv4_block11_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block11_2_pad[0][0]'] conv4_block11_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block11_2_conv[0][0]'] lization) conv4_block11_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block11_2_bn[0][0]'] on) conv4_block11_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block11_2_relu[0][0]'] ) conv4_block11_out (Add) (None, 14, 14, 1024 0 ['conv4_block10_out[0][0]', ) 'conv4_block11_3_conv[0][0]'] conv4_block12_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block11_out[0][0]'] Normalization) ) conv4_block12_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block12_preact_bn[0][0]'] ivation) ) conv4_block12_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block12_preact_relu[0][0] '] conv4_block12_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block12_1_conv[0][0]'] lization) conv4_block12_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block12_1_bn[0][0]'] on) conv4_block12_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block12_1_relu[0][0]'] ng2D) conv4_block12_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block12_2_pad[0][0]'] conv4_block12_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block12_2_conv[0][0]'] lization) conv4_block12_2_relu (Activati (None, 14, 
14, 256) 0 ['conv4_block12_2_bn[0][0]'] on) conv4_block12_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block12_2_relu[0][0]'] ) conv4_block12_out (Add) (None, 14, 14, 1024 0 ['conv4_block11_out[0][0]', ) 'conv4_block12_3_conv[0][0]'] conv4_block13_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block12_out[0][0]'] Normalization) ) conv4_block13_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block13_preact_bn[0][0]'] ivation) ) conv4_block13_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block13_preact_relu[0][0] '] conv4_block13_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block13_1_conv[0][0]'] lization) conv4_block13_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block13_1_bn[0][0]'] on) conv4_block13_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block13_1_relu[0][0]'] ng2D) conv4_block13_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block13_2_pad[0][0]'] conv4_block13_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block13_2_conv[0][0]'] lization) conv4_block13_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block13_2_bn[0][0]'] on) conv4_block13_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block13_2_relu[0][0]'] ) conv4_block13_out (Add) (None, 14, 14, 1024 0 ['conv4_block12_out[0][0]', ) 'conv4_block13_3_conv[0][0]'] conv4_block14_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block13_out[0][0]'] Normalization) ) conv4_block14_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block14_preact_bn[0][0]'] ivation) ) conv4_block14_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block14_preact_relu[0][0] '] conv4_block14_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block14_1_conv[0][0]'] lization) conv4_block14_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block14_1_bn[0][0]'] on) conv4_block14_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block14_1_relu[0][0]'] ng2D) conv4_block14_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block14_2_pad[0][0]'] conv4_block14_2_bn (BatchNorma (None, 14, 14, 256) 1024 
['conv4_block14_2_conv[0][0]'] lization) conv4_block14_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block14_2_bn[0][0]'] on) conv4_block14_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block14_2_relu[0][0]'] ) conv4_block14_out (Add) (None, 14, 14, 1024 0 ['conv4_block13_out[0][0]', ) 'conv4_block14_3_conv[0][0]'] conv4_block15_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block14_out[0][0]'] Normalization) ) conv4_block15_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block15_preact_bn[0][0]'] ivation) ) conv4_block15_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block15_preact_relu[0][0] '] conv4_block15_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block15_1_conv[0][0]'] lization) conv4_block15_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block15_1_bn[0][0]'] on) conv4_block15_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block15_1_relu[0][0]'] ng2D) conv4_block15_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block15_2_pad[0][0]'] conv4_block15_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block15_2_conv[0][0]'] lization) conv4_block15_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block15_2_bn[0][0]'] on) conv4_block15_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block15_2_relu[0][0]'] ) conv4_block15_out (Add) (None, 14, 14, 1024 0 ['conv4_block14_out[0][0]', ) 'conv4_block15_3_conv[0][0]'] conv4_block16_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block15_out[0][0]'] Normalization) ) conv4_block16_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block16_preact_bn[0][0]'] ivation) ) conv4_block16_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block16_preact_relu[0][0] '] conv4_block16_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block16_1_conv[0][0]'] lization) conv4_block16_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block16_1_bn[0][0]'] on) conv4_block16_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block16_1_relu[0][0]'] ng2D) conv4_block16_2_conv (Conv2D) (None, 14, 14, 256) 589824 
['conv4_block16_2_pad[0][0]'] conv4_block16_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block16_2_conv[0][0]'] lization) conv4_block16_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block16_2_bn[0][0]'] on) conv4_block16_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block16_2_relu[0][0]'] ) conv4_block16_out (Add) (None, 14, 14, 1024 0 ['conv4_block15_out[0][0]', ) 'conv4_block16_3_conv[0][0]'] conv4_block17_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block16_out[0][0]'] Normalization) ) conv4_block17_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block17_preact_bn[0][0]'] ivation) ) conv4_block17_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block17_preact_relu[0][0] '] conv4_block17_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block17_1_conv[0][0]'] lization) conv4_block17_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block17_1_bn[0][0]'] on) conv4_block17_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block17_1_relu[0][0]'] ng2D) conv4_block17_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block17_2_pad[0][0]'] conv4_block17_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block17_2_conv[0][0]'] lization) conv4_block17_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block17_2_bn[0][0]'] on) conv4_block17_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block17_2_relu[0][0]'] ) conv4_block17_out (Add) (None, 14, 14, 1024 0 ['conv4_block16_out[0][0]', ) 'conv4_block17_3_conv[0][0]'] conv4_block18_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block17_out[0][0]'] Normalization) ) conv4_block18_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block18_preact_bn[0][0]'] ivation) ) conv4_block18_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block18_preact_relu[0][0] '] conv4_block18_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block18_1_conv[0][0]'] lization) conv4_block18_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block18_1_bn[0][0]'] on) conv4_block18_2_pad (ZeroPaddi (None, 16, 16, 256) 0 
['conv4_block18_1_relu[0][0]'] ng2D) conv4_block18_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block18_2_pad[0][0]'] conv4_block18_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block18_2_conv[0][0]'] lization) conv4_block18_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block18_2_bn[0][0]'] on) conv4_block18_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block18_2_relu[0][0]'] ) conv4_block18_out (Add) (None, 14, 14, 1024 0 ['conv4_block17_out[0][0]', ) 'conv4_block18_3_conv[0][0]'] conv4_block19_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block18_out[0][0]'] Normalization) ) conv4_block19_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block19_preact_bn[0][0]'] ivation) ) conv4_block19_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block19_preact_relu[0][0] '] conv4_block19_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block19_1_conv[0][0]'] lization) conv4_block19_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block19_1_bn[0][0]'] on) conv4_block19_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block19_1_relu[0][0]'] ng2D) conv4_block19_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block19_2_pad[0][0]'] conv4_block19_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block19_2_conv[0][0]'] lization) conv4_block19_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block19_2_bn[0][0]'] on) conv4_block19_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block19_2_relu[0][0]'] ) conv4_block19_out (Add) (None, 14, 14, 1024 0 ['conv4_block18_out[0][0]', ) 'conv4_block19_3_conv[0][0]'] conv4_block20_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block19_out[0][0]'] Normalization) ) conv4_block20_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block20_preact_bn[0][0]'] ivation) ) conv4_block20_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block20_preact_relu[0][0] '] conv4_block20_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block20_1_conv[0][0]'] lization) conv4_block20_1_relu (Activati (None, 14, 14, 256) 0 
['conv4_block20_1_bn[0][0]'] on) conv4_block20_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block20_1_relu[0][0]'] ng2D) conv4_block20_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block20_2_pad[0][0]'] conv4_block20_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block20_2_conv[0][0]'] lization) conv4_block20_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block20_2_bn[0][0]'] on) conv4_block20_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block20_2_relu[0][0]'] ) conv4_block20_out (Add) (None, 14, 14, 1024 0 ['conv4_block19_out[0][0]', ) 'conv4_block20_3_conv[0][0]'] conv4_block21_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block20_out[0][0]'] Normalization) ) conv4_block21_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block21_preact_bn[0][0]'] ivation) ) conv4_block21_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block21_preact_relu[0][0] '] conv4_block21_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block21_1_conv[0][0]'] lization) conv4_block21_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block21_1_bn[0][0]'] on) conv4_block21_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block21_1_relu[0][0]'] ng2D) conv4_block21_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block21_2_pad[0][0]'] conv4_block21_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block21_2_conv[0][0]'] lization) conv4_block21_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block21_2_bn[0][0]'] on) conv4_block21_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block21_2_relu[0][0]'] ) conv4_block21_out (Add) (None, 14, 14, 1024 0 ['conv4_block20_out[0][0]', ) 'conv4_block21_3_conv[0][0]'] conv4_block22_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block21_out[0][0]'] Normalization) ) conv4_block22_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block22_preact_bn[0][0]'] ivation) ) conv4_block22_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block22_preact_relu[0][0] '] conv4_block22_1_bn (BatchNorma (None, 14, 14, 256) 1024 
['conv4_block22_1_conv[0][0]'] lization) conv4_block22_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block22_1_bn[0][0]'] on) conv4_block22_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block22_1_relu[0][0]'] ng2D) conv4_block22_2_conv (Conv2D) (None, 14, 14, 256) 589824 ['conv4_block22_2_pad[0][0]'] conv4_block22_2_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block22_2_conv[0][0]'] lization) conv4_block22_2_relu (Activati (None, 14, 14, 256) 0 ['conv4_block22_2_bn[0][0]'] on) conv4_block22_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block22_2_relu[0][0]'] ) conv4_block22_out (Add) (None, 14, 14, 1024 0 ['conv4_block21_out[0][0]', ) 'conv4_block22_3_conv[0][0]'] conv4_block23_preact_bn (Batch (None, 14, 14, 1024 4096 ['conv4_block22_out[0][0]'] Normalization) ) conv4_block23_preact_relu (Act (None, 14, 14, 1024 0 ['conv4_block23_preact_bn[0][0]'] ivation) ) conv4_block23_1_conv (Conv2D) (None, 14, 14, 256) 262144 ['conv4_block23_preact_relu[0][0] '] conv4_block23_1_bn (BatchNorma (None, 14, 14, 256) 1024 ['conv4_block23_1_conv[0][0]'] lization) conv4_block23_1_relu (Activati (None, 14, 14, 256) 0 ['conv4_block23_1_bn[0][0]'] on) conv4_block23_2_pad (ZeroPaddi (None, 16, 16, 256) 0 ['conv4_block23_1_relu[0][0]'] ng2D) conv4_block23_2_conv (Conv2D) (None, 7, 7, 256) 589824 ['conv4_block23_2_pad[0][0]'] conv4_block23_2_bn (BatchNorma (None, 7, 7, 256) 1024 ['conv4_block23_2_conv[0][0]'] lization) conv4_block23_2_relu (Activati (None, 7, 7, 256) 0 ['conv4_block23_2_bn[0][0]'] on) max_pooling2d_5 (MaxPooling2D) (None, 7, 7, 1024) 0 ['conv4_block22_out[0][0]'] conv4_block23_3_conv (Conv2D) (None, 7, 7, 1024) 263168 ['conv4_block23_2_relu[0][0]'] conv4_block23_out (Add) (None, 7, 7, 1024) 0 ['max_pooling2d_5[0][0]', 'conv4_block23_3_conv[0][0]'] conv5_block1_preact_bn (BatchN (None, 7, 7, 1024) 4096 ['conv4_block23_out[0][0]'] ormalization) conv5_block1_preact_relu (Acti (None, 7, 7, 1024) 0 ['conv5_block1_preact_bn[0][0]'] vation) conv5_block1_1_conv (Conv2D) 
(None, 7, 7, 512) 524288 ['conv5_block1_preact_relu[0][0]' ] conv5_block1_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_1_conv[0][0]'] ization) conv5_block1_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_1_bn[0][0]'] n) conv5_block1_2_pad (ZeroPaddin (None, 9, 9, 512) 0 ['conv5_block1_1_relu[0][0]'] g2D) conv5_block1_2_conv (Conv2D) (None, 7, 7, 512) 2359296 ['conv5_block1_2_pad[0][0]'] conv5_block1_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_2_conv[0][0]'] ization) conv5_block1_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_2_bn[0][0]'] n) conv5_block1_0_conv (Conv2D) (None, 7, 7, 2048) 2099200 ['conv5_block1_preact_relu[0][0]' ] conv5_block1_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block1_2_relu[0][0]'] conv5_block1_out (Add) (None, 7, 7, 2048) 0 ['conv5_block1_0_conv[0][0]', 'conv5_block1_3_conv[0][0]'] conv5_block2_preact_bn (BatchN (None, 7, 7, 2048) 8192 ['conv5_block1_out[0][0]'] ormalization) conv5_block2_preact_relu (Acti (None, 7, 7, 2048) 0 ['conv5_block2_preact_bn[0][0]'] vation) conv5_block2_1_conv (Conv2D) (None, 7, 7, 512) 1048576 ['conv5_block2_preact_relu[0][0]' ] conv5_block2_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_1_conv[0][0]'] ization) conv5_block2_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_1_bn[0][0]'] n) conv5_block2_2_pad (ZeroPaddin (None, 9, 9, 512) 0 ['conv5_block2_1_relu[0][0]'] g2D) conv5_block2_2_conv (Conv2D) (None, 7, 7, 512) 2359296 ['conv5_block2_2_pad[0][0]'] conv5_block2_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_2_conv[0][0]'] ization) conv5_block2_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_2_bn[0][0]'] n) conv5_block2_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block2_2_relu[0][0]'] conv5_block2_out (Add) (None, 7, 7, 2048) 0 ['conv5_block1_out[0][0]', 'conv5_block2_3_conv[0][0]'] conv5_block3_preact_bn (BatchN (None, 7, 7, 2048) 8192 ['conv5_block2_out[0][0]'] ormalization) conv5_block3_preact_relu (Acti (None, 7, 7, 2048) 0 
['conv5_block3_preact_bn[0][0]'] vation) conv5_block3_1_conv (Conv2D) (None, 7, 7, 512) 1048576 ['conv5_block3_preact_relu[0][0]' ] conv5_block3_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_1_conv[0][0]'] ization) conv5_block3_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_1_bn[0][0]'] n) conv5_block3_2_pad (ZeroPaddin (None, 9, 9, 512) 0 ['conv5_block3_1_relu[0][0]'] g2D) conv5_block3_2_conv (Conv2D) (None, 7, 7, 512) 2359296 ['conv5_block3_2_pad[0][0]'] conv5_block3_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_2_conv[0][0]'] ization) conv5_block3_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_2_bn[0][0]'] n) conv5_block3_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block3_2_relu[0][0]'] conv5_block3_out (Add) (None, 7, 7, 2048) 0 ['conv5_block2_out[0][0]', 'conv5_block3_3_conv[0][0]'] post_bn (BatchNormalization) (None, 7, 7, 2048) 8192 ['conv5_block3_out[0][0]'] post_relu (Activation) (None, 7, 7, 2048) 0 ['post_bn[0][0]'] flatten_3 (Flatten) (None, 100352) 0 ['post_relu[0][0]'] dense_5 (Dense) (None, 5) 501765 ['flatten_3[0][0]'] ================================================================================================== Total params: 43,128,325 Trainable params: 501,765 Non-trainable params: 42,626,560 __________________________________________________________________________________________________
# Configure training: integer-label cross-entropy (labels are class ids,
# not one-hot) with the Adam optimizer, tracking classification accuracy.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
# Fit the model.
# `Model.fit_generator` is deprecated (the run below even emitted the
# deprecation warning); `Model.fit` accepts generators / `tf.data` datasets
# directly. The explicit step counts are kept for parity with the original
# run, though `fit` can infer them from the dataset length.
r = model.fit(
    train_ds_v,
    validation_data=val_ds_v,
    epochs=25,
    steps_per_epoch=len(train_ds_v),
    validation_steps=len(val_ds_v),
)
Epoch 1/25
/var/folders/3r/c8tg1h051m18qhsdccdysrt40000gn/T/ipykernel_14470/2541214992.py:10: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators. r = model.fit_generator(
93/93 [==============================] - 197s 2s/step - loss: 1.1542 - accuracy: 0.8938 - val_loss: 0.2841 - val_accuracy: 0.9742 Epoch 2/25 93/93 [==============================] - 202s 2s/step - loss: 0.1366 - accuracy: 0.9819 - val_loss: 0.5596 - val_accuracy: 0.9524 Epoch 3/25 93/93 [==============================] - 208s 2s/step - loss: 0.0817 - accuracy: 0.9913 - val_loss: 0.7281 - val_accuracy: 0.9416 Epoch 4/25 93/93 [==============================] - 213s 2s/step - loss: 0.0254 - accuracy: 0.9953 - val_loss: 0.2856 - val_accuracy: 0.9769 Epoch 5/25 93/93 [==============================] - 216s 2s/step - loss: 0.0513 - accuracy: 0.9916 - val_loss: 0.7943 - val_accuracy: 0.9511 Epoch 6/25 93/93 [==============================] - 219s 2s/step - loss: 0.0716 - accuracy: 0.9919 - val_loss: 0.4567 - val_accuracy: 0.9715 Epoch 7/25 93/93 [==============================] - 221s 2s/step - loss: 0.0669 - accuracy: 0.9916 - val_loss: 0.5951 - val_accuracy: 0.9688 Epoch 8/25 93/93 [==============================] - 222s 2s/step - loss: 0.0294 - accuracy: 0.9966 - val_loss: 0.3915 - val_accuracy: 0.9769 Epoch 9/25 93/93 [==============================] - 223s 2s/step - loss: 0.0047 - accuracy: 0.9990 - val_loss: 0.5019 - val_accuracy: 0.9688 Epoch 10/25 93/93 [==============================] - 224s 2s/step - loss: 0.0159 - accuracy: 0.9976 - val_loss: 0.5905 - val_accuracy: 0.9715 Epoch 11/25 93/93 [==============================] - 225s 2s/step - loss: 0.0134 - accuracy: 0.9976 - val_loss: 0.3234 - val_accuracy: 0.9810 Epoch 12/25 93/93 [==============================] - 227s 2s/step - loss: 0.1011 - accuracy: 0.9899 - val_loss: 0.5499 - val_accuracy: 0.9728 Epoch 13/25 93/93 [==============================] - 225s 2s/step - loss: 0.0076 - accuracy: 0.9983 - val_loss: 0.4216 - val_accuracy: 0.9728 Epoch 14/25 93/93 [==============================] - 226s 2s/step - loss: 0.0643 - accuracy: 0.9926 - val_loss: 0.8745 - val_accuracy: 0.9511 Epoch 15/25 93/93 
[==============================] - 226s 2s/step - loss: 0.0199 - accuracy: 0.9966 - val_loss: 0.4947 - val_accuracy: 0.9715 Epoch 16/25 93/93 [==============================] - 226s 2s/step - loss: 5.7203e-04 - accuracy: 0.9997 - val_loss: 0.4923 - val_accuracy: 0.9810 Epoch 17/25 93/93 [==============================] - 227s 2s/step - loss: 0.0131 - accuracy: 0.9970 - val_loss: 0.6881 - val_accuracy: 0.9647 Epoch 18/25 93/93 [==============================] - 225s 2s/step - loss: 0.0345 - accuracy: 0.9960 - val_loss: 0.4938 - val_accuracy: 0.9823 Epoch 19/25 93/93 [==============================] - 228s 2s/step - loss: 0.0126 - accuracy: 0.9987 - val_loss: 0.5642 - val_accuracy: 0.9688 Epoch 20/25 93/93 [==============================] - 226s 2s/step - loss: 0.0056 - accuracy: 0.9997 - val_loss: 0.4294 - val_accuracy: 0.9783 Epoch 21/25 93/93 [==============================] - 225s 2s/step - loss: 2.7678e-05 - accuracy: 1.0000 - val_loss: 0.4342 - val_accuracy: 0.9783 Epoch 22/25 93/93 [==============================] - 226s 2s/step - loss: 5.2474e-07 - accuracy: 1.0000 - val_loss: 0.4337 - val_accuracy: 0.9783 Epoch 23/25 93/93 [==============================] - 227s 2s/step - loss: 4.2286e-07 - accuracy: 1.0000 - val_loss: 0.4334 - val_accuracy: 0.9783 Epoch 24/25 93/93 [==============================] - 227s 2s/step - loss: 3.5546e-07 - accuracy: 1.0000 - val_loss: 0.4332 - val_accuracy: 0.9783 Epoch 25/25 93/93 [==============================] - 227s 2s/step - loss: 3.0831e-07 - accuracy: 1.0000 - val_loss: 0.4024 - val_accuracy: 0.9796
# Loss curves (train vs. validation).
# BUG FIX: savefig must run BEFORE show() — show() clears the current
# figure, so the original order wrote an empty image to disk (the notebook
# output "<Figure size 640x480 with 0 Axes>" confirms the blank save).
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('LossVal_loss')
plt.show()
<Figure size 640x480 with 0 Axes>
# Accuracy curves (train vs. validation).
# BUG FIX: savefig must run BEFORE show() — show() clears the current
# figure, so the original order wrote an empty image to disk (the notebook
# output "<Figure size 640x480 with 0 Axes>" confirms the blank save).
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('AccVal_acc')
plt.show()
<Figure size 640x480 with 0 Axes>
# Persist the fine-tuned ResNet (architecture + trained weights) to HDF5.
model.save('resnet_1.h5')
# Final evaluation on the held-out test set; returns [loss, accuracy]
# per the metrics configured in compile().
model.evaluate(test_ds_v)
29/29 [==============================] - 55s 2s/step - loss: 0.3070 - accuracy: 0.9828
[0.30702900886535645, 0.982758641242981]