Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop
Set 9-10 / Task 2 - AlexNet, VGG16, ResNet on PlantVillage
Data preparation
from IPython.display import Image, display
import sys
import subprocess
import pkg_resources
import numpy as np
required = {'scikit-image'}
installed = {pkg.key for pkg in pkg_resources.working_set}
missing = required - installed
if missing:
    python = sys.executable
    subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL)
def load_data(input_dir, img_size):
    import numpy as np
    import os
    from skimage.io import imread
    import cv2 as cv
    from pathlib import Path

    image_dir = Path(input_dir)
    categories_name = []
    for file in os.listdir(image_dir):
        d = os.path.join(image_dir, file)
        if os.path.isdir(d):
            categories_name.append(file)

    folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]

    ds_img = []
    categories_count = []
    labels = []
    for i, direc in enumerate(folders):
        count = 0
        for obj in direc.iterdir():
            if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':
                labels.append(os.path.basename(os.path.normpath(direc)))
                count += 1
                img = imread(obj)  # returns an ndarray of shape xSize x ySize x colorDepth
                img = img[:, :, :3]  # drop a possible alpha channel
                img = cv.resize(img, img_size, interpolation=cv.INTER_AREA)  # returns an ndarray
                img = img / 255  # normalization to [0, 1]
                ds_img.append(img)
        categories_count.append(count)

    X = {}
    X["values"] = np.array(ds_img)
    X["categories_name"] = categories_name
    X["categories_count"] = categories_count
    X["labels"] = labels
    return X
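A minimal usage sketch of load_data (assuming the PlantVillage color images used below sit in ./plantvillage/color):

# Hedged example: load the dataset at AlexNet's input resolution
# and inspect what load_data returns.
data = load_data('./plantvillage/color', (227, 227))
print(data['values'].shape)      # (n_images, 227, 227, 3), pixels scaled to [0, 1]
print(data['categories_name'])   # one entry per class sub-directory
print(data['categories_count'])  # number of images found per class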
def get_run_logdir(root_logdir):
    import os
    import time
    run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S")
    return os.path.join(root_logdir, run_id)
def diagram_setup(model_name):
    from tensorflow import keras
    import os
    root_logdir = os.path.join(os.curdir, "logs", "fit", model_name)
    run_logdir = get_run_logdir(root_logdir)
    tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
    return tensorboard_cb
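A hedged usage sketch of the TensorBoard callback ('alexnet' is a placeholder run name; view the logs with tensorboard --logdir logs/fit):

# Hypothetical wiring of the callback into a training run.
tensorboard_cb = diagram_setup('alexnet')
# model.fit(train_ds, validation_data=val_ds, epochs=25, callbacks=[tensorboard_cb])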
def prepare_data(path, img_size, test_size, val_size):
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import LabelEncoder
    import tensorflow as tf

    data = load_data(path, img_size)
    values = data['values']
    labels = data['labels']

    X_train, X_test, y_train, y_test = train_test_split(values, labels, test_size=test_size, random_state=42)
    X_train, X_validate, y_train, y_validate = train_test_split(X_train, y_train, test_size=val_size, random_state=42)

    # Fit the encoder on the training labels only, then reuse the same mapping for the other splits.
    class_le = LabelEncoder()
    y_train_enc = class_le.fit_transform(y_train)
    y_validate_enc = class_le.transform(y_validate)
    y_test_enc = class_le.transform(y_test)

    train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))
    validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))
    test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))

    train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()
    test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()
    validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()

    # Dataset sizes
    print("Training:", train_ds_size)
    print("Test:", test_ds_size)
    print("Validation:", validation_ds_size)

    # Shuffle and batch each split
    train_ds = train_ds.shuffle(buffer_size=train_ds_size).batch(batch_size=32, drop_remainder=True)
    test_ds = test_ds.shuffle(buffer_size=test_ds_size).batch(batch_size=32, drop_remainder=True)
    validation_ds = validation_ds.shuffle(buffer_size=validation_ds_size).batch(batch_size=32, drop_remainder=True)

    return train_ds, test_ds, validation_ds
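One assumption-labeled refinement, not used in the runs below: passing stratify to train_test_split keeps the 12 class proportions equal across the splits. A self-contained toy demo:

from sklearn.model_selection import train_test_split

# Toy demo: stratify preserves the per-class ratio in both splits.
labels = ['a'] * 8 + ['b'] * 4
data = list(range(12))
d_tr, d_te, l_tr, l_te = train_test_split(data, labels, test_size=0.25,
                                          random_state=42, stratify=labels)
print(sorted(l_te))  # ['a', 'a', 'b'] - same 2:1 ratio as the full set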
AlexNet
from tensorflow import keras
import tensorflow as tf
import os
import time
model = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(12, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=0.001), metrics=['accuracy'])
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 55, 55, 96) 34944 max_pooling2d (MaxPooling2D (None, 27, 27, 96) 0 ) conv2d_1 (Conv2D) (None, 27, 27, 256) 614656 max_pooling2d_1 (MaxPooling (None, 13, 13, 256) 0 2D) conv2d_2 (Conv2D) (None, 13, 13, 384) 885120 conv2d_3 (Conv2D) (None, 13, 13, 384) 1327488 conv2d_4 (Conv2D) (None, 13, 13, 256) 884992 max_pooling2d_2 (MaxPooling (None, 6, 6, 256) 0 2D) flatten (Flatten) (None, 9216) 0 dense (Dense) (None, 4096) 37752832 dense_1 (Dense) (None, 4096) 16781312 dense_2 (Dense) (None, 12) 49164 ================================================================= Total params: 58,330,508 Trainable params: 58,330,508 Non-trainable params: 0 _________________________________________________________________
train_ds_a, test_ds_a, val_ds_a = prepare_data("./plantvillage/color", (227, 227), 0.2, 0.2)
Training: 7430 Test: 2323 Validation: 1858
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("alex_2.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')
alex = model.fit(
    train_ds_a,
    validation_data=val_ds_a,
    epochs=25,
    callbacks=[checkpoint, early])
Epoch 1/25 - 223s 956ms/step - loss: 2.1314 - accuracy: 0.2501 - val_loss: 1.6157 - val_accuracy: 0.4423 [val_accuracy improved from -inf to 0.44235, saving model to alex_2.h5]
Epoch 2/25 - 264s 1s/step - loss: 1.3779 - accuracy: 0.5031 - val_loss: 1.1473 - val_accuracy: 0.6061 [val_accuracy improved to 0.60614, saving model]
Epoch 3/25 - 266s 1s/step - loss: 1.0262 - accuracy: 0.6358 - val_loss: 0.9024 - val_accuracy: 0.6773 [val_accuracy improved to 0.67726, saving model]
Epoch 4/25 - 267s 1s/step - loss: 0.7844 - accuracy: 0.7259 - val_loss: 0.7740 - val_accuracy: 0.7225 [val_accuracy improved to 0.72252, saving model]
Epoch 5/25 - 269s 1s/step - loss: 0.5837 - accuracy: 0.7967 - val_loss: 0.5986 - val_accuracy: 0.7947 [val_accuracy improved to 0.79472, saving model]
Epoch 6/25 - 273s 1s/step - loss: 0.4601 - accuracy: 0.8393 - val_loss: 0.6495 - val_accuracy: 0.7769 [did not improve]
Epoch 7/25 - 274s 1s/step - loss: 0.3825 - accuracy: 0.8679 - val_loss: 0.4127 - val_accuracy: 0.8594 [val_accuracy improved to 0.85938, saving model]
Epoch 8/25 - 273s 1s/step - loss: 0.2899 - accuracy: 0.8978 - val_loss: 0.4238 - val_accuracy: 0.8540 [did not improve]
Epoch 9/25 - 270s 1s/step - loss: 0.2615 - accuracy: 0.9133 - val_loss: 0.3714 - val_accuracy: 0.8734 [val_accuracy improved to 0.87338, saving model]
Epoch 10/25 - 269s 1s/step - loss: 0.2115 - accuracy: 0.9247 - val_loss: 0.3794 - val_accuracy: 0.8750 [val_accuracy improved to 0.87500, saving model]
Epoch 11/25 - 270s 1s/step - loss: 0.1971 - accuracy: 0.9349 - val_loss: 0.4570 - val_accuracy: 0.8567 [did not improve]
Epoch 12/25 - 270s 1s/step - loss: 0.1495 - accuracy: 0.9500 - val_loss: 0.4067 - val_accuracy: 0.8766 [val_accuracy improved to 0.87662, saving model]
Epoch 13/25 - 269s 1s/step - loss: 0.1206 - accuracy: 0.9634 - val_loss: 0.4036 - val_accuracy: 0.8815 [val_accuracy improved to 0.88147, saving model]
Epoch 14/25 - 272s 1s/step - loss: 0.1667 - accuracy: 0.9593 - val_loss: 0.5347 - val_accuracy: 0.8292 [did not improve]
Epoch 15/25 - 277s 1s/step - loss: 0.1315 - accuracy: 0.9588 - val_loss: 0.7335 - val_accuracy: 0.8163 [did not improve]
Epoch 16/25 - 272s 1s/step - loss: 0.0950 - accuracy: 0.9731 - val_loss: 0.4444 - val_accuracy: 0.8831 [val_accuracy improved to 0.88308, saving model]
Epoch 17/25 - 273s 1s/step - loss: 0.0566 - accuracy: 0.9846 - val_loss: 0.6635 - val_accuracy: 0.8287 [did not improve]
Epoch 18/25 - 273s 1s/step - loss: 0.0443 - accuracy: 0.9880 - val_loss: 0.4852 - val_accuracy: 0.8863 [val_accuracy improved to 0.88631, saving model]
Epoch 19/25 - 274s 1s/step - loss: 0.0101 - accuracy: 0.9981 - val_loss: 0.4459 - val_accuracy: 0.9025 [val_accuracy improved to 0.90248, saving model]
Epoch 20/25 - 274s 1s/step - loss: 0.0031 - accuracy: 0.9995 - val_loss: 0.4574 - val_accuracy: 0.9079 [val_accuracy improved to 0.90787, saving model]
Epoch 21/25 - 278s 1s/step - loss: 0.0010 - accuracy: 1.0000 - val_loss: 0.4781 - val_accuracy: 0.9073 [did not improve]
Epoch 22/25 - 270s 1s/step - loss: 7.0759e-04 - accuracy: 1.0000 - val_loss: 0.4991 - val_accuracy: 0.9062 [did not improve]
Epoch 23/25 - 270s 1s/step - loss: 5.5237e-04 - accuracy: 1.0000 - val_loss: 0.5114 - val_accuracy: 0.9073 [did not improve]
Epoch 24/25 - 268s 1s/step - loss: 4.5192e-04 - accuracy: 1.0000 - val_loss: 0.5210 - val_accuracy: 0.9052 [did not improve]
Epoch 25/25 - 268s 1s/step - loss: 3.7889e-04 - accuracy: 1.0000 - val_loss: 0.5333 - val_accuracy: 0.9057 [did not improve]
import matplotlib.pyplot as plt
plt.plot(alex.history["accuracy"])
plt.plot(alex.history['val_accuracy'])
plt.plot(alex.history['loss'])
plt.plot(alex.history['val_loss'])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","Loss","Validation Loss"])
plt.show()
model.evaluate(test_ds_a)
72/72 [==============================] - 23s 318ms/step - loss: 0.4541 - accuracy: 0.9084
[0.45413827896118164, 0.9084201455116272]
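The evaluation above used the final-epoch weights; the ModelCheckpoint kept the epoch-20 weights (best val_accuracy 0.90787) in alex_2.h5. A small sketch to score those instead (assuming the file is present):

from keras.models import load_model

# Load the best-val_accuracy model saved by ModelCheckpoint above.
best_alex = load_model('alex_2.h5')
best_alex.evaluate(test_ds_a)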
VGG16
import os
import keras
import numpy as np
from keras.optimizers import Adam
model = keras.models.Sequential([
    keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation='relu', input_shape=(224,224,3), padding="same"),
    keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    keras.layers.Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    # Note: no fifth MaxPool before Flatten here, so Flatten sees 14x14x512 (cf. summary below).
    keras.layers.Flatten(),
    keras.layers.Dense(units=4096, activation='relu'),
    keras.layers.Dense(units=4096, activation='relu'),
    keras.layers.Dense(units=12, activation='softmax')
])
opt = Adam(learning_rate=0.001)
model.compile(optimizer=opt, loss=keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 224, 224, 64) 1792 conv2d_1 (Conv2D) (None, 224, 224, 64) 36928 max_pooling2d (MaxPooling2D (None, 112, 112, 64) 0 ) conv2d_2 (Conv2D) (None, 112, 112, 128) 73856 conv2d_3 (Conv2D) (None, 112, 112, 128) 147584 max_pooling2d_1 (MaxPooling (None, 56, 56, 128) 0 2D) conv2d_4 (Conv2D) (None, 56, 56, 256) 295168 conv2d_5 (Conv2D) (None, 56, 56, 256) 590080 conv2d_6 (Conv2D) (None, 56, 56, 256) 590080 max_pooling2d_2 (MaxPooling (None, 28, 28, 256) 0 2D) conv2d_7 (Conv2D) (None, 28, 28, 512) 1180160 conv2d_8 (Conv2D) (None, 28, 28, 512) 2359808 conv2d_9 (Conv2D) (None, 28, 28, 512) 2359808 max_pooling2d_3 (MaxPooling (None, 14, 14, 512) 0 2D) conv2d_10 (Conv2D) (None, 14, 14, 512) 2359808 conv2d_11 (Conv2D) (None, 14, 14, 512) 2359808 conv2d_12 (Conv2D) (None, 14, 14, 512) 2359808 flatten (Flatten) (None, 100352) 0 dense (Dense) (None, 4096) 411045888 dense_1 (Dense) (None, 4096) 16781312 dense_2 (Dense) (None, 12) 49164 ================================================================= Total params: 442,591,052 Trainable params: 442,591,052 Non-trainable params: 0 _________________________________________________________________
train_ds_v, test_ds_v, val_ds_v = prepare_data('./plantvillage/color', (224, 224), 0.2, 0.2)
Training: 7430 Test: 2323 Validation: 1858
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_2.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')
vgg = model.fit(train_ds_v, validation_data=val_ds_v, epochs=25, callbacks=[checkpoint, early])
Epoch 1/25 - 3659s 16s/step - loss: 2.4227 - accuracy: 0.1339 - val_loss: 2.4052 - val_accuracy: 0.1509 [val_accuracy improved from -inf to 0.15086, saving model to vgg16_2.h5]
Epoch 2/25 - 3761s 16s/step - loss: 2.4051 - accuracy: 0.1356 - val_loss: 2.4036 - val_accuracy: 0.1509 [did not improve]
Epoch 3/25 - 3712s 16s/step - loss: 2.4026 - accuracy: 0.1381 - val_loss: 2.4002 - val_accuracy: 0.1503 [did not improve]
Epoch 4/25 - 3690s 16s/step - loss: 2.4015 - accuracy: 0.1379 - val_loss: 2.4012 - val_accuracy: 0.1509 [did not improve]
Epoch 5/25 - 3695s 16s/step - loss: 2.4015 - accuracy: 0.1382 - val_loss: 2.3971 - val_accuracy: 0.1509 [did not improve]
Epoch 6/25 - 3703s 16s/step - loss: 2.4004 - accuracy: 0.1393 - val_loss: 2.3999 - val_accuracy: 0.1509 [did not improve]
Epoch 7/25 - 3678s 16s/step - loss: 2.4006 - accuracy: 0.1379 - val_loss: 2.3984 - val_accuracy: 0.1509 [did not improve]
Epoch 8/25 - 3677s 16s/step - loss: 2.4007 - accuracy: 0.1394 - val_loss: 2.3993 - val_accuracy: 0.1509 [did not improve]
Epoch 9/25 - 3660s 16s/step - loss: 2.4006 - accuracy: 0.1354 - val_loss: 2.3993 - val_accuracy: 0.1509 [did not improve]
Epoch 10/25 - 3696s 16s/step - loss: 2.4004 - accuracy: 0.1395 - val_loss: 2.3970 - val_accuracy: 0.1509 [did not improve]
Epoch 11/25 - 3672s 16s/step - loss: 2.4005 - accuracy: 0.1394 - val_loss: 2.4014 - val_accuracy: 0.1498 [did not improve]
Epoch 12/25 - 3548s 15s/step - loss: 2.4003 - accuracy: 0.1374 - val_loss: 2.3988 - val_accuracy: 0.1503 [did not improve]
Epoch 13/25 - 3600s 16s/step - loss: 2.4005 - accuracy: 0.1393 - val_loss: 2.3987 - val_accuracy: 0.1503 [did not improve]
Epoch 14/25 - 3600s 16s/step - loss: 2.4005 - accuracy: 0.1394 - val_loss: 2.3989 - val_accuracy: 0.1509 [did not improve]
Epoch 15/25 - 3261s 14s/step - loss: 2.4004 - accuracy: 0.1393 - val_loss: 2.3988 - val_accuracy: 0.1503 [did not improve]
Epoch 16/25 - 3359s 14s/step - loss: 2.3998 - accuracy: 0.1367 - val_loss: 2.3984 - val_accuracy: 0.1509 [did not improve]
Epoch 17/25 - 3397s 15s/step - loss: 2.4001 - accuracy: 0.1395 - val_loss: 2.4013 - val_accuracy: 0.1509 [did not improve]
Epoch 18/25 - 3391s 15s/step - loss: 2.3998 - accuracy: 0.1394 - val_loss: 2.3987 - val_accuracy: 0.1509 [did not improve]
Epoch 19/25 - 3483s 15s/step - loss: 2.3991 - accuracy: 0.1395 - val_loss: 2.4005 - val_accuracy: 0.1509 [did not improve]
Epoch 20/25 - 3464s 15s/step - loss: 2.4009 - accuracy: 0.1373 - val_loss: 2.3981 - val_accuracy: 0.1503 [did not improve]
Epoch 21/25 - 3464s 15s/step - loss: 2.3996 - accuracy: 0.1394 - val_loss: 2.3978 - val_accuracy: 0.1509 [did not improve]
Epoch 21: early stopping
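Training stalls near 13-14% train / 15% validation accuracy for all 21 epochs, barely above the 1/12 ≈ 8.3% chance level, so this from-scratch VGG16 never left its initial plateau. A hedged retry sketch (assumption: a smaller Adam step often helps deep plain nets trained from scratch; this was not re-run here):

from keras.optimizers import Adam

# Hypothetical retry with a 10x smaller learning rate.
model.compile(optimizer=Adam(learning_rate=1e-4),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])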
import matplotlib.pyplot as plt
plt.plot(vgg.history["accuracy"])
plt.plot(vgg.history['val_accuracy'])
plt.plot(vgg.history['loss'])
plt.plot(vgg.history['val_loss'])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","Loss","Validation Loss"])
plt.show()
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
Cell In [11], line 2
      1 import matplotlib.pyplot as plt
----> 2 plt.plot(vgg.history["accuracy"])
NameError: name 'vgg' is not defined

(The kernel was evidently restarted after the long VGG16 run, so vgg and model no longer exist here; the evaluation below fails for the same reason.)
model.evaluate(test_ds_v)
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
Cell In [5], line 1
----> 1 model.evaluate(test_ds_v)
NameError: name 'model' is not defined
ResNet50
from keras.layers import Dense, Flatten
from keras.models import Model
from keras.applications import ResNet50
import matplotlib.pyplot as plt

# Resize all images to this resolution
IMAGE_SIZE = [224, 224]

# ImageNet-pretrained ResNet50 backbone, without its classification head
resnet = ResNet50(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# Don't train the existing (pretrained) weights
for layer in resnet.layers:
    layer.trainable = False

# Number of classes
classes = 12

# Our layers - more could be added here
x = Flatten()(resnet.output)
# x = Dense(1000, activation='relu')(x)
prediction = Dense(classes, activation='softmax')(x)
# create a model object
model_resnet = Model(inputs=resnet.input, outputs=prediction)
# view the structure of the model
model_resnet.summary()
# tell the model what cost and optimization method to use
model_resnet.compile(
    loss='sparse_categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
Model: "model_1" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_2 (InputLayer) [(None, 224, 224, 3 0 [] )] conv1_pad (ZeroPadding2D) (None, 230, 230, 3) 0 ['input_2[0][0]'] conv1_conv (Conv2D) (None, 112, 112, 64 9472 ['conv1_pad[0][0]'] ) conv1_bn (BatchNormalization) (None, 112, 112, 64 256 ['conv1_conv[0][0]'] ) conv1_relu (Activation) (None, 112, 112, 64 0 ['conv1_bn[0][0]'] ) pool1_pad (ZeroPadding2D) (None, 114, 114, 64 0 ['conv1_relu[0][0]'] ) pool1_pool (MaxPooling2D) (None, 56, 56, 64) 0 ['pool1_pad[0][0]'] conv2_block1_1_conv (Conv2D) (None, 56, 56, 64) 4160 ['pool1_pool[0][0]'] conv2_block1_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_1_conv[0][0]'] ization) conv2_block1_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_1_bn[0][0]'] n) conv2_block1_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block1_1_relu[0][0]'] conv2_block1_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_2_conv[0][0]'] ization) conv2_block1_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_2_bn[0][0]'] n) conv2_block1_0_conv (Conv2D) (None, 56, 56, 256) 16640 ['pool1_pool[0][0]'] conv2_block1_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block1_2_relu[0][0]'] conv2_block1_0_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_0_conv[0][0]'] ization) conv2_block1_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_3_conv[0][0]'] ization) conv2_block1_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_0_bn[0][0]', 'conv2_block1_3_bn[0][0]'] conv2_block1_out (Activation) (None, 56, 56, 256) 0 ['conv2_block1_add[0][0]'] conv2_block2_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block1_out[0][0]'] conv2_block2_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_1_conv[0][0]'] ization) conv2_block2_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_1_bn[0][0]'] n) conv2_block2_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block2_1_relu[0][0]'] conv2_block2_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_2_conv[0][0]'] ization) conv2_block2_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_2_bn[0][0]'] n) conv2_block2_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block2_2_relu[0][0]'] conv2_block2_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block2_3_conv[0][0]'] ization) conv2_block2_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_out[0][0]', 'conv2_block2_3_bn[0][0]'] conv2_block2_out (Activation) (None, 56, 56, 256) 0 ['conv2_block2_add[0][0]'] conv2_block3_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block2_out[0][0]'] conv2_block3_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_1_conv[0][0]'] ization) conv2_block3_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_1_bn[0][0]'] n) conv2_block3_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block3_1_relu[0][0]'] conv2_block3_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_2_conv[0][0]'] ization) conv2_block3_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_2_bn[0][0]'] n) conv2_block3_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block3_2_relu[0][0]'] conv2_block3_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block3_3_conv[0][0]'] ization) conv2_block3_add (Add) (None, 56, 56, 256) 0 ['conv2_block2_out[0][0]', 'conv2_block3_3_bn[0][0]'] conv2_block3_out (Activation) (None, 56, 56, 256) 0 ['conv2_block3_add[0][0]'] conv3_block1_1_conv (Conv2D) (None, 
28, 28, 128) 32896 ['conv2_block3_out[0][0]'] conv3_block1_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_1_conv[0][0]'] ization) conv3_block1_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_1_bn[0][0]'] n) conv3_block1_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block1_1_relu[0][0]'] conv3_block1_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_2_conv[0][0]'] ization) conv3_block1_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_2_bn[0][0]'] n) conv3_block1_0_conv (Conv2D) (None, 28, 28, 512) 131584 ['conv2_block3_out[0][0]'] conv3_block1_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block1_2_relu[0][0]'] conv3_block1_0_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_0_conv[0][0]'] ization) conv3_block1_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_3_conv[0][0]'] ization) conv3_block1_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_0_bn[0][0]', 'conv3_block1_3_bn[0][0]'] conv3_block1_out (Activation) (None, 28, 28, 512) 0 ['conv3_block1_add[0][0]'] conv3_block2_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block1_out[0][0]'] conv3_block2_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_1_conv[0][0]'] ization) conv3_block2_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_1_bn[0][0]'] n) conv3_block2_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block2_1_relu[0][0]'] conv3_block2_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_2_conv[0][0]'] ization) conv3_block2_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_2_bn[0][0]'] n) conv3_block2_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block2_2_relu[0][0]'] conv3_block2_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block2_3_conv[0][0]'] ization) conv3_block2_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_out[0][0]', 'conv3_block2_3_bn[0][0]'] conv3_block2_out (Activation) (None, 28, 28, 512) 0 ['conv3_block2_add[0][0]'] conv3_block3_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block2_out[0][0]'] conv3_block3_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_1_conv[0][0]'] ization) conv3_block3_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_1_bn[0][0]'] n) conv3_block3_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block3_1_relu[0][0]'] conv3_block3_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_2_conv[0][0]'] ization) conv3_block3_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_2_bn[0][0]'] n) conv3_block3_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block3_2_relu[0][0]'] conv3_block3_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block3_3_conv[0][0]'] ization) conv3_block3_add (Add) (None, 28, 28, 512) 0 ['conv3_block2_out[0][0]', 'conv3_block3_3_bn[0][0]'] conv3_block3_out (Activation) (None, 28, 28, 512) 0 ['conv3_block3_add[0][0]'] conv3_block4_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block3_out[0][0]'] conv3_block4_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_1_conv[0][0]'] ization) conv3_block4_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_1_bn[0][0]'] n) conv3_block4_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block4_1_relu[0][0]'] conv3_block4_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_2_conv[0][0]'] ization) conv3_block4_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_2_bn[0][0]'] n) conv3_block4_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block4_2_relu[0][0]'] conv3_block4_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block4_3_conv[0][0]'] ization) conv3_block4_add (Add) (None, 28, 28, 512) 0 
['conv3_block3_out[0][0]', 'conv3_block4_3_bn[0][0]'] conv3_block4_out (Activation) (None, 28, 28, 512) 0 ['conv3_block4_add[0][0]'] conv4_block1_1_conv (Conv2D) (None, 14, 14, 256) 131328 ['conv3_block4_out[0][0]'] conv4_block1_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_1_conv[0][0]'] ization) conv4_block1_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_1_bn[0][0]'] n) conv4_block1_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block1_1_relu[0][0]'] conv4_block1_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_2_conv[0][0]'] ization) conv4_block1_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_2_bn[0][0]'] n) conv4_block1_0_conv (Conv2D) (None, 14, 14, 1024 525312 ['conv3_block4_out[0][0]'] ) conv4_block1_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block1_2_relu[0][0]'] ) conv4_block1_0_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block1_0_conv[0][0]'] ization) ) conv4_block1_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block1_3_conv[0][0]'] ization) ) conv4_block1_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_0_bn[0][0]', ) 'conv4_block1_3_bn[0][0]'] conv4_block1_out (Activation) (None, 14, 14, 1024 0 ['conv4_block1_add[0][0]'] ) conv4_block2_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block1_out[0][0]'] conv4_block2_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_1_conv[0][0]'] ization) conv4_block2_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_1_bn[0][0]'] n) conv4_block2_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block2_1_relu[0][0]'] conv4_block2_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_2_conv[0][0]'] ization) conv4_block2_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_2_bn[0][0]'] n) conv4_block2_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block2_2_relu[0][0]'] ) conv4_block2_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block2_3_conv[0][0]'] ization) ) conv4_block2_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_out[0][0]', ) 'conv4_block2_3_bn[0][0]'] conv4_block2_out (Activation) (None, 14, 14, 1024 0 ['conv4_block2_add[0][0]'] ) conv4_block3_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block2_out[0][0]'] conv4_block3_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_1_conv[0][0]'] ization) conv4_block3_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_1_bn[0][0]'] n) conv4_block3_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block3_1_relu[0][0]'] conv4_block3_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_2_conv[0][0]'] ization) conv4_block3_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_2_bn[0][0]'] n) conv4_block3_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block3_2_relu[0][0]'] ) conv4_block3_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block3_3_conv[0][0]'] ization) ) conv4_block3_add (Add) (None, 14, 14, 1024 0 ['conv4_block2_out[0][0]', ) 'conv4_block3_3_bn[0][0]'] conv4_block3_out (Activation) (None, 14, 14, 1024 0 ['conv4_block3_add[0][0]'] ) conv4_block4_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block3_out[0][0]'] conv4_block4_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_1_conv[0][0]'] ization) conv4_block4_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_1_bn[0][0]'] n) conv4_block4_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block4_1_relu[0][0]'] conv4_block4_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_2_conv[0][0]'] ization) conv4_block4_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_2_bn[0][0]'] n) conv4_block4_3_conv (Conv2D) 
(None, 14, 14, 1024 263168 ['conv4_block4_2_relu[0][0]'] ) conv4_block4_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block4_3_conv[0][0]'] ization) ) conv4_block4_add (Add) (None, 14, 14, 1024 0 ['conv4_block3_out[0][0]', ) 'conv4_block4_3_bn[0][0]'] conv4_block4_out (Activation) (None, 14, 14, 1024 0 ['conv4_block4_add[0][0]'] ) conv4_block5_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block4_out[0][0]'] conv4_block5_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_1_conv[0][0]'] ization) conv4_block5_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_1_bn[0][0]'] n) conv4_block5_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block5_1_relu[0][0]'] conv4_block5_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_2_conv[0][0]'] ization) conv4_block5_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_2_bn[0][0]'] n) conv4_block5_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block5_2_relu[0][0]'] ) conv4_block5_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block5_3_conv[0][0]'] ization) ) conv4_block5_add (Add) (None, 14, 14, 1024 0 ['conv4_block4_out[0][0]', ) 'conv4_block5_3_bn[0][0]'] conv4_block5_out (Activation) (None, 14, 14, 1024 0 ['conv4_block5_add[0][0]'] ) conv4_block6_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block5_out[0][0]'] conv4_block6_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_1_conv[0][0]'] ization) conv4_block6_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_1_bn[0][0]'] n) conv4_block6_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block6_1_relu[0][0]'] conv4_block6_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_2_conv[0][0]'] ization) conv4_block6_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_2_bn[0][0]'] n) conv4_block6_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block6_2_relu[0][0]'] ) conv4_block6_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block6_3_conv[0][0]'] ization) ) conv4_block6_add (Add) (None, 14, 14, 1024 0 ['conv4_block5_out[0][0]', ) 'conv4_block6_3_bn[0][0]'] conv4_block6_out (Activation) (None, 14, 14, 1024 0 ['conv4_block6_add[0][0]'] ) conv5_block1_1_conv (Conv2D) (None, 7, 7, 512) 524800 ['conv4_block6_out[0][0]'] conv5_block1_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_1_conv[0][0]'] ization) conv5_block1_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_1_bn[0][0]'] n) conv5_block1_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block1_1_relu[0][0]'] conv5_block1_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_2_conv[0][0]'] ization) conv5_block1_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_2_bn[0][0]'] n) conv5_block1_0_conv (Conv2D) (None, 7, 7, 2048) 2099200 ['conv4_block6_out[0][0]'] conv5_block1_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block1_2_relu[0][0]'] conv5_block1_0_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_0_conv[0][0]'] ization) conv5_block1_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_3_conv[0][0]'] ization) conv5_block1_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_0_bn[0][0]', 'conv5_block1_3_bn[0][0]'] conv5_block1_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block1_add[0][0]'] conv5_block2_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block1_out[0][0]'] conv5_block2_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_1_conv[0][0]'] ization) conv5_block2_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_1_bn[0][0]'] n) conv5_block2_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block2_1_relu[0][0]'] conv5_block2_2_bn (BatchNormal 
(None, 7, 7, 512) 2048 ['conv5_block2_2_conv[0][0]'] ization) conv5_block2_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_2_bn[0][0]'] n) conv5_block2_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block2_2_relu[0][0]'] conv5_block2_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block2_3_conv[0][0]'] ization) conv5_block2_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_out[0][0]', 'conv5_block2_3_bn[0][0]'] conv5_block2_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block2_add[0][0]'] conv5_block3_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block2_out[0][0]'] conv5_block3_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_1_conv[0][0]'] ization) conv5_block3_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_1_bn[0][0]'] n) conv5_block3_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block3_1_relu[0][0]'] conv5_block3_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_2_conv[0][0]'] ization) conv5_block3_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_2_bn[0][0]'] n) conv5_block3_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block3_2_relu[0][0]'] conv5_block3_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block3_3_conv[0][0]'] ization) conv5_block3_add (Add) (None, 7, 7, 2048) 0 ['conv5_block2_out[0][0]', 'conv5_block3_3_bn[0][0]'] conv5_block3_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block3_add[0][0]'] flatten_1 (Flatten) (None, 100352) 0 ['conv5_block3_out[0][0]'] dense_1 (Dense) (None, 12) 1204236 ['flatten_1[0][0]'] ================================================================================================== Total params: 24,791,948 Trainable params: 1,204,236 Non-trainable params: 23,587,712 __________________________________________________________________________________________________
train_ds_r, test_ds_r, val_ds_r = prepare_data('./plantvillage/color', img_size=IMAGE_SIZE, test_size=0.2, val_size=0.2)
Training: 7430 Test: 2323 Validation: 1858
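Note: load_data feeds the network pixels scaled to [0, 1], while the ImageNet weights in ResNet50 were trained on inputs prepared by keras.applications.resnet50.preprocess_input. A hedged adapter sketch (to_resnet_inputs is a hypothetical helper, not part of the runs recorded here):

import tensorflow as tf
from tensorflow.keras.applications.resnet50 import preprocess_input

# Hypothetical adapter: undo the [0, 1] scaling from load_data and apply
# the ImageNet preprocessing the pretrained weights expect.
def to_resnet_inputs(ds):
    return ds.map(lambda x, y: (preprocess_input(x * 255.0), y),
                  num_parallel_calls=tf.data.AUTOTUNE)

# train_ds_r = to_resnet_inputs(train_ds_r)  # likewise for val/test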
r = model_resnet.fit(
    train_ds_r,
    validation_data=val_ds_r,
    epochs=25
)
Epoch 1/25 - 297s 1s/step - loss: 0.6232 - accuracy: 0.8380 - val_loss: 1.2547 - val_accuracy: 0.7328
Epoch 2/25 - 277s 1s/step - loss: 0.4919 - accuracy: 0.8611 - val_loss: 0.8189 - val_accuracy: 0.8308
Epoch 3/25 - 299s 1s/step - loss: 0.6947 - accuracy: 0.8382 - val_loss: 0.5326 - val_accuracy: 0.8518
Epoch 4/25 - 306s 1s/step - loss: 0.6153 - accuracy: 0.8599 - val_loss: 1.1360 - val_accuracy: 0.7710
Epoch 5/25 - 311s 1s/step - loss: 0.5149 - accuracy: 0.8689 - val_loss: 1.3260 - val_accuracy: 0.7780
Epoch 6/25 - 313s 1s/step - loss: 0.6220 - accuracy: 0.8462 - val_loss: 0.8199 - val_accuracy: 0.8233
Epoch 7/25 - 318s 1s/step - loss: 0.6513 - accuracy: 0.8412 - val_loss: 1.1632 - val_accuracy: 0.7457
Epoch 8/25 - 320s 1s/step - loss: 0.5098 - accuracy: 0.8623 - val_loss: 0.8247 - val_accuracy: 0.8006
Epoch 9/25 - 323s 1s/step - loss: 0.5930 - accuracy: 0.8493 - val_loss: 0.4964 - val_accuracy: 0.8761
Epoch 10/25 - 324s 1s/step - loss: 0.5482 - accuracy: 0.8661 - val_loss: 0.8474 - val_accuracy: 0.8109
Epoch 11/25 - 322s 1s/step - loss: 0.5106 - accuracy: 0.8668 - val_loss: 1.2926 - val_accuracy: 0.7629
Epoch 12/25 - 322s 1s/step - loss: 0.5876 - accuracy: 0.8579 - val_loss: 1.0667 - val_accuracy: 0.7812
Epoch 13/25 - 323s 1s/step - loss: 0.6110 - accuracy: 0.8560 - val_loss: 0.5787 - val_accuracy: 0.8545
Epoch 14/25 - 323s 1s/step - loss: 0.5797 - accuracy: 0.8524 - val_loss: 0.6400 - val_accuracy: 0.8658
Epoch 15/25 - 326s 1s/step - loss: 0.4589 - accuracy: 0.8759 - val_loss: 0.6950 - val_accuracy: 0.8400
Epoch 16/25 - 324s 1s/step - loss: 0.5822 - accuracy: 0.8700 - val_loss: 1.4940 - val_accuracy: 0.7678
Epoch 17/25 - 322s 1s/step - loss: 0.4404 - accuracy: 0.8827 - val_loss: 1.5049 - val_accuracy: 0.7559
Epoch 18/25 - 321s 1s/step - loss: 0.6142 - accuracy: 0.8598 - val_loss: 0.8974 - val_accuracy: 0.8060
Epoch 19/25 - 322s 1s/step - loss: 0.5486 - accuracy: 0.8677 - val_loss: 1.5655 - val_accuracy: 0.7753
Epoch 20/25 - 326s 1s/step - loss: 0.3964 - accuracy: 0.8947 - val_loss: 0.7896 - val_accuracy: 0.8292
Epoch 21/25 - 324s 1s/step - loss: 0.4499 - accuracy: 0.8848 - val_loss: 1.7746 - val_accuracy: 0.7150
Epoch 22/25 - 323s 1s/step - loss: 0.4320 - accuracy: 0.8817 - val_loss: 1.2487 - val_accuracy: 0.7974
Epoch 23/25 - 322s 1s/step - loss: 0.4307 - accuracy: 0.8844 - val_loss: 0.6485 - val_accuracy: 0.8470
Epoch 24/25 - 322s 1s/step - loss: 0.4287 - accuracy: 0.8900 - val_loss: 1.5260 - val_accuracy: 0.7586
Epoch 25/25 - 323s 1s/step - loss: 0.6704 - accuracy: 0.8482 - val_loss: 0.7494 - val_accuracy: 0.8287
# loss
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('LossVal_loss')  # save before show(); show() clears the current figure
plt.show()
# accuracies
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('AccVal_acc')  # save before show(); show() clears the current figure
plt.show()
model_resnet.save('resnet_new_model_2.h5')
model_resnet.evaluate(test_ds_r)
72/72 [==============================] - 61s 843ms/step - loss: 0.7182 - accuracy: 0.8411
[0.7181549072265625, 0.8411458134651184]
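A short recap of the test scores actually recorded in this notebook (VGG16 has none, since its evaluation cell failed after the kernel restart):

# Test accuracies as measured above.
results = {'AlexNet': 0.9084, 'ResNet50 (frozen backbone)': 0.8411}
for name, acc in sorted(results.items(), key=lambda kv: -kv[1]):
    print(f'{name}: {acc:.2%}')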