147 KiB
147 KiB
Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop
Zadanie 9-10 - VGG16 + ResNet on train_test_sw
Przygotowanie danych
from IPython.display import Image, display
import sys
import subprocess
import pkg_resources
import numpy as np

# VGG16 expects inputs of shape (224, 224, 3); all images are resized to this.
newSize = (224, 224)

# Quietly pip-install any required third-party package that is not present
# in the current interpreter's environment.
required = {'scikit-image'}
installed = {pkg.key for pkg in pkg_resources.working_set}
missing = required - installed
if missing:
    subprocess.check_call(
        [sys.executable, '-m', 'pip', 'install', *missing],
        stdout=subprocess.DEVNULL,
    )
def load_train_data(input_dir):
    """Load training images from per-class subdirectories of *input_dir*.

    Each immediate subdirectory name is treated as a class label. Every
    image is cropped to 3 channels, resized to the global ``newSize``
    with INTER_AREA interpolation, and scaled to [0, 1].

    Returns a dict with keys:
        values           -- np.ndarray of the loaded, normalized images
        categories_name  -- list of class (subdirectory) names
        categories_count -- number of images found per class, same order
        labels           -- class name for each loaded image
    """
    # Only the imports actually used remain; the original also pulled in
    # pandas, seaborn, matplotlib, random, shutil and json, all unused here.
    import numpy as np
    from skimage.io import imread
    import cv2 as cv
    from pathlib import Path

    image_dir = Path(input_dir)
    # One directory scan serves both the category names and the image walk
    # (the original listed the directory twice).
    folders = [d for d in image_dir.iterdir() if d.is_dir()]
    categories_name = [d.name for d in folders]

    train_img = []
    categories_count = []
    labels = []
    for direc in folders:
        count = 0
        for obj in direc.iterdir():
            # Skip subdirectories and Windows Explorer metadata files.
            if obj.is_file() and obj.name != 'desktop.ini':
                labels.append(direc.name)
                count += 1
                img = imread(obj)  # ndarray: height x width x color depth
                img = img[:, :, :3]  # drop alpha channel if present
                img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)
                img = img / 255  # normalize pixel values to [0, 1]
                train_img.append(img)
        categories_count.append(count)

    return {
        "values": np.array(train_img),
        "categories_name": categories_name,
        "categories_count": categories_count,
        "labels": labels,
    }
def load_test_data(input_dir):
    """Load test images listed in ``test_labels.json`` beside *input_dir*.

    The JSON file (in the parent of *input_dir*) is a list of objects with
    ``filename`` and ``value`` (class label) fields. Images are cropped to
    3 channels, resized to the global ``newSize`` and scaled to [0, 1].

    Returns a dict with keys:
        values           -- np.ndarray of the loaded, normalized images
        categories_name  -- class labels in order of first appearance
        categories_count -- run lengths of consecutive identical labels
        labels           -- class label for each loaded image
    """
    # Only the imports actually used remain; the original also pulled in
    # pandas, os, random, shutil, seaborn and matplotlib, all unused here.
    import numpy as np
    from skimage.io import imread
    import cv2 as cv
    from pathlib import Path
    import json

    image_path = Path(input_dir)
    labels_path = image_path.parents[0] / 'test_labels.json'
    objects = json.loads(labels_path.read_text())

    # NOTE(review): this run-length count only yields per-class totals if
    # the JSON entries are grouped by class; interleaved classes would
    # produce wrong counts. Kept as-is to preserve behavior — verify the
    # ordering guarantee of test_labels.json.
    categories_name = []
    categories_count = []
    count = 0
    current = objects[0]['value']
    for entry in objects:
        if entry['value'] != current:
            categories_count.append(count)
            current = entry['value']
            count = 1
        else:
            count += 1
        if entry['value'] not in categories_name:
            categories_name.append(entry['value'])
    categories_count.append(count)

    test_img = []
    labels = []
    for entry in objects:
        img = imread(image_path / entry['filename'])  # H x W x color depth
        img = img[:, :, :3]  # drop alpha channel if present
        img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)
        img = img / 255  # normalize pixel values to [0, 1]
        test_img.append(img)
        labels.append(entry['value'])

    return {
        "values": np.array(test_img),
        "categories_name": categories_name,
        "categories_count": categories_count,
        "labels": labels,
    }
def create_tf_ds(X_train, y_train_enc, X_validate, y_validate_enc, X_test, y_test_enc):
    """Wrap the numpy splits into shuffled, batched tf.data pipelines.

    Prints the cardinality of each split, then shuffles and batches each
    dataset (batch size 32, incomplete final batch dropped).

    Returns (train_ds, test_ds, validation_ds).
    """
    import tensorflow as tf

    train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))
    validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))
    test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))

    train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()
    test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()
    validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()
    print("Training data size:", train_ds_size)
    print("Test data size:", test_ds_size)
    print("Validation data size:", validation_ds_size)

    train_ds = (train_ds
                .shuffle(buffer_size=train_ds_size)
                .batch(batch_size=32, drop_remainder=True))
    # BUG FIX: the test and validation pipelines were shuffled with
    # buffer_size=train_ds_size; each dataset must use its own size for a
    # full (uniform) shuffle of its elements.
    test_ds = (test_ds
               .shuffle(buffer_size=test_ds_size)
               .batch(batch_size=32, drop_remainder=True))
    validation_ds = (validation_ds
                     .shuffle(buffer_size=validation_ds_size)
                     .batch(batch_size=32, drop_remainder=True))

    return train_ds, test_ds, validation_ds
def get_run_logdir(root_logdir):
    """Build a unique, timestamped log-directory path under *root_logdir*."""
    import os
    import time
    # One directory per run, named run_YYYY_MM_DD-HH_MM_SS.
    return os.path.join(root_logdir, time.strftime("run_%Y_%m_%d-%H_%M_%S"))
def diagram_setup(model_name):
    """Create a TensorBoard callback logging under ./logs/fit/<model_name>/<run_id>.

    Returns the callback so callers can pass it to ``model.fit``; the
    original built the callback and silently discarded it.
    """
    from tensorflow import keras
    import os
    # BUG FIX: the path was built with hard-coded Windows backslashes
    # (f"logs\\fit\\{model_name}\\") even though this runs on POSIX systems;
    # os.path.join is portable.
    root_logdir = os.path.join(os.curdir, "logs", "fit", model_name)
    run_logdir = get_run_logdir(root_logdir)
    return keras.callbacks.TensorBoard(run_logdir)
# Data load: read the train/test image sets from disk.
data_train = load_train_data("./train_test_sw/train_sw")
values_train = data_train['values']
labels_train = data_train['labels']

data_test = load_test_data("./train_test_sw/test_sw")
X_test = data_test['values']
y_test = data_test['labels']

# Hold out 20% of the training data for validation (fixed seed for
# reproducibility).
from sklearn.model_selection import train_test_split
X_train, X_validate, y_train, y_validate = train_test_split(
    values_train, labels_train, test_size=0.2, random_state=42)

from sklearn.preprocessing import LabelEncoder
class_le = LabelEncoder()
y_train_enc = class_le.fit_transform(y_train)
# BUG FIX: validation/test labels were encoded with fit_transform, which
# re-fits the encoder on each split; if a split is missing a class (or
# orders differ), integer codes would silently diverge from the training
# encoding. Use transform with the encoder fitted on the training labels.
y_validate_enc = class_le.transform(y_validate)
y_test_enc = class_le.transform(y_test)

train_ds, test_ds, validation_ds = create_tf_ds(
    X_train, y_train_enc, X_validate, y_validate_enc, X_test, y_test_enc)
Training data size: 820 Test data size: 259 Validation data size: 206
VGG16
diagram_setup('vgg_sw')
import keras, os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten
from keras.preprocessing.image import ImageDataGenerator
import numpy as np

# VGG16 architecture built from scratch: five conv stages with 2x2 max
# pooling between them, then two 4096-unit dense layers and a 5-class
# softmax head.
model = keras.models.Sequential([
    keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation='relu', input_shape=(224,224,3), padding="same"),
    keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation='relu', input_shape=(224,224,3), padding="same"),
    keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    keras.layers.Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"),
    keras.layers.Flatten(),
    keras.layers.Dense(units=4096, activation='relu'),
    keras.layers.Dense(units=4096, activation='relu'),
    keras.layers.Dense(units=5, activation='softmax')
])

from keras.optimizers import Adam
# FIX: `lr` is deprecated (the run's own output warned about it);
# use `learning_rate` instead.
opt = Adam(learning_rate=0.001)
# Sparse labels (integer-encoded) -> sparse categorical crossentropy.
model.compile(optimizer=opt, loss=keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
/Users/jonas/Library/Python/3.9/lib/python/site-packages/keras/optimizers/optimizer_v2/adam.py:117: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead. super().__init__(name, **kwargs)
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 224, 224, 64) 1792 conv2d_1 (Conv2D) (None, 224, 224, 64) 36928 max_pooling2d (MaxPooling2D (None, 112, 112, 64) 0 ) conv2d_2 (Conv2D) (None, 112, 112, 128) 73856 conv2d_3 (Conv2D) (None, 112, 112, 128) 147584 max_pooling2d_1 (MaxPooling (None, 56, 56, 128) 0 2D) conv2d_4 (Conv2D) (None, 56, 56, 256) 295168 conv2d_5 (Conv2D) (None, 56, 56, 256) 590080 conv2d_6 (Conv2D) (None, 56, 56, 256) 590080 max_pooling2d_2 (MaxPooling (None, 28, 28, 256) 0 2D) conv2d_7 (Conv2D) (None, 28, 28, 512) 1180160 conv2d_8 (Conv2D) (None, 28, 28, 512) 2359808 conv2d_9 (Conv2D) (None, 28, 28, 512) 2359808 max_pooling2d_3 (MaxPooling (None, 14, 14, 512) 0 2D) conv2d_10 (Conv2D) (None, 14, 14, 512) 2359808 conv2d_11 (Conv2D) (None, 14, 14, 512) 2359808 conv2d_12 (Conv2D) (None, 14, 14, 512) 2359808 flatten (Flatten) (None, 100352) 0 dense (Dense) (None, 4096) 411045888 dense_1 (Dense) (None, 4096) 16781312 dense_2 (Dense) (None, 5) 20485 ================================================================= Total params: 442,562,373 Trainable params: 442,562,373 Non-trainable params: 0 _________________________________________________________________
from keras.callbacks import ModelCheckpoint, EarlyStopping
# FIX: `period` is deprecated (the run's own output warned about it);
# save_freq='epoch' is the supported equivalent of period=1.
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=False,
                             mode='auto', save_freq='epoch')
# Stop once validation accuracy has not improved for 20 epochs.
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20,
                      verbose=1, mode='auto')
# FIX: Model.fit_generator is deprecated; Model.fit accepts tf.data
# datasets directly.
hist_vgg = model.fit(train_ds,
                     steps_per_epoch=len(train_ds),
                     validation_data=validation_ds,
                     validation_steps=len(validation_ds),
                     epochs=2,
                     callbacks=[checkpoint, early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. Epoch 1/2
/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_8661/3543889534.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators. hist_vgg = model.fit_generator(steps_per_epoch=len(train_ds), generator=train_ds, validation_data= validation_ds, validation_steps=len(validation_ds), epochs=2, callbacks=[checkpoint,early]) 2023-01-06 03:00:40.894219: W tensorflow/tsl/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz
2/25 [=>............................] - ETA: 9:29 - loss: 1.5960 - accuracy: 0.2031
[0;31m---------------------------------------------------------------------------[0m [0;31mKeyboardInterrupt[0m Traceback (most recent call last) Cell [0;32mIn[28], line 4[0m [1;32m 2[0m checkpoint [39m=[39m ModelCheckpoint([39m"[39m[39mvgg16_1.h5[39m[39m"[39m, monitor[39m=[39m[39m'[39m[39mval_accuracy[39m[39m'[39m, verbose[39m=[39m[39m1[39m, save_best_only[39m=[39m[39mTrue[39;00m, save_weights_only[39m=[39m[39mFalse[39;00m, mode[39m=[39m[39m'[39m[39mauto[39m[39m'[39m, period[39m=[39m[39m1[39m) [1;32m 3[0m early [39m=[39m EarlyStopping(monitor[39m=[39m[39m'[39m[39mval_accuracy[39m[39m'[39m, min_delta[39m=[39m[39m0[39m, patience[39m=[39m[39m20[39m, verbose[39m=[39m[39m1[39m, mode[39m=[39m[39m'[39m[39mauto[39m[39m'[39m) [0;32m----> 4[0m hist_vgg [39m=[39m model[39m.[39;49mfit_generator(steps_per_epoch[39m=[39;49m[39mlen[39;49m(train_ds), generator[39m=[39;49mtrain_ds, validation_data[39m=[39;49m validation_ds, validation_steps[39m=[39;49m[39mlen[39;49m(validation_ds), epochs[39m=[39;49m[39m2[39;49m, callbacks[39m=[39;49m[checkpoint,early]) File [0;32m~/Library/Python/3.9/lib/python/site-packages/keras/engine/training.py:2604[0m, in [0;36mModel.fit_generator[0;34m(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)[0m [1;32m 2592[0m [39m[39m[39m"""Fits the model on data yielded batch-by-batch by a Python generator.[39;00m [1;32m 2593[0m [1;32m 2594[0m [39mDEPRECATED:[39;00m [1;32m 2595[0m [39m `Model.fit` now supports generators, so there is no longer any need to[39;00m [1;32m 2596[0m [39m use this endpoint.[39;00m [1;32m 2597[0m [39m"""[39;00m [1;32m 2598[0m warnings[39m.[39mwarn( [1;32m 2599[0m [39m"[39m[39m`Model.fit_generator` is deprecated and [39m[39m"[39m [1;32m 2600[0m [39m"[39m[39mwill be removed in a future version. 
[39m[39m"[39m [1;32m 2601[0m [39m"[39m[39mPlease use `Model.fit`, which supports generators.[39m[39m"[39m, [1;32m 2602[0m stacklevel[39m=[39m[39m2[39m, [1;32m 2603[0m ) [0;32m-> 2604[0m [39mreturn[39;00m [39mself[39;49m[39m.[39;49mfit( [1;32m 2605[0m generator, [1;32m 2606[0m steps_per_epoch[39m=[39;49msteps_per_epoch, [1;32m 2607[0m epochs[39m=[39;49mepochs, [1;32m 2608[0m verbose[39m=[39;49mverbose, [1;32m 2609[0m callbacks[39m=[39;49mcallbacks, [1;32m 2610[0m validation_data[39m=[39;49mvalidation_data, [1;32m 2611[0m validation_steps[39m=[39;49mvalidation_steps, [1;32m 2612[0m validation_freq[39m=[39;49mvalidation_freq, [1;32m 2613[0m class_weight[39m=[39;49mclass_weight, [1;32m 2614[0m max_queue_size[39m=[39;49mmax_queue_size, [1;32m 2615[0m workers[39m=[39;49mworkers, [1;32m 2616[0m use_multiprocessing[39m=[39;49muse_multiprocessing, [1;32m 2617[0m shuffle[39m=[39;49mshuffle, [1;32m 2618[0m initial_epoch[39m=[39;49minitial_epoch, [1;32m 2619[0m ) File [0;32m~/Library/Python/3.9/lib/python/site-packages/keras/utils/traceback_utils.py:65[0m, in [0;36mfilter_traceback.<locals>.error_handler[0;34m(*args, **kwargs)[0m [1;32m 63[0m filtered_tb [39m=[39m [39mNone[39;00m [1;32m 64[0m [39mtry[39;00m: [0;32m---> 65[0m [39mreturn[39;00m fn([39m*[39;49margs, [39m*[39;49m[39m*[39;49mkwargs) [1;32m 66[0m [39mexcept[39;00m [39mException[39;00m [39mas[39;00m e: [1;32m 67[0m filtered_tb [39m=[39m _process_traceback_frames(e[39m.[39m__traceback__) File [0;32m~/Library/Python/3.9/lib/python/site-packages/keras/engine/training.py:1650[0m, in [0;36mModel.fit[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)[0m [1;32m 1642[0m [39mwith[39;00m tf[39m.[39mprofiler[39m.[39mexperimental[39m.[39mTrace( [1;32m 1643[0m [39m"[39m[39mtrain[39m[39m"[39m, [1;32m 1644[0m 
epoch_num[39m=[39mepoch, [0;32m (...)[0m [1;32m 1647[0m _r[39m=[39m[39m1[39m, [1;32m 1648[0m ): [1;32m 1649[0m callbacks[39m.[39mon_train_batch_begin(step) [0;32m-> 1650[0m tmp_logs [39m=[39m [39mself[39;49m[39m.[39;49mtrain_function(iterator) [1;32m 1651[0m [39mif[39;00m data_handler[39m.[39mshould_sync: [1;32m 1652[0m context[39m.[39masync_wait() File [0;32m~/Library/Python/3.9/lib/python/site-packages/tensorflow/python/util/traceback_utils.py:150[0m, in [0;36mfilter_traceback.<locals>.error_handler[0;34m(*args, **kwargs)[0m [1;32m 148[0m filtered_tb [39m=[39m [39mNone[39;00m [1;32m 149[0m [39mtry[39;00m: [0;32m--> 150[0m [39mreturn[39;00m fn([39m*[39;49margs, [39m*[39;49m[39m*[39;49mkwargs) [1;32m 151[0m [39mexcept[39;00m [39mException[39;00m [39mas[39;00m e: [1;32m 152[0m filtered_tb [39m=[39m _process_traceback_frames(e[39m.[39m__traceback__) File [0;32m~/Library/Python/3.9/lib/python/site-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py:880[0m, in [0;36mFunction.__call__[0;34m(self, *args, **kwds)[0m [1;32m 877[0m compiler [39m=[39m [39m"[39m[39mxla[39m[39m"[39m [39mif[39;00m [39mself[39m[39m.[39m_jit_compile [39melse[39;00m [39m"[39m[39mnonXla[39m[39m"[39m [1;32m 879[0m [39mwith[39;00m OptionalXlaContext([39mself[39m[39m.[39m_jit_compile): [0;32m--> 880[0m result [39m=[39m [39mself[39;49m[39m.[39;49m_call([39m*[39;49margs, [39m*[39;49m[39m*[39;49mkwds) [1;32m 882[0m new_tracing_count [39m=[39m [39mself[39m[39m.[39mexperimental_get_tracing_count() [1;32m 883[0m without_tracing [39m=[39m (tracing_count [39m==[39m new_tracing_count) File [0;32m~/Library/Python/3.9/lib/python/site-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py:912[0m, in [0;36mFunction._call[0;34m(self, *args, **kwds)[0m [1;32m 909[0m [39mself[39m[39m.[39m_lock[39m.[39mrelease() [1;32m 910[0m [39m# In this case we have created variables on the first call, so we run the[39;00m [1;32m 911[0m [39m# defunned version which is guaranteed to 
never create variables.[39;00m [0;32m--> 912[0m [39mreturn[39;00m [39mself[39;49m[39m.[39;49m_no_variable_creation_fn([39m*[39;49margs, [39m*[39;49m[39m*[39;49mkwds) [39m# pylint: disable=not-callable[39;00m [1;32m 913[0m [39melif[39;00m [39mself[39m[39m.[39m_variable_creation_fn [39mis[39;00m [39mnot[39;00m [39mNone[39;00m: [1;32m 914[0m [39m# Release the lock early so that multiple threads can perform the call[39;00m [1;32m 915[0m [39m# in parallel.[39;00m [1;32m 916[0m [39mself[39m[39m.[39m_lock[39m.[39mrelease() File [0;32m~/Library/Python/3.9/lib/python/site-packages/tensorflow/python/eager/polymorphic_function/tracing_compiler.py:134[0m, in [0;36mTracingCompiler.__call__[0;34m(self, *args, **kwargs)[0m [1;32m 131[0m [39mwith[39;00m [39mself[39m[39m.[39m_lock: [1;32m 132[0m (concrete_function, [1;32m 133[0m filtered_flat_args) [39m=[39m [39mself[39m[39m.[39m_maybe_define_function(args, kwargs) [0;32m--> 134[0m [39mreturn[39;00m concrete_function[39m.[39;49m_call_flat( [1;32m 135[0m filtered_flat_args, captured_inputs[39m=[39;49mconcrete_function[39m.[39;49mcaptured_inputs) File [0;32m~/Library/Python/3.9/lib/python/site-packages/tensorflow/python/eager/polymorphic_function/monomorphic_function.py:1745[0m, in [0;36mConcreteFunction._call_flat[0;34m(self, args, captured_inputs, cancellation_manager)[0m [1;32m 1741[0m possible_gradient_type [39m=[39m gradients_util[39m.[39mPossibleTapeGradientTypes(args) [1;32m 1742[0m [39mif[39;00m (possible_gradient_type [39m==[39m gradients_util[39m.[39mPOSSIBLE_GRADIENT_TYPES_NONE [1;32m 1743[0m [39mand[39;00m executing_eagerly): [1;32m 1744[0m [39m# No tape is watching; skip to running the function.[39;00m [0;32m-> 1745[0m [39mreturn[39;00m [39mself[39m[39m.[39m_build_call_outputs([39mself[39;49m[39m.[39;49m_inference_function[39m.[39;49mcall( [1;32m 1746[0m ctx, args, cancellation_manager[39m=[39;49mcancellation_manager)) [1;32m 1747[0m forward_backward [39m=[39m [39mself[39m[39m.[39m_select_forward_and_backward_functions( 
[1;32m 1748[0m args, [1;32m 1749[0m possible_gradient_type, [1;32m 1750[0m executing_eagerly) [1;32m 1751[0m forward_function, args_with_tangents [39m=[39m forward_backward[39m.[39mforward() File [0;32m~/Library/Python/3.9/lib/python/site-packages/tensorflow/python/eager/polymorphic_function/monomorphic_function.py:378[0m, in [0;36m_EagerDefinedFunction.call[0;34m(self, ctx, args, cancellation_manager)[0m [1;32m 376[0m [39mwith[39;00m _InterpolateFunctionError([39mself[39m): [1;32m 377[0m [39mif[39;00m cancellation_manager [39mis[39;00m [39mNone[39;00m: [0;32m--> 378[0m outputs [39m=[39m execute[39m.[39;49mexecute( [1;32m 379[0m [39mstr[39;49m([39mself[39;49m[39m.[39;49msignature[39m.[39;49mname), [1;32m 380[0m num_outputs[39m=[39;49m[39mself[39;49m[39m.[39;49m_num_outputs, [1;32m 381[0m inputs[39m=[39;49margs, [1;32m 382[0m attrs[39m=[39;49mattrs, [1;32m 383[0m ctx[39m=[39;49mctx) [1;32m 384[0m [39melse[39;00m: [1;32m 385[0m outputs [39m=[39m execute[39m.[39mexecute_with_cancellation( [1;32m 386[0m [39mstr[39m([39mself[39m[39m.[39msignature[39m.[39mname), [1;32m 387[0m num_outputs[39m=[39m[39mself[39m[39m.[39m_num_outputs, [0;32m (...)[0m [1;32m 390[0m ctx[39m=[39mctx, [1;32m 391[0m cancellation_manager[39m=[39mcancellation_manager) File [0;32m~/Library/Python/3.9/lib/python/site-packages/tensorflow/python/eager/execute.py:52[0m, in [0;36mquick_execute[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)[0m [1;32m 50[0m [39mtry[39;00m: [1;32m 51[0m ctx[39m.[39mensure_initialized() [0;32m---> 52[0m tensors [39m=[39m pywrap_tfe[39m.[39;49mTFE_Py_Execute(ctx[39m.[39;49m_handle, device_name, op_name, [1;32m 53[0m inputs, attrs, num_outputs) [1;32m 54[0m [39mexcept[39;00m core[39m.[39m_NotOkStatusException [39mas[39;00m e: [1;32m 55[0m [39mif[39;00m name [39mis[39;00m [39mnot[39;00m [39mNone[39;00m: [0;31mKeyboardInterrupt[0m:
import matplotlib.pyplot as plt

# Plot the four training curves on a single figure.
for metric in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(hist_vgg.history[metric])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy", "Validation Accuracy", "Loss", "Validation Loss"])
plt.show()
ResNet50
from keras.layers import Input, Lambda, Dense, Flatten
from keras.models import Model
from keras.applications import ResNet50
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
import numpy as np
from glob import glob
import matplotlib.pyplot as plt

# Input resolution expected by the pretrained backbone.
IMAGE_SIZE = [224, 224]

# ImageNet-pretrained ResNet50 backbone, without its classification head.
resnet = ResNet50(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# Freeze the pretrained weights; only the new head below is trained.
for layer in resnet.layers:
    layer.trainable = False

# Number of target classes.
classes = 5

# New classification head on top of the frozen backbone.
x = Flatten()(resnet.output)
# x = Dense(1000, activation='relu')(x)
# FIX: use the `classes` variable instead of repeating the literal 5, so
# changing the class count is a one-line edit.
prediction = Dense(classes, activation='softmax')(x)
Model: "model_1" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_3 (InputLayer) [(None, 224, 224, 3 0 [] )] conv1_pad (ZeroPadding2D) (None, 230, 230, 3) 0 ['input_3[0][0]'] conv1_conv (Conv2D) (None, 112, 112, 64 9472 ['conv1_pad[0][0]'] ) conv1_bn (BatchNormalization) (None, 112, 112, 64 256 ['conv1_conv[0][0]'] ) conv1_relu (Activation) (None, 112, 112, 64 0 ['conv1_bn[0][0]'] ) pool1_pad (ZeroPadding2D) (None, 114, 114, 64 0 ['conv1_relu[0][0]'] ) pool1_pool (MaxPooling2D) (None, 56, 56, 64) 0 ['pool1_pad[0][0]'] conv2_block1_1_conv (Conv2D) (None, 56, 56, 64) 4160 ['pool1_pool[0][0]'] conv2_block1_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_1_conv[0][0]'] ization) conv2_block1_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_1_bn[0][0]'] n) conv2_block1_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block1_1_relu[0][0]'] conv2_block1_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_2_conv[0][0]'] ization) conv2_block1_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_2_bn[0][0]'] n) conv2_block1_0_conv (Conv2D) (None, 56, 56, 256) 16640 ['pool1_pool[0][0]'] conv2_block1_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block1_2_relu[0][0]'] conv2_block1_0_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_0_conv[0][0]'] ization) conv2_block1_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_3_conv[0][0]'] ization) conv2_block1_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_0_bn[0][0]', 'conv2_block1_3_bn[0][0]'] conv2_block1_out (Activation) (None, 56, 56, 256) 0 ['conv2_block1_add[0][0]'] conv2_block2_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block1_out[0][0]'] conv2_block2_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_1_conv[0][0]'] ization) conv2_block2_1_relu (Activatio (None, 56, 56, 64) 0 
['conv2_block2_1_bn[0][0]'] n) conv2_block2_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block2_1_relu[0][0]'] conv2_block2_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_2_conv[0][0]'] ization) conv2_block2_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_2_bn[0][0]'] n) conv2_block2_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block2_2_relu[0][0]'] conv2_block2_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block2_3_conv[0][0]'] ization) conv2_block2_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_out[0][0]', 'conv2_block2_3_bn[0][0]'] conv2_block2_out (Activation) (None, 56, 56, 256) 0 ['conv2_block2_add[0][0]'] conv2_block3_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block2_out[0][0]'] conv2_block3_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_1_conv[0][0]'] ization) conv2_block3_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_1_bn[0][0]'] n) conv2_block3_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block3_1_relu[0][0]'] conv2_block3_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_2_conv[0][0]'] ization) conv2_block3_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_2_bn[0][0]'] n) conv2_block3_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block3_2_relu[0][0]'] conv2_block3_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block3_3_conv[0][0]'] ization) conv2_block3_add (Add) (None, 56, 56, 256) 0 ['conv2_block2_out[0][0]', 'conv2_block3_3_bn[0][0]'] conv2_block3_out (Activation) (None, 56, 56, 256) 0 ['conv2_block3_add[0][0]'] conv3_block1_1_conv (Conv2D) (None, 28, 28, 128) 32896 ['conv2_block3_out[0][0]'] conv3_block1_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_1_conv[0][0]'] ization) conv3_block1_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_1_bn[0][0]'] n) conv3_block1_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block1_1_relu[0][0]'] conv3_block1_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_2_conv[0][0]'] ization) conv3_block1_2_relu (Activatio 
(None, 28, 28, 128) 0 ['conv3_block1_2_bn[0][0]'] n) conv3_block1_0_conv (Conv2D) (None, 28, 28, 512) 131584 ['conv2_block3_out[0][0]'] conv3_block1_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block1_2_relu[0][0]'] conv3_block1_0_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_0_conv[0][0]'] ization) conv3_block1_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_3_conv[0][0]'] ization) conv3_block1_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_0_bn[0][0]', 'conv3_block1_3_bn[0][0]'] conv3_block1_out (Activation) (None, 28, 28, 512) 0 ['conv3_block1_add[0][0]'] conv3_block2_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block1_out[0][0]'] conv3_block2_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_1_conv[0][0]'] ization) conv3_block2_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_1_bn[0][0]'] n) conv3_block2_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block2_1_relu[0][0]'] conv3_block2_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_2_conv[0][0]'] ization) conv3_block2_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_2_bn[0][0]'] n) conv3_block2_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block2_2_relu[0][0]'] conv3_block2_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block2_3_conv[0][0]'] ization) conv3_block2_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_out[0][0]', 'conv3_block2_3_bn[0][0]'] conv3_block2_out (Activation) (None, 28, 28, 512) 0 ['conv3_block2_add[0][0]'] conv3_block3_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block2_out[0][0]'] conv3_block3_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_1_conv[0][0]'] ization) conv3_block3_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_1_bn[0][0]'] n) conv3_block3_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block3_1_relu[0][0]'] conv3_block3_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_2_conv[0][0]'] ization) conv3_block3_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_2_bn[0][0]'] n) 
conv3_block3_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block3_2_relu[0][0]'] conv3_block3_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block3_3_conv[0][0]'] ization) conv3_block3_add (Add) (None, 28, 28, 512) 0 ['conv3_block2_out[0][0]', 'conv3_block3_3_bn[0][0]'] conv3_block3_out (Activation) (None, 28, 28, 512) 0 ['conv3_block3_add[0][0]'] conv3_block4_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block3_out[0][0]'] conv3_block4_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_1_conv[0][0]'] ization) conv3_block4_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_1_bn[0][0]'] n) conv3_block4_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block4_1_relu[0][0]'] conv3_block4_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_2_conv[0][0]'] ization) conv3_block4_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_2_bn[0][0]'] n) conv3_block4_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block4_2_relu[0][0]'] conv3_block4_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block4_3_conv[0][0]'] ization) conv3_block4_add (Add) (None, 28, 28, 512) 0 ['conv3_block3_out[0][0]', 'conv3_block4_3_bn[0][0]'] conv3_block4_out (Activation) (None, 28, 28, 512) 0 ['conv3_block4_add[0][0]'] conv4_block1_1_conv (Conv2D) (None, 14, 14, 256) 131328 ['conv3_block4_out[0][0]'] conv4_block1_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_1_conv[0][0]'] ization) conv4_block1_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_1_bn[0][0]'] n) conv4_block1_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block1_1_relu[0][0]'] conv4_block1_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_2_conv[0][0]'] ization) conv4_block1_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_2_bn[0][0]'] n) conv4_block1_0_conv (Conv2D) (None, 14, 14, 1024 525312 ['conv3_block4_out[0][0]'] ) conv4_block1_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block1_2_relu[0][0]'] ) conv4_block1_0_bn (BatchNormal (None, 14, 14, 1024 4096 
['conv4_block1_0_conv[0][0]'] ization) ) conv4_block1_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block1_3_conv[0][0]'] ization) ) conv4_block1_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_0_bn[0][0]', ) 'conv4_block1_3_bn[0][0]'] conv4_block1_out (Activation) (None, 14, 14, 1024 0 ['conv4_block1_add[0][0]'] ) conv4_block2_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block1_out[0][0]'] conv4_block2_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_1_conv[0][0]'] ization) conv4_block2_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_1_bn[0][0]'] n) conv4_block2_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block2_1_relu[0][0]'] conv4_block2_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_2_conv[0][0]'] ization) conv4_block2_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_2_bn[0][0]'] n) conv4_block2_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block2_2_relu[0][0]'] ) conv4_block2_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block2_3_conv[0][0]'] ization) ) conv4_block2_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_out[0][0]', ) 'conv4_block2_3_bn[0][0]'] conv4_block2_out (Activation) (None, 14, 14, 1024 0 ['conv4_block2_add[0][0]'] ) conv4_block3_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block2_out[0][0]'] conv4_block3_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_1_conv[0][0]'] ization) conv4_block3_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_1_bn[0][0]'] n) conv4_block3_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block3_1_relu[0][0]'] conv4_block3_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_2_conv[0][0]'] ization) conv4_block3_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_2_bn[0][0]'] n) conv4_block3_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block3_2_relu[0][0]'] ) conv4_block3_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block3_3_conv[0][0]'] ization) ) conv4_block3_add (Add) (None, 14, 14, 1024 0 ['conv4_block2_out[0][0]', 
) 'conv4_block3_3_bn[0][0]'] conv4_block3_out (Activation) (None, 14, 14, 1024 0 ['conv4_block3_add[0][0]'] ) conv4_block4_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block3_out[0][0]'] conv4_block4_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_1_conv[0][0]'] ization) conv4_block4_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_1_bn[0][0]'] n) conv4_block4_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block4_1_relu[0][0]'] conv4_block4_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_2_conv[0][0]'] ization) conv4_block4_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_2_bn[0][0]'] n) conv4_block4_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block4_2_relu[0][0]'] ) conv4_block4_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block4_3_conv[0][0]'] ization) ) conv4_block4_add (Add) (None, 14, 14, 1024 0 ['conv4_block3_out[0][0]', ) 'conv4_block4_3_bn[0][0]'] conv4_block4_out (Activation) (None, 14, 14, 1024 0 ['conv4_block4_add[0][0]'] ) conv4_block5_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block4_out[0][0]'] conv4_block5_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_1_conv[0][0]'] ization) conv4_block5_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_1_bn[0][0]'] n) conv4_block5_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block5_1_relu[0][0]'] conv4_block5_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_2_conv[0][0]'] ization) conv4_block5_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_2_bn[0][0]'] n) conv4_block5_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block5_2_relu[0][0]'] ) conv4_block5_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block5_3_conv[0][0]'] ization) ) conv4_block5_add (Add) (None, 14, 14, 1024 0 ['conv4_block4_out[0][0]', ) 'conv4_block5_3_bn[0][0]'] conv4_block5_out (Activation) (None, 14, 14, 1024 0 ['conv4_block5_add[0][0]'] ) conv4_block6_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block5_out[0][0]'] conv4_block6_1_bn 
(BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_1_conv[0][0]'] ization) conv4_block6_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_1_bn[0][0]'] n) conv4_block6_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block6_1_relu[0][0]'] conv4_block6_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_2_conv[0][0]'] ization) conv4_block6_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_2_bn[0][0]'] n) conv4_block6_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block6_2_relu[0][0]'] ) conv4_block6_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block6_3_conv[0][0]'] ization) ) conv4_block6_add (Add) (None, 14, 14, 1024 0 ['conv4_block5_out[0][0]', ) 'conv4_block6_3_bn[0][0]'] conv4_block6_out (Activation) (None, 14, 14, 1024 0 ['conv4_block6_add[0][0]'] ) conv5_block1_1_conv (Conv2D) (None, 7, 7, 512) 524800 ['conv4_block6_out[0][0]'] conv5_block1_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_1_conv[0][0]'] ization) conv5_block1_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_1_bn[0][0]'] n) conv5_block1_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block1_1_relu[0][0]'] conv5_block1_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_2_conv[0][0]'] ization) conv5_block1_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_2_bn[0][0]'] n) conv5_block1_0_conv (Conv2D) (None, 7, 7, 2048) 2099200 ['conv4_block6_out[0][0]'] conv5_block1_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block1_2_relu[0][0]'] conv5_block1_0_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_0_conv[0][0]'] ization) conv5_block1_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_3_conv[0][0]'] ization) conv5_block1_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_0_bn[0][0]', 'conv5_block1_3_bn[0][0]'] conv5_block1_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block1_add[0][0]'] conv5_block2_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block1_out[0][0]'] conv5_block2_1_bn (BatchNormal (None, 7, 7, 512) 2048 
['conv5_block2_1_conv[0][0]'] ization) conv5_block2_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_1_bn[0][0]'] n) conv5_block2_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block2_1_relu[0][0]'] conv5_block2_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_2_conv[0][0]'] ization) conv5_block2_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_2_bn[0][0]'] n) conv5_block2_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block2_2_relu[0][0]'] conv5_block2_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block2_3_conv[0][0]'] ization) conv5_block2_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_out[0][0]', 'conv5_block2_3_bn[0][0]'] conv5_block2_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block2_add[0][0]'] conv5_block3_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block2_out[0][0]'] conv5_block3_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_1_conv[0][0]'] ization) conv5_block3_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_1_bn[0][0]'] n) conv5_block3_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block3_1_relu[0][0]'] conv5_block3_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_2_conv[0][0]'] ization) conv5_block3_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_2_bn[0][0]'] n) conv5_block3_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block3_2_relu[0][0]'] conv5_block3_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block3_3_conv[0][0]'] ization) conv5_block3_add (Add) (None, 7, 7, 2048) 0 ['conv5_block2_out[0][0]', 'conv5_block3_3_bn[0][0]'] conv5_block3_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block3_add[0][0]'] flatten_6 (Flatten) (None, 100352) 0 ['conv5_block3_out[0][0]'] dense_13 (Dense) (None, 5) 501765 ['flatten_6[0][0]'] ================================================================================================== Total params: 24,089,477 Trainable params: 501,765 Non-trainable params: 23,587,712 
__________________________________________________________________________________________________ Epoch 1/5
/var/folders/_h/ljwht4gd7lb99rm1hm78h7_00000gn/T/ipykernel_13133/3879957867.py:45: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators. r = model.fit_generator(
25/25 [==============================] - 90s 3s/step - loss: 4.3387 - accuracy: 0.2100 - val_loss: 2.1355 - val_accuracy: 0.1875 Epoch 2/5 25/25 [==============================] - 84s 3s/step - loss: 1.9657 - accuracy: 0.2550 - val_loss: 2.3121 - val_accuracy: 0.2188 Epoch 3/5 25/25 [==============================] - 84s 3s/step - loss: 1.8658 - accuracy: 0.2600 - val_loss: 1.4832 - val_accuracy: 0.3073 Epoch 4/5 25/25 [==============================] - 83s 3s/step - loss: 1.7074 - accuracy: 0.2775 - val_loss: 1.5045 - val_accuracy: 0.3698 Epoch 5/5 25/25 [==============================] - 85s 3s/step - loss: 1.9758 - accuracy: 0.2800 - val_loss: 1.7073 - val_accuracy: 0.2812
[0;31m---------------------------------------------------------------------------[0m [0;31mKeyError[0m Traceback (most recent call last) Cell [0;32mIn [55], line 60[0m [1;32m 57[0m plt[39m.[39msavefig([39m'[39m[39mLossVal_loss[39m[39m'[39m) [1;32m 59[0m [39m# accuracies[39;00m [0;32m---> 60[0m plt[39m.[39mplot(r[39m.[39;49mhistory[[39m'[39;49m[39macc[39;49m[39m'[39;49m], label[39m=[39m[39m'[39m[39mtrain acc[39m[39m'[39m) [1;32m 61[0m plt[39m.[39mplot(r[39m.[39mhistory[[39m'[39m[39mval_acc[39m[39m'[39m], label[39m=[39m[39m'[39m[39mval acc[39m[39m'[39m) [1;32m 62[0m plt[39m.[39mlegend() [0;31mKeyError[0m: 'acc'
<Figure size 640x480 with 0 Axes>
# Assemble the transfer-learning model: the (frozen) ResNet backbone's
# input feeds through to the custom classification head built earlier.
model = Model(inputs=resnet.input, outputs=prediction)

# Print a layer-by-layer summary of the assembled architecture.
model.summary()

# Configure training: integer class labels -> sparse categorical
# cross-entropy, Adam optimizer, accuracy reported during training.
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Train the model.
# Fix: `Model.fit_generator` is deprecated (the run above emitted a
# UserWarning saying to use `Model.fit`, which accepts generators and
# tf.data datasets directly). Arguments are unchanged, so the returned
# History object `r` has the same `history` keys as before.
r = model.fit(
    train_ds,
    validation_data=validation_ds,
    epochs=100,
    steps_per_epoch=len(train_ds),
    validation_steps=len(validation_ds)
)
# Plot the training curves (accuracy and loss, train + validation)
# recorded in the History object returned by fit.
curves = ["accuracy", "val_accuracy", "loss", "val_loss"]
for key in curves:
    plt.plot(r.history[key])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy", "Validation Accuracy", "Loss", "Validation Loss"])
plt.show()
model.save('resnet_1.h5')