Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop
Generating additional images based on edge filters
import os
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import json
from tensorflow import keras
%matplotlib inline
def alex(filter_name, train_ds, test_ds, validation_ds):
    from keras.callbacks import ModelCheckpoint, EarlyStopping
    import matplotlib.pyplot as plt
    import tensorflow as tf

    alexnet = keras.models.Sequential([
        keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
        keras.layers.BatchNormalization(),
        keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
        keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
        keras.layers.BatchNormalization(),
        keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
        keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
        keras.layers.BatchNormalization(),
        keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
        keras.layers.BatchNormalization(),
        keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
        keras.layers.BatchNormalization(),
        keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
        keras.layers.Flatten(),
        keras.layers.Dense(4096, activation='relu'),
        keras.layers.Dropout(.5),
        keras.layers.Dense(4096, activation='relu'),
        keras.layers.Dropout(.5),
        keras.layers.Dense(10, activation='softmax')
    ])

    # `lr` is deprecated in recent TF releases; use `learning_rate` instead.
    alexnet.compile(loss='sparse_categorical_crossentropy',
                    optimizer=tf.optimizers.SGD(learning_rate=.001),
                    metrics=['accuracy'])
    alexnet.summary()

    # `period` is deprecated; `save_freq='epoch'` keeps the old per-epoch behaviour.
    checkpoint = ModelCheckpoint("alex_2.h5", monitor='val_accuracy', verbose=1,
                                 save_best_only=True, save_weights_only=False,
                                 mode='auto', save_freq='epoch')
    early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')

    # `Model.fit_generator` is deprecated; `Model.fit` accepts tf.data datasets directly.
    alex = alexnet.fit(train_ds,
                       validation_data=validation_ds,
                       epochs=25,
                       callbacks=[checkpoint, early])

    plt.plot(alex.history["accuracy"])
    plt.plot(alex.history['val_accuracy'])
    plt.plot(alex.history['loss'])
    plt.plot(alex.history['val_loss'])
    plt.title(f"Model accuracy - {filter_name}")
    plt.ylabel("Value")
    plt.xlabel("Epoch")
    plt.legend(["Accuracy", "Validation Accuracy", "Loss", "Validation Loss"])
    plt.show()

    alexnet.evaluate(test_ds)
def fix_float_img(img):
    # Min-max scale to 0-255; `np.int` was removed from NumPy, so cast to uint8 for image output.
    img_normed = 255 * (img - img.min()) / (img.max() - img.min())
    img_normed = img_normed.astype(np.uint8)
    return img_normed
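As a quick sanity check of fix_float_img, a small made-up array (the expected output assumes the uint8 cast above):

sample = np.array([[-1.5, 0.0], [0.5, 2.5]])  # hypothetical float edge response
print(fix_float_img(sample))
# [[  0  95]
#  [127 255]]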
# directory = r"train_test_sw/train_sw_kontrast"
# subdirs = [r"/Tomato", r"/Lemon", r"/Beech", r"/Mean", r"/Gardenia"]
# json_entries = []
# for sub in subdirs:
# path = directory + sub
# for filename in os.listdir(path):
# f = os.path.join(path, filename)
# if os.path.isfile(f):
# img = cv.imread(f)
# lab= cv.cvtColor(img, cv.COLOR_BGR2LAB)
# l_channel, a, b = cv.split(lab)
# # Applying CLAHE to L-channel
# # feel free to try different values for the limit and grid size:
# clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
# cl = clahe.apply(l_channel)
# # merge the CLAHE enhanced L-channel with the a and b channel
# limg = cv.merge((cl,a,b))
# # Converting image from LAB Color model to BGR color spcae
# enhanced_img = cv.cvtColor(limg, cv.COLOR_LAB2BGR)
# filename_edge = f[:-4] + '_kontrast.png'
# #final_edge = fix_float_img(adjusted)
# cv.imwrite(filename_edge, enhanced_img)
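A minimal, uncommented version of the same CLAHE step for a single image, assuming a hypothetical sample.png:

import cv2 as cv

img = cv.imread("sample.png")  # hypothetical input path; any BGR image works
lab = cv.cvtColor(img, cv.COLOR_BGR2LAB)
l_channel, a, b = cv.split(lab)
clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# Equalize contrast only on lightness, then convert back to BGR for saving
enhanced = cv.cvtColor(cv.merge((clahe.apply(l_channel), a, b)), cv.COLOR_LAB2BGR)
cv.imwrite("sample_kontrast.png", enhanced)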
# directory = r"train_test_sw/train_sw_saturacja"
# subdirs = [r"/Tomato", r"/Lemon", r"/Beech", r"/Mean", r"/Gardenia"]
# json_entries = []
# for sub in subdirs:
# path = directory + sub
# for filename in os.listdir(path):
# f = os.path.join(path, filename)
# if os.path.isfile(f):
# img = cv.imread(f)
# hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
# greenMask = cv.inRange(hsv, (26, 10, 30), (97, 100, 255))
# hsv[:,:,1] = greenMask
# back = cv.cvtColor(hsv, cv.COLOR_HSV2RGB)
# filename_edge = f[:-4] + '_saturacja.png'
# #final_edge = fix_float_img(adjusted)
# cv.imwrite(filename_edge, back)
# directory = r"train_test_sw/train_sw_jezu"
# subdirs = [r"/Tomato", r"/Lemon", r"/Beech", r"/Mean", r"/Gardenia"]
# json_entries = []
# for sub in subdirs:
# path = directory + sub
# for filename in os.listdir(path):
# f = os.path.join(path, filename)
# if os.path.isfile(f):
# img = cv.imread(f)
# img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# ddepth = cv.CV_16S
# kernel_size = 3
# laplacian_operator = cv.Laplacian(img_gray, ddepth, ksize=kernel_size)
# filename_edge = f[:-4] + '_laplacian.png'
# #final_edge = fix_float_img(adjusted)
# cv.imwrite(filename_edge, laplacian_operator)
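A standalone sketch of the Laplacian step, including the signed-to-uint8 conversion (hypothetical sample.png):

import cv2 as cv

img = cv.imread("sample.png", cv.IMREAD_GRAYSCALE)  # hypothetical input
edges = cv.Laplacian(img, cv.CV_16S, ksize=3)       # signed response keeps negative values
edges_u8 = cv.convertScaleAbs(edges)                # |x| scaled into uint8 for saving/display
cv.imwrite("sample_laplacian.png", edges_u8)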
# directory = r"train_test_sw/train_sw"
# subdirs = [r"/Tomato", r"/Lemon", r"/Beech", r"/Mean", r"/Gardenia"]
# json_entries = []
# for sub in subdirs:
# path = directory + sub
# for filename in os.listdir(path):
# f = os.path.join(path, filename)
# if os.path.isfile(f):
# img = cv.imread(f)
# lab_image = cv.cvtColor(img, cv.COLOR_BGR2LAB)
# filename_edge = f[:-4] + '_lab.png'
# #final_edge = fix_float_img(adjusted)
# cv.imwrite(filename_edge, lab_image)
# directory = r"train_test_sw/train_sw_emboss"
# subdirs = [r"/Tomato", r"/Lemon", r"/Beech", r"/Mean", r"/Gardenia"]
# json_entries = []
# for sub in subdirs:
# path = directory + sub
# for filename in os.listdir(path):
# f = os.path.join(path, filename)
# if os.path.isfile(f):
# img = cv.imread(f)
# height, width = img.shape[:2]
# y = np.ones((height, width), np.uint8) * 128
# output = np.zeros((height, width), np.uint8)
# # generating the kernels
# kernel1 = np.array([[0, -1, -1], # kernel for embossing bottom left side
# [1, 0, -1],
# [1, 1, 0]])
# kernel2 = np.array([[-1, -1, 0], # kernel for embossing bottom right side
# [-1, 0, 1],
# [0, 1, 1]])
# # you can generate kernels for embossing top as well
# gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# output1 = cv.add(cv.filter2D(gray, -1, kernel1), y) # emboss on bottom left side
# output2 = cv.add(cv.filter2D(gray, -1, kernel2), y) # emboss on bottom right side
# for i in range(height):
# for j in range(width):
# output[i, j] = max(output1[i, j], output2[i, j]) # combining both embosses to produce stronger emboss
# filename_edge = f[:-4] + '_emboss.png'
# #final_edge = fix_float_img(adjusted)
# cv.imwrite(filename_edge, output)
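The nested pixel loop above is correct but slow in pure Python; np.maximum computes the same element-wise combination in one vectorized call. A self-contained sketch with made-up response arrays:

import numpy as np

# Two hypothetical emboss responses (uint8), standing in for the filter2D outputs above
output1 = np.array([[120, 200], [90, 130]], dtype=np.uint8)
output2 = np.array([[140, 180], [95, 128]], dtype=np.uint8)

# Equivalent to the nested loop: element-wise maximum of the two responses
output = np.maximum(output1, output2)
print(output)  # [[140 200]
               #  [ 95 130]]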
# directory = r"train_test_sw/train_sw_cartoon"
# subdirs = [r"/Tomato", r"/Lemon", r"/Beech", r"/Mean", r"/Gardenia"]
# json_entries = []
# for sub in subdirs:
# path = directory + sub
# for filename in os.listdir(path):
# f = os.path.join(path, filename)
# if os.path.isfile(f):
# img = cv.imread(f)
# edges1 = cv.bitwise_not(cv.Canny(img, 100, 200)) # for thin edges and inverting the mask obatined
# gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# gray = cv.medianBlur(gray, 5) # applying median blur with kernel size of 5
# edges2 = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 7, 7) # thick edges
# dst = cv.edgePreservingFilter(img, flags=2, sigma_s=20, sigma_r=0.1) # you can also use bilateral filter but that is slow
# # flag = 1 for RECURS_FILTER (Recursive Filtering) and 2 for NORMCONV_FILTER (Normalized Convolution). NORMCONV_FILTER produces sharpening of the edges but is slower.
# # sigma_s controls the size of the neighborhood. Range 1 - 200
# # sigma_r controls the how dissimilar colors within the neighborhood will be averaged. A larger sigma_r results in large regions of constant color. Range 0 - 1
# cartoon = cv.bitwise_and(dst, dst, mask=edges1) # adding thin edges to smoothened imag
# filename_edge = f[:-4] + '_cartoon.png'
# #final_edge = fix_float_img(adjusted)
# cv.imwrite(filename_edge, cartoon)
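To get a feel for the sigma_s/sigma_r trade-off described in the comments, a hedged side-by-side sketch on a hypothetical sample.png (the two parameter pairs are illustrative, not the notebook's settings):

import cv2 as cv

img = cv.imread("sample.png")  # hypothetical input
# Small neighbourhood, mild smoothing: detail mostly preserved
mild = cv.edgePreservingFilter(img, flags=2, sigma_s=20, sigma_r=0.1)
# Large neighbourhood, aggressive smoothing: big flat colour regions
flat = cv.edgePreservingFilter(img, flags=2, sigma_s=150, sigma_r=0.6)
cv.imwrite("sample_mild.png", mild)
cv.imwrite("sample_flat.png", flat)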
Data
import sys
import subprocess
import pkg_resources
import numpy as np

# Install scikit-image if it is missing from the environment
required = {'scikit-image'}
installed = {pkg.key for pkg in pkg_resources.working_set}
missing = required - installed
if missing:
    python = sys.executable
    subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL)
def load_train_data(input_dir, newSize=(227,227)):
    import numpy as np
    import os
    from skimage.io import imread
    import cv2 as cv
    from pathlib import Path

    image_dir = Path(input_dir)
    categories_name = []
    for file in os.listdir(image_dir):
        d = os.path.join(image_dir, file)
        if os.path.isdir(d):
            categories_name.append(file)

    folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]

    train_img = []
    categories_count = []
    labels = []
    for i, direc in enumerate(folders):
        count = 0
        for obj in direc.iterdir():
            if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':
                labels.append(os.path.basename(os.path.normpath(direc)))
                count += 1
                img = imread(obj)  # returns an ndarray of shape xSize x ySize x colorDepth
                if img.ndim == 2:  # grayscale image: replicate the single channel three times
                    img = np.repeat(img[..., np.newaxis], 3, axis=2)
                elif img.shape[-1] == 4:  # RGBA image: drop the alpha channel
                    img = img[:, :, :3]
                img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)  # returns an ndarray
                img = img / 255  # normalization to [0, 1]
                train_img.append(img)
        categories_count.append(count)

    X = {}
    X["values"] = np.array(train_img)
    X["categories_name"] = categories_name
    X["categories_count"] = categories_count
    X["labels"] = labels
    return X
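A minimal hedged usage of load_train_data; the directory name follows the train_sw_<filter> convention used later in the notebook and must exist locally:

data = load_train_data("./train_test_sw/train_sw_emboss")
print(data["values"].shape)     # (N, 227, 227, 3) after resizing
print(data["categories_name"])  # subdirectory names, one per class
print(len(data["labels"]))      # one label per loaded image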
def load_test_data(input_dir, newSize=(227,227)):
    import numpy as np
    import os
    from skimage.io import imread
    import cv2 as cv
    from pathlib import Path
    import json

    image_path = Path(input_dir)
    labels_path = image_path.parents[0] / 'test_labels.json'
    jsonString = labels_path.read_text()
    objects = json.loads(jsonString)

    categories_name = []
    categories_count = []
    count = 0
    c = objects[0]['value']
    for e in objects:
        if e['value'] != c:
            categories_count.append(count)
            c = e['value']
            count = 1
        else:
            count += 1
        if not e['value'] in categories_name:
            categories_name.append(e['value'])
    categories_count.append(count)

    test_img = []
    labels = []
    for e in objects:
        p = image_path / e['filename']
        img = imread(p)  # returns an ndarray of shape xSize x ySize x colorDepth
        img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)  # returns an ndarray
        img = img / 255  # normalization to [0, 1]
        test_img.append(img)
        labels.append(e['value'])

    X = {}
    X["values"] = np.array(test_img)
    X["categories_name"] = categories_name
    X["categories_count"] = categories_count
    X["labels"] = labels
    return X
def data_prep_alex(filter_name):
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import LabelEncoder
    import tensorflow as tf

    data_train = load_train_data(f"./train_test_sw/train_sw_{filter_name}")
    values_train = data_train['values']
    labels_train = data_train['labels']

    data_test = load_test_data("./train_test_sw/test_sw")
    X_test = data_test['values']
    y_test = data_test['labels']

    X_train, X_validate, y_train, y_validate = train_test_split(values_train, labels_train, test_size=0.2, random_state=42)

    # Fit the encoder once on the training labels and reuse it, so the
    # integer mapping stays consistent across all three splits.
    class_le = LabelEncoder()
    y_train_enc = class_le.fit_transform(y_train)
    y_validate_enc = class_le.transform(y_validate)
    y_test_enc = class_le.transform(y_test)

    train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))
    validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))
    test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))

    train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()
    test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()
    validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()

    train_ds = (train_ds
                .shuffle(buffer_size=train_ds_size)
                .batch(batch_size=32, drop_remainder=True))
    test_ds = (test_ds
               .shuffle(buffer_size=test_ds_size)
               .batch(batch_size=32, drop_remainder=True))
    validation_ds = (validation_ds
                     .shuffle(buffer_size=validation_ds_size)
                     .batch(batch_size=32, drop_remainder=True))

    return train_ds, test_ds, validation_ds
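A quick hedged check of what data_prep_alex returns (assuming the emboss dataset directory exists):

train_ds, test_ds, validation_ds = data_prep_alex('emboss')
for images, labels in train_ds.take(1):
    print(images.shape)  # (32, 227, 227, 3): one batch of resized, normalized images
    print(labels.shape)  # (32,): integer labels, as required by sparse_categorical_crossentropy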
Emboss
# train_ds
# train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()
# test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()
# validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()
# print("Training data size:", train_ds_size)
# print("Test data size:", test_ds_size)
# print("Validation data size:", validation_ds_size)
Saturation
# from sklearn.preprocessing import LabelEncoder
# # Data load
# data_train = load_train_data("train_test_sw_kontrast/train_sw", newSize=(16,16))
# X_train = data_train['values']
# y_train = data_train['labels']
# data_test = load_test_data("train_test_sw_kontrast/test_sw", newSize=(16,16))
# X_test = data_test['values']
# y_test = data_test['labels']
# class_le = LabelEncoder()
# y_train_enc = class_le.fit_transform(y_train)
# y_test_enc = class_le.fit_transform(y_test)
# X_train = X_train.flatten().reshape(X_train.shape[0], int(np.prod(X_train.shape) / X_train.shape[0]))
# X_test = X_test.flatten().reshape(X_test.shape[0], int(np.prod(X_test.shape) / X_test.shape[0]))
ALEXNET
filters = ['laplasian', 'kontrast', 'cartoon', 'saturacja', 'emboss']  # suffixes of the train_sw_<filter> directories

data_test = load_test_data("./train_test_sw/test_sw")
X_test = data_test['values']
y_test = data_test['labels']

for filter_name in filters:  # avoid shadowing the built-in filter()
    print(f"{filter_name} ---------------------------------------")
    train_ds, test_ds, validation_ds = data_prep_alex(filter_name)
    alex(filter_name, train_ds, test_ds, validation_ds)
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 55, 55, 96) 34944 batch_normalization (BatchN (None, 55, 55, 96) 384 ormalization) max_pooling2d (MaxPooling2D (None, 27, 27, 96) 0 ) conv2d_1 (Conv2D) (None, 27, 27, 256) 614656 batch_normalization_1 (Batc (None, 27, 27, 256) 1024 hNormalization) max_pooling2d_1 (MaxPooling (None, 13, 13, 256) 0 2D) conv2d_2 (Conv2D) (None, 13, 13, 384) 885120 batch_normalization_2 (Batc (None, 13, 13, 384) 1536 hNormalization) conv2d_3 (Conv2D) (None, 13, 13, 384) 1327488 batch_normalization_3 (Batc (None, 13, 13, 384) 1536 hNormalization) conv2d_4 (Conv2D) (None, 13, 13, 256) 884992 batch_normalization_4 (Batc (None, 13, 13, 256) 1024 hNormalization) max_pooling2d_2 (MaxPooling (None, 6, 6, 256) 0 2D) flatten (Flatten) (None, 9216) 0 dense (Dense) (None, 4096) 37752832 dropout (Dropout) (None, 4096) 0 dense_1 (Dense) (None, 4096) 16781312 dropout_1 (Dropout) (None, 4096) 0 dense_2 (Dense) (None, 10) 40970 ================================================================= Total params: 58,327,818 Trainable params: 58,325,066 Non-trainable params: 2,752 _________________________________________________________________ WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. /var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_35367/157534861.py:34: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators. alex = alexnet.fit_generator(
First training run (51 batches per epoch; alex_2.h5 is checkpointed whenever val_accuracy improves):

Epoch   loss     accuracy   val_loss   val_accuracy   checkpoint
1       3.5934   0.3903     1.9219     0.2474         saved (best so far: 0.24740)
2       1.2680   0.5699     2.9384     0.2370         -
3       0.8801   0.6930     4.2987     0.2318         -
4       0.8070   0.7181     5.2133     0.2760         saved (0.27604)
5       0.6284   0.7714     5.1982     0.2891         saved (0.28906)
6       0.5519   0.7996     5.3340     0.3464         saved (0.34635)
7       0.5127   0.8205     4.6689     0.3307         -
8       0.4584   0.8364     4.0851     0.3490         saved (0.34896)
9       0.3952   0.8585     2.6378     0.3984         saved (0.39844)
10      0.3141   0.8811     2.3606     0.4375         saved (0.43750)
11      0.2889   0.8922     1.1387     0.6562         saved (0.65625)
12      0.2696   0.8977     1.1794     0.6328         -
13      0.2124   0.9271     0.4526     0.8307         saved (0.83073)
14      0.1891   0.9228     0.5985     0.7943         -
15      0.1603   0.9381     0.4779     0.8333         saved (0.83333)
16      0.1852   0.9314     0.3588     0.8698         saved (0.86979)
17      0.1484   0.9504     0.3464     0.8750         saved (0.87500)
18      0.1367   0.9534     0.4452     0.8464         -
19      0.1089   0.9638     0.3376     0.8906         saved (0.89062)
20      0.1115   0.9596     0.3655     0.8854         -
21      0.0793   0.9681     0.4086     0.8776         -
22      0.0725   0.9767     0.2975     0.9036         saved (0.90365)
23      0.0727   0.9755     0.4552     0.8698         -
24      0.0659   0.9737     0.3930     0.8854         -
25      0.0693   0.9816     0.6543     0.8177         -
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_5 (Conv2D) (None, 55, 55, 96) 34944 batch_normalization_5 (Batc (None, 55, 55, 96) 384 hNormalization) max_pooling2d_3 (MaxPooling (None, 27, 27, 96) 0 2D) conv2d_6 (Conv2D) (None, 27, 27, 256) 614656 batch_normalization_6 (Batc (None, 27, 27, 256) 1024 hNormalization) max_pooling2d_4 (MaxPooling (None, 13, 13, 256) 0 2D) conv2d_7 (Conv2D) (None, 13, 13, 384) 885120 batch_normalization_7 (Batc (None, 13, 13, 384) 1536 hNormalization) conv2d_8 (Conv2D) (None, 13, 13, 384) 1327488 batch_normalization_8 (Batc (None, 13, 13, 384) 1536 hNormalization) conv2d_9 (Conv2D) (None, 13, 13, 256) 884992 batch_normalization_9 (Batc (None, 13, 13, 256) 1024 hNormalization) max_pooling2d_5 (MaxPooling (None, 6, 6, 256) 0 2D) flatten_1 (Flatten) (None, 9216) 0 dense_3 (Dense) (None, 4096) 37752832 dropout_2 (Dropout) (None, 4096) 0 dense_4 (Dense) (None, 4096) 16781312 dropout_3 (Dropout) (None, 4096) 0 dense_5 (Dense) (None, 10) 40970 ================================================================= Total params: 58,327,818 Trainable params: 58,325,066 Non-trainable params: 2,752 _________________________________________________________________ WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Second training run (log truncated mid-epoch 4):

Epoch   loss     accuracy   val_loss   val_accuracy   checkpoint
1       3.6077   0.3566     1.8337     0.2630         saved (best so far: 0.26302)
2       1.2408   0.5803     2.8576     0.3438         saved (0.34375)
3       0.9538   0.6550     4.7057     0.3568         saved (0.35677)
4       0.7304   0.7273     (run truncated at 44/51 batches)