208 KiB
208 KiB
Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop
Generowanie dodatkowych zdjęć w oparciu o filtry krawędziowe
import os
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import json
from tensorflow import keras
%matplotlib inline
def alex(filter_name, train_ds, test_ds, validation_ds):
    """Build, train, and evaluate an AlexNet classifier.

    Parameters
    ----------
    filter_name : str
        Name of the edge/augmentation filter used to produce the training
        data; used only to label the history plot.
    train_ds, test_ds, validation_ds : tf.data.Dataset
        Batched (image, integer-label) datasets; images must be 227x227x3
        (AlexNet input size).

    Side effects: saves the best checkpoint to "alex_2.h5", shows a
    matplotlib plot of the training history, and prints the test evaluation.
    """
    from keras.callbacks import ModelCheckpoint, EarlyStopping
    import matplotlib.pyplot as plt
    import tensorflow as tf

    # Classic AlexNet: 5 conv layers + 3 dense layers, with
    # BatchNormalization after each conv block.
    alexnet = keras.models.Sequential([
        keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
        keras.layers.BatchNormalization(),
        keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
        keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
        keras.layers.BatchNormalization(),
        keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
        keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
        keras.layers.BatchNormalization(),
        keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
        keras.layers.BatchNormalization(),
        keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
        keras.layers.BatchNormalization(),
        keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
        keras.layers.Flatten(),
        keras.layers.Dense(4096, activation='relu'),
        keras.layers.Dropout(.5),
        keras.layers.Dense(4096, activation='relu'),
        keras.layers.Dropout(.5),
        keras.layers.Dense(10, activation='softmax')
    ])

    # FIX: `lr` is deprecated (the run log shows the absl warning);
    # `learning_rate` is the supported name.
    alexnet.compile(loss='sparse_categorical_crossentropy',
                    optimizer=tf.optimizers.SGD(learning_rate=.001),
                    metrics=['accuracy'])
    alexnet.summary()

    # FIX: `period=1` is deprecated; `save_freq='epoch'` is the equivalent.
    checkpoint = ModelCheckpoint("alex_2.h5", monitor='val_accuracy', verbose=1,
                                 save_best_only=True, save_weights_only=False,
                                 mode='auto', save_freq='epoch')
    early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20,
                          verbose=1, mode='auto')

    # FIX: Model.fit_generator is deprecated; Model.fit accepts tf.data
    # datasets directly and infers steps per epoch from their cardinality.
    history = alexnet.fit(
        train_ds,
        validation_data=validation_ds,
        epochs=25,
        callbacks=[checkpoint, early])

    plt.plot(history.history["accuracy"])
    plt.plot(history.history['val_accuracy'])
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title(f"Model accuracy - {filter_name}")
    plt.ylabel("Value")
    plt.xlabel("Epoch")
    plt.legend(["Accuracy","Validation Accuracy","Loss","Validation Loss"])
    plt.show()

    alexnet.evaluate(test_ds)
def fix_float_img(img):
    """Rescale a float image to the 0-255 range and cast to integers.

    Parameters
    ----------
    img : np.ndarray
        Array with at least two distinct values (min != max), otherwise
        the normalization divides by zero.

    Returns
    -------
    np.ndarray
        Integer array with values linearly mapped to [0, 255].
    """
    img_normed = 255 * (img - img.min()) / (img.max() - img.min())
    # FIX: np.int was removed in NumPy 1.24; the builtin `int` gives the
    # same platform-default integer dtype np.int aliased.
    img_normed = np.array(img_normed, int)
    return img_normed
# directory = r"train_test_sw/train_sw_kontrast"
# subdirs = [r"/Tomato", r"/Lemon", r"/Beech", r"/Mean", r"/Gardenia"]
# json_entries = []
# for sub in subdirs:
# path = directory + sub
# for filename in os.listdir(path):
# f = os.path.join(path, filename)
# if os.path.isfile(f):
# img = cv.imread(f)
# lab= cv.cvtColor(img, cv.COLOR_BGR2LAB)
# l_channel, a, b = cv.split(lab)
# # Applying CLAHE to L-channel
# # feel free to try different values for the limit and grid size:
# clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
# cl = clahe.apply(l_channel)
# # merge the CLAHE enhanced L-channel with the a and b channel
# limg = cv.merge((cl,a,b))
# Converting image from LAB Color model to BGR color space
# enhanced_img = cv.cvtColor(limg, cv.COLOR_LAB2BGR)
# filename_edge = f[:-4] + '_kontrast.png'
# #final_edge = fix_float_img(adjusted)
# cv.imwrite(filename_edge, enhanced_img)
# directory = r"train_test_sw/train_sw_saturacja"
# subdirs = [r"/Tomato", r"/Lemon", r"/Beech", r"/Mean", r"/Gardenia"]
# json_entries = []
# for sub in subdirs:
# path = directory + sub
# for filename in os.listdir(path):
# f = os.path.join(path, filename)
# if os.path.isfile(f):
# img = cv.imread(f)
# hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
# greenMask = cv.inRange(hsv, (26, 10, 30), (97, 100, 255))
# hsv[:,:,1] = greenMask
# back = cv.cvtColor(hsv, cv.COLOR_HSV2RGB)
# filename_edge = f[:-4] + '_saturacja.png'
# #final_edge = fix_float_img(adjusted)
# cv.imwrite(filename_edge, back)
# directory = r"train_test_sw/train_sw_cartoon"
# subdirs = [r"/Tomato", r"/Lemon", r"/Beech", r"/Mean", r"/Gardenia"]
# json_entries = []
# for sub in subdirs:
# path = directory + sub
# for filename in os.listdir(path):
# f = os.path.join(path, filename)
# if os.path.isfile(f):
# img = cv.imread(f)
# edges1 = cv.bitwise_not(cv.Canny(img, 100, 200)) # for thin edges and inverting the mask obtained
# gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# gray = cv.medianBlur(gray, 5) # applying median blur with kernel size of 5
# edges2 = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 7, 7) # thick edges
# dst = cv.edgePreservingFilter(img, flags=2, sigma_s=20, sigma_r=0.1) # you can also use bilateral filter but that is slow
# # flag = 1 for RECURS_FILTER (Recursive Filtering) and 2 for NORMCONV_FILTER (Normalized Convolution). NORMCONV_FILTER produces sharpening of the edges but is slower.
# # sigma_s controls the size of the neighborhood. Range 1 - 200
# # sigma_r controls the how dissimilar colors within the neighborhood will be averaged. A larger sigma_r results in large regions of constant color. Range 0 - 1
# cartoon = cv.bitwise_and(dst, dst, mask=edges1) # adding thin edges to smoothened image
# filename_edge = f[:-4] + '_cartoon.png'
# #final_edge = fix_float_img(adjusted)
# cv.imwrite(filename_edge, cartoon)
Data
import sys
import subprocess
import pkg_resources
import numpy as np

peachy = []

# Install scikit-image on the fly if it is not already present in the
# current environment; pip output is suppressed.
required = {'scikit-image'}
installed = {pkg.key for pkg in pkg_resources.working_set}
missing = required - installed
if missing:
    subprocess.check_call(
        [sys.executable, '-m', 'pip', 'install', *missing],
        stdout=subprocess.DEVNULL,
    )
def load_train_data(input_dir, newSize=(227,227)):
    """Load training images from one subdirectory per category.

    Parameters
    ----------
    input_dir : str
        Directory containing one subdirectory per class; every regular
        file inside (except 'desktop.ini') is treated as an image.
    newSize : tuple of int
        Target (width, height) each image is resized to.

    Returns
    -------
    dict with keys:
        "values"           - ndarray of resized images normalized to [0, 1]
        "categories_name"  - list of subdirectory (class) names
        "categories_count" - number of images found per class
        "labels"           - class name for every loaded image
    """
    import numpy as np
    import pandas as pd
    import os
    from skimage.io import imread
    import cv2 as cv
    from pathlib import Path
    import random
    from shutil import copyfile, rmtree
    import json
    import seaborn as sns
    import matplotlib.pyplot as plt
    import matplotlib

    image_dir = Path(input_dir)
    categories_name = []
    for file in os.listdir(image_dir):
        d = os.path.join(image_dir, file)
        if os.path.isdir(d):
            categories_name.append(file)

    folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]

    train_img = []
    categories_count = []
    labels = []
    for i, direc in enumerate(folders):
        count = 0
        for obj in direc.iterdir():
            if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':
                labels.append(os.path.basename(os.path.normpath(direc)))
                count += 1
                # imread returns an ndarray of shape xSize x ySize x colorDepth
                img = imread(obj)
                # FIX: grayscale images are 2-D; the old check compared the
                # image WIDTH to 256 (img.shape[-1] == 256), which both missed
                # real grayscale images and mangled 256-px-wide color ones.
                if img.ndim == 2:
                    # Replicate the single channel to get an RGB image.
                    img = np.repeat(img[..., np.newaxis], 3, axis=2)
                elif img.shape[-1] == 4:
                    # Drop the alpha channel of RGBA images.
                    img = img[:, :, :3]
                img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)
                # Normalize pixel values to [0, 1].
                img = img / 255
                train_img.append(img)
        categories_count.append(count)

    X = {}
    X["values"] = np.array(train_img)
    X["categories_name"] = categories_name
    X["categories_count"] = categories_count
    X["labels"] = labels
    return X
def load_test_data(input_dir, newSize=(227,227)):
    """Load test images listed in a sibling 'test_labels.json' file.

    Parameters
    ----------
    input_dir : str
        Directory holding the test images; its parent directory must
        contain 'test_labels.json', a JSON list of objects with
        'filename' and 'value' (class label) keys, grouped by class.
    newSize : tuple of int
        Target (width, height) each image is resized to.

    Returns
    -------
    dict with the same keys as load_train_data:
        "values", "categories_name", "categories_count", "labels".
    """
    import numpy as np
    import pandas as pd
    import os
    from skimage.io import imread
    import cv2 as cv
    from pathlib import Path
    import random
    from shutil import copyfile, rmtree
    import json
    import seaborn as sns
    import matplotlib.pyplot as plt
    import matplotlib

    image_path = Path(input_dir)
    labels_path = image_path.parents[0] / 'test_labels.json'
    jsonString = labels_path.read_text()
    objects = json.loads(jsonString)

    # Count consecutive runs of identical labels; this assumes the JSON
    # entries are grouped by class (as load_train_data's folders are).
    categories_name = []
    categories_count = []
    count = 0
    c = objects[0]['value']
    for e in objects:
        if e['value'] != c:
            categories_count.append(count)
            c = e['value']
            count = 1
        else:
            count += 1
        if not e['value'] in categories_name:
            categories_name.append(e['value'])
    categories_count.append(count)

    test_img = []
    labels = []
    for e in objects:
        p = image_path / e['filename']
        # imread returns an ndarray of shape xSize x ySize x colorDepth
        img = imread(p)
        # FIX: handle grayscale (2-D) images the same way the train loader
        # does, so the resulting array stacks to a uniform 3-channel shape.
        if img.ndim == 2:
            img = np.repeat(img[..., np.newaxis], 3, axis=2)
        elif img.shape[-1] == 4:
            # Drop the alpha channel of RGBA images.
            img = img[:, :, :3]
        img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)
        # Normalize pixel values to [0, 1].
        img = img / 255
        test_img.append(img)
        labels.append(e['value'])

    X = {}
    X["values"] = np.array(test_img)
    X["categories_name"] = categories_name
    X["categories_count"] = categories_count
    X["labels"] = labels
    return X
def data_prep_alex(filter_name):
    """Prepare batched train/test/validation datasets for one filter variant.

    Parameters
    ----------
    filter_name : str
        Suffix of the training directory: ./train_test_sw/train_sw_{filter_name}.

    Returns
    -------
    (train_ds, test_ds, validation_ds) : tf.data.Dataset
        Datasets of (image, encoded-label) pairs, shuffled and batched
        with batch_size=32 (incomplete final batches dropped).
    """
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import LabelEncoder
    import tensorflow as tf

    data_train = load_train_data(f"./train_test_sw/train_sw_{filter_name}")
    values_train = data_train['values']
    labels_train = data_train['labels']

    data_test = load_test_data("./train_test_sw/test_sw")
    X_test = data_test['values']
    y_test = data_test['labels']

    X_train, X_validate, y_train, y_validate = train_test_split(
        values_train, labels_train, test_size=0.2, random_state=42)

    # FIX: fit the label encoder once on the training labels and only
    # *transform* validation/test labels. Re-running fit_transform on each
    # split could silently assign different integers to the same class
    # whenever a split is missing a class.
    class_le = LabelEncoder()
    y_train_enc = class_le.fit_transform(y_train)
    y_validate_enc = class_le.transform(y_validate)
    y_test_enc = class_le.transform(y_test)

    train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))
    validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))
    test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))

    train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()
    test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()
    validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()

    # FIX: each dataset shuffles with its own size as the buffer; the old
    # code reused train_ds_size for all three, making the test/validation
    # shuffles partial or oversized depending on the split sizes.
    train_ds = (train_ds
                .shuffle(buffer_size=train_ds_size)
                .batch(batch_size=32, drop_remainder=True))
    test_ds = (test_ds
               .shuffle(buffer_size=test_ds_size)
               .batch(batch_size=32, drop_remainder=True))
    validation_ds = (validation_ds
                     .shuffle(buffer_size=validation_ds_size)
                     .batch(batch_size=32, drop_remainder=True))
    return train_ds, test_ds, validation_ds
ALEXNET
# Names of the augmented-training-set variants to train and evaluate.
filters = ['kontrast', 'cartoon', 'saturacja']

# NOTE(review): X_test / y_test appear unused in this cell (data_prep_alex
# reloads the test set itself); kept in case later cells rely on them.
data_test = load_test_data("./train_test_sw/test_sw")
X_test = data_test['values']
y_test = data_test['labels']

# FIX: loop variable renamed so it no longer shadows the builtin `filter`.
for filter_name in filters:
    print(f"{filter_name} ---------------------------------------")
    train_ds, test_ds, validation_ds = data_prep_alex(filter_name)
    alex(filter_name, train_ds, test_ds, validation_ds)
kontrast ---------------------------------------
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_5 (Conv2D) (None, 55, 55, 96) 34944 batch_normalization_5 (Batc (None, 55, 55, 96) 384 hNormalization) max_pooling2d_3 (MaxPooling (None, 27, 27, 96) 0 2D) conv2d_6 (Conv2D) (None, 27, 27, 256) 614656 batch_normalization_6 (Batc (None, 27, 27, 256) 1024 hNormalization) max_pooling2d_4 (MaxPooling (None, 13, 13, 256) 0 2D) conv2d_7 (Conv2D) (None, 13, 13, 384) 885120 batch_normalization_7 (Batc (None, 13, 13, 384) 1536 hNormalization) conv2d_8 (Conv2D) (None, 13, 13, 384) 1327488 batch_normalization_8 (Batc (None, 13, 13, 384) 1536 hNormalization) conv2d_9 (Conv2D) (None, 13, 13, 256) 884992 batch_normalization_9 (Batc (None, 13, 13, 256) 1024 hNormalization) max_pooling2d_5 (MaxPooling (None, 6, 6, 256) 0 2D) flatten_1 (Flatten) (None, 9216) 0 dense_3 (Dense) (None, 4096) 37752832 dropout_2 (Dropout) (None, 4096) 0 dense_4 (Dense) (None, 4096) 16781312 dropout_3 (Dropout) (None, 4096) 0 dense_5 (Dense) (None, 10) 40970 ================================================================= Total params: 58,327,818 Trainable params: 58,325,066 Non-trainable params: 2,752 _________________________________________________________________ WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Epoch 1/25
/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_35974/3983922004.py:34: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators. alex = alexnet.fit_generator(
51/51 [==============================] - ETA: 0s - loss: 3.8345 - accuracy: 0.3658 Epoch 1: val_accuracy improved from -inf to 0.22656, saving model to alex_2.h5 51/51 [==============================] - 46s 891ms/step - loss: 3.8345 - accuracy: 0.3658 - val_loss: 2.1574 - val_accuracy: 0.2266 Epoch 2/25 51/51 [==============================] - ETA: 0s - loss: 1.3397 - accuracy: 0.5362 Epoch 2: val_accuracy improved from 0.22656 to 0.23177, saving model to alex_2.h5 51/51 [==============================] - 48s 945ms/step - loss: 1.3397 - accuracy: 0.5362 - val_loss: 2.7271 - val_accuracy: 0.2318 Epoch 3/25 51/51 [==============================] - ETA: 0s - loss: 0.9793 - accuracy: 0.6428 Epoch 3: val_accuracy improved from 0.23177 to 0.34635, saving model to alex_2.h5 51/51 [==============================] - 49s 954ms/step - loss: 0.9793 - accuracy: 0.6428 - val_loss: 3.4108 - val_accuracy: 0.3464 Epoch 4/25 51/51 [==============================] - ETA: 0s - loss: 0.6715 - accuracy: 0.7273 Epoch 4: val_accuracy did not improve from 0.34635 51/51 [==============================] - 51s 1s/step - loss: 0.6715 - accuracy: 0.7273 - val_loss: 4.2069 - val_accuracy: 0.3411 Epoch 5/25 51/51 [==============================] - ETA: 0s - loss: 0.5853 - accuracy: 0.7917 Epoch 5: val_accuracy did not improve from 0.34635 51/51 [==============================] - 49s 962ms/step - loss: 0.5853 - accuracy: 0.7917 - val_loss: 4.3773 - val_accuracy: 0.2839 Epoch 6/25 51/51 [==============================] - ETA: 0s - loss: 0.4176 - accuracy: 0.8413 Epoch 6: val_accuracy did not improve from 0.34635 51/51 [==============================] - 49s 961ms/step - loss: 0.4176 - accuracy: 0.8413 - val_loss: 5.1601 - val_accuracy: 0.3281 Epoch 7/25 51/51 [==============================] - ETA: 0s - loss: 0.3224 - accuracy: 0.8756 Epoch 7: val_accuracy did not improve from 0.34635 51/51 [==============================] - 49s 957ms/step - loss: 0.3224 - accuracy: 0.8756 - val_loss: 5.2943 - 
val_accuracy: 0.3307 Epoch 8/25 51/51 [==============================] - ETA: 0s - loss: 0.2591 - accuracy: 0.9026 Epoch 8: val_accuracy improved from 0.34635 to 0.41406, saving model to alex_2.h5 51/51 [==============================] - 50s 985ms/step - loss: 0.2591 - accuracy: 0.9026 - val_loss: 3.7030 - val_accuracy: 0.4141 Epoch 9/25 51/51 [==============================] - ETA: 0s - loss: 0.2748 - accuracy: 0.8964 Epoch 9: val_accuracy improved from 0.41406 to 0.50000, saving model to alex_2.h5 51/51 [==============================] - 53s 1s/step - loss: 0.2748 - accuracy: 0.8964 - val_loss: 2.1064 - val_accuracy: 0.5000 Epoch 10/25 51/51 [==============================] - ETA: 0s - loss: 0.2015 - accuracy: 0.9240 Epoch 10: val_accuracy improved from 0.50000 to 0.62500, saving model to alex_2.h5 51/51 [==============================] - 52s 1s/step - loss: 0.2015 - accuracy: 0.9240 - val_loss: 1.3254 - val_accuracy: 0.6250 Epoch 11/25 51/51 [==============================] - ETA: 0s - loss: 0.1754 - accuracy: 0.9350 Epoch 11: val_accuracy improved from 0.62500 to 0.74740, saving model to alex_2.h5 51/51 [==============================] - 54s 1s/step - loss: 0.1754 - accuracy: 0.9350 - val_loss: 0.7914 - val_accuracy: 0.7474 Epoch 12/25 51/51 [==============================] - ETA: 0s - loss: 0.1711 - accuracy: 0.9412 Epoch 12: val_accuracy did not improve from 0.74740 51/51 [==============================] - 738s 15s/step - loss: 0.1711 - accuracy: 0.9412 - val_loss: 1.0148 - val_accuracy: 0.7031 Epoch 13/25 51/51 [==============================] - ETA: 0s - loss: 0.1424 - accuracy: 0.9498 Epoch 13: val_accuracy improved from 0.74740 to 0.82031, saving model to alex_2.h5 51/51 [==============================] - 49s 951ms/step - loss: 0.1424 - accuracy: 0.9498 - val_loss: 0.5437 - val_accuracy: 0.8203 Epoch 14/25 51/51 [==============================] - ETA: 0s - loss: 0.1434 - accuracy: 0.9418 Epoch 14: val_accuracy improved from 0.82031 to 0.83594, saving 
model to alex_2.h5 51/51 [==============================] - 53s 1s/step - loss: 0.1434 - accuracy: 0.9418 - val_loss: 0.4773 - val_accuracy: 0.8359 Epoch 15/25 51/51 [==============================] - ETA: 0s - loss: 0.0943 - accuracy: 0.9681 Epoch 15: val_accuracy did not improve from 0.83594 51/51 [==============================] - 50s 974ms/step - loss: 0.0943 - accuracy: 0.9681 - val_loss: 0.6302 - val_accuracy: 0.8125 Epoch 16/25 51/51 [==============================] - ETA: 0s - loss: 0.0859 - accuracy: 0.9669 Epoch 16: val_accuracy improved from 0.83594 to 0.93229, saving model to alex_2.h5 51/51 [==============================] - 48s 948ms/step - loss: 0.0859 - accuracy: 0.9669 - val_loss: 0.2049 - val_accuracy: 0.9323 Epoch 17/25 51/51 [==============================] - ETA: 0s - loss: 0.0849 - accuracy: 0.9688 Epoch 17: val_accuracy did not improve from 0.93229 51/51 [==============================] - 55s 1s/step - loss: 0.0849 - accuracy: 0.9688 - val_loss: 0.3428 - val_accuracy: 0.8932 Epoch 18/25 51/51 [==============================] - ETA: 0s - loss: 0.0876 - accuracy: 0.9712 Epoch 18: val_accuracy did not improve from 0.93229 51/51 [==============================] - 78s 2s/step - loss: 0.0876 - accuracy: 0.9712 - val_loss: 0.7060 - val_accuracy: 0.8151 Epoch 19/25 51/51 [==============================] - ETA: 0s - loss: 0.0708 - accuracy: 0.9737 Epoch 19: val_accuracy improved from 0.93229 to 0.94271, saving model to alex_2.h5 51/51 [==============================] - 53s 1s/step - loss: 0.0708 - accuracy: 0.9737 - val_loss: 0.1935 - val_accuracy: 0.9427 Epoch 20/25 51/51 [==============================] - ETA: 0s - loss: 0.0829 - accuracy: 0.9657 Epoch 20: val_accuracy did not improve from 0.94271 51/51 [==============================] - 67s 1s/step - loss: 0.0829 - accuracy: 0.9657 - val_loss: 0.1955 - val_accuracy: 0.9375 Epoch 21/25 51/51 [==============================] - ETA: 0s - loss: 0.0404 - accuracy: 0.9865 Epoch 21: val_accuracy improved 
from 0.94271 to 0.95312, saving model to alex_2.h5 51/51 [==============================] - 140s 3s/step - loss: 0.0404 - accuracy: 0.9865 - val_loss: 0.1493 - val_accuracy: 0.9531 Epoch 22/25 51/51 [==============================] - ETA: 0s - loss: 0.0370 - accuracy: 0.9877 Epoch 22: val_accuracy did not improve from 0.95312 51/51 [==============================] - 64s 1s/step - loss: 0.0370 - accuracy: 0.9877 - val_loss: 0.1635 - val_accuracy: 0.9505 Epoch 23/25 51/51 [==============================] - ETA: 0s - loss: 0.0353 - accuracy: 0.9865 Epoch 23: val_accuracy did not improve from 0.95312 51/51 [==============================] - 89s 2s/step - loss: 0.0353 - accuracy: 0.9865 - val_loss: 0.4217 - val_accuracy: 0.8932 Epoch 24/25 51/51 [==============================] - ETA: 0s - loss: 0.0308 - accuracy: 0.9920 Epoch 24: val_accuracy did not improve from 0.95312 51/51 [==============================] - 133s 3s/step - loss: 0.0308 - accuracy: 0.9920 - val_loss: 0.2005 - val_accuracy: 0.9349 Epoch 25/25 51/51 [==============================] - ETA: 0s - loss: 0.0203 - accuracy: 0.9957 Epoch 25: val_accuracy improved from 0.95312 to 0.95573, saving model to alex_2.h5 51/51 [==============================] - 54s 1s/step - loss: 0.0203 - accuracy: 0.9957 - val_loss: 0.1394 - val_accuracy: 0.9557
8/8 [==============================] - 2s 256ms/step - loss: 0.2136 - accuracy: 0.9375 cartoon ---------------------------------------
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_10 (Conv2D) (None, 55, 55, 96) 34944 batch_normalization_10 (Bat (None, 55, 55, 96) 384 chNormalization) max_pooling2d_6 (MaxPooling (None, 27, 27, 96) 0 2D) conv2d_11 (Conv2D) (None, 27, 27, 256) 614656 batch_normalization_11 (Bat (None, 27, 27, 256) 1024 chNormalization) max_pooling2d_7 (MaxPooling (None, 13, 13, 256) 0 2D) conv2d_12 (Conv2D) (None, 13, 13, 384) 885120 batch_normalization_12 (Bat (None, 13, 13, 384) 1536 chNormalization) conv2d_13 (Conv2D) (None, 13, 13, 384) 1327488 batch_normalization_13 (Bat (None, 13, 13, 384) 1536 chNormalization) conv2d_14 (Conv2D) (None, 13, 13, 256) 884992 batch_normalization_14 (Bat (None, 13, 13, 256) 1024 chNormalization) max_pooling2d_8 (MaxPooling (None, 6, 6, 256) 0 2D) flatten_2 (Flatten) (None, 9216) 0 dense_6 (Dense) (None, 4096) 37752832 dropout_4 (Dropout) (None, 4096) 0 dense_7 (Dense) (None, 4096) 16781312 dropout_5 (Dropout) (None, 4096) 0 dense_8 (Dense) (None, 10) 40970 ================================================================= Total params: 58,327,818 Trainable params: 58,325,066 Non-trainable params: 2,752 _________________________________________________________________ WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Epoch 1/25 51/51 [==============================] - ETA: 0s - loss: 3.3183 - accuracy: 0.4295 Epoch 1: val_accuracy improved from -inf to 0.23177, saving model to alex_2.h5 51/51 [==============================] - 49s 942ms/step - loss: 3.3183 - accuracy: 0.4295 - val_loss: 2.0209 - val_accuracy: 0.2318 Epoch 2/25 51/51 [==============================] - ETA: 0s - loss: 1.0712 - accuracy: 0.6654 Epoch 2: val_accuracy did not improve from 0.23177 51/51 [==============================] - 52s 1s/step - loss: 1.0712 - accuracy: 0.6654 - val_loss: 2.9587 - val_accuracy: 0.2188 Epoch 3/25 51/51 [==============================] - ETA: 0s - loss: 0.6603 - accuracy: 0.7739 Epoch 3: val_accuracy improved from 0.23177 to 0.31250, saving model to alex_2.h5 51/51 [==============================] - 55s 1s/step - loss: 0.6603 - accuracy: 0.7739 - val_loss: 3.3996 - val_accuracy: 0.3125 Epoch 4/25 51/51 [==============================] - ETA: 0s - loss: 0.5013 - accuracy: 0.8070 Epoch 4: val_accuracy improved from 0.31250 to 0.32031, saving model to alex_2.h5 51/51 [==============================] - 54s 1s/step - loss: 0.5013 - accuracy: 0.8070 - val_loss: 4.6634 - val_accuracy: 0.3203 Epoch 5/25 51/51 [==============================] - ETA: 0s - loss: 0.3286 - accuracy: 0.8762 Epoch 5: val_accuracy did not improve from 0.32031 51/51 [==============================] - 53s 1s/step - loss: 0.3286 - accuracy: 0.8762 - val_loss: 5.9495 - val_accuracy: 0.2109 Epoch 6/25 51/51 [==============================] - ETA: 0s - loss: 0.2392 - accuracy: 0.9124 Epoch 6: val_accuracy did not improve from 0.32031 51/51 [==============================] - 59s 1s/step - loss: 0.2392 - accuracy: 0.9124 - val_loss: 6.1043 - val_accuracy: 0.2760 Epoch 7/25 51/51 [==============================] - ETA: 0s - loss: 0.2096 - accuracy: 0.9216 Epoch 7: val_accuracy did not improve from 0.32031 51/51 [==============================] - 51s 995ms/step - loss: 0.2096 - accuracy: 0.9216 - val_loss: 6.5559 - 
val_accuracy: 0.2422 Epoch 8/25 51/51 [==============================] - ETA: 0s - loss: 0.1786 - accuracy: 0.9387 Epoch 8: val_accuracy improved from 0.32031 to 0.34115, saving model to alex_2.h5 51/51 [==============================] - 47s 913ms/step - loss: 0.1786 - accuracy: 0.9387 - val_loss: 5.2047 - val_accuracy: 0.3411 Epoch 9/25 51/51 [==============================] - ETA: 0s - loss: 0.1700 - accuracy: 0.9387 Epoch 9: val_accuracy improved from 0.34115 to 0.41667, saving model to alex_2.h5 51/51 [==============================] - 47s 914ms/step - loss: 0.1700 - accuracy: 0.9387 - val_loss: 3.7162 - val_accuracy: 0.4167 Epoch 10/25 51/51 [==============================] - ETA: 0s - loss: 0.1615 - accuracy: 0.9430 Epoch 10: val_accuracy improved from 0.41667 to 0.59375, saving model to alex_2.h5 51/51 [==============================] - 47s 915ms/step - loss: 0.1615 - accuracy: 0.9430 - val_loss: 1.8405 - val_accuracy: 0.5938 Epoch 11/25 51/51 [==============================] - ETA: 0s - loss: 0.1049 - accuracy: 0.9602 Epoch 11: val_accuracy improved from 0.59375 to 0.66406, saving model to alex_2.h5 51/51 [==============================] - 51s 1s/step - loss: 0.1049 - accuracy: 0.9602 - val_loss: 1.1911 - val_accuracy: 0.6641 Epoch 12/25 51/51 [==============================] - ETA: 0s - loss: 0.0944 - accuracy: 0.9657 Epoch 12: val_accuracy improved from 0.66406 to 0.76823, saving model to alex_2.h5 51/51 [==============================] - 54s 1s/step - loss: 0.0944 - accuracy: 0.9657 - val_loss: 0.8048 - val_accuracy: 0.7682 Epoch 13/25 51/51 [==============================] - ETA: 0s - loss: 0.0714 - accuracy: 0.9761 Epoch 13: val_accuracy improved from 0.76823 to 0.96615, saving model to alex_2.h5 51/51 [==============================] - 112s 2s/step - loss: 0.0714 - accuracy: 0.9761 - val_loss: 0.0924 - val_accuracy: 0.9661 Epoch 14/25 51/51 [==============================] - ETA: 0s - loss: 0.0788 - accuracy: 0.9694 Epoch 14: val_accuracy did not 
improve from 0.96615 51/51 [==============================] - 109s 2s/step - loss: 0.0788 - accuracy: 0.9694 - val_loss: 0.1619 - val_accuracy: 0.9323 Epoch 15/25 51/51 [==============================] - ETA: 0s - loss: 0.0630 - accuracy: 0.9847 Epoch 15: val_accuracy did not improve from 0.96615 51/51 [==============================] - 59s 1s/step - loss: 0.0630 - accuracy: 0.9847 - val_loss: 0.3735 - val_accuracy: 0.8750 Epoch 16/25 51/51 [==============================] - ETA: 0s - loss: 0.0662 - accuracy: 0.9779 Epoch 16: val_accuracy did not improve from 0.96615 51/51 [==============================] - 49s 967ms/step - loss: 0.0662 - accuracy: 0.9779 - val_loss: 0.1856 - val_accuracy: 0.9193 Epoch 17/25 51/51 [==============================] - ETA: 0s - loss: 0.0492 - accuracy: 0.9816 Epoch 17: val_accuracy did not improve from 0.96615 51/51 [==============================] - 48s 945ms/step - loss: 0.0492 - accuracy: 0.9816 - val_loss: 0.2103 - val_accuracy: 0.9271 Epoch 18/25 51/51 [==============================] - ETA: 0s - loss: 0.0420 - accuracy: 0.9871 Epoch 18: val_accuracy did not improve from 0.96615 51/51 [==============================] - 48s 946ms/step - loss: 0.0420 - accuracy: 0.9871 - val_loss: 0.7410 - val_accuracy: 0.8411 Epoch 19/25 51/51 [==============================] - ETA: 0s - loss: 0.0580 - accuracy: 0.9792 Epoch 19: val_accuracy improved from 0.96615 to 0.98958, saving model to alex_2.h5 51/51 [==============================] - 51s 993ms/step - loss: 0.0580 - accuracy: 0.9792 - val_loss: 0.0379 - val_accuracy: 0.9896 Epoch 20/25 51/51 [==============================] - ETA: 0s - loss: 0.0471 - accuracy: 0.9853 Epoch 20: val_accuracy did not improve from 0.98958 51/51 [==============================] - 49s 961ms/step - loss: 0.0471 - accuracy: 0.9853 - val_loss: 1.3082 - val_accuracy: 0.7526 Epoch 21/25 51/51 [==============================] - ETA: 0s - loss: 0.0391 - accuracy: 0.9890 Epoch 21: val_accuracy did not improve from 0.98958 
51/51 [==============================] - 55s 1s/step - loss: 0.0391 - accuracy: 0.9890 - val_loss: 0.1507 - val_accuracy: 0.9323 Epoch 22/25 51/51 [==============================] - ETA: 0s - loss: 0.0351 - accuracy: 0.9896 Epoch 22: val_accuracy did not improve from 0.98958 51/51 [==============================] - 55s 1s/step - loss: 0.0351 - accuracy: 0.9896 - val_loss: 0.1305 - val_accuracy: 0.9479 Epoch 23/25 51/51 [==============================] - ETA: 0s - loss: 0.0231 - accuracy: 0.9933 Epoch 23: val_accuracy did not improve from 0.98958 51/51 [==============================] - 53s 1s/step - loss: 0.0231 - accuracy: 0.9933 - val_loss: 0.0865 - val_accuracy: 0.9635 Epoch 24/25 51/51 [==============================] - ETA: 0s - loss: 0.0201 - accuracy: 0.9933 Epoch 24: val_accuracy did not improve from 0.98958 51/51 [==============================] - 56s 1s/step - loss: 0.0201 - accuracy: 0.9933 - val_loss: 0.5474 - val_accuracy: 0.8281 Epoch 25/25 51/51 [==============================] - ETA: 0s - loss: 0.0346 - accuracy: 0.9896 Epoch 25: val_accuracy did not improve from 0.98958 51/51 [==============================] - 56s 1s/step - loss: 0.0346 - accuracy: 0.9896 - val_loss: 0.0609 - val_accuracy: 0.9844
8/8 [==============================] - 3s 318ms/step - loss: 0.2321 - accuracy: 0.9297 saturacja ---------------------------------------
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_15 (Conv2D) (None, 55, 55, 96) 34944 batch_normalization_15 (Bat (None, 55, 55, 96) 384 chNormalization) max_pooling2d_9 (MaxPooling (None, 27, 27, 96) 0 2D) conv2d_16 (Conv2D) (None, 27, 27, 256) 614656 batch_normalization_16 (Bat (None, 27, 27, 256) 1024 chNormalization) max_pooling2d_10 (MaxPoolin (None, 13, 13, 256) 0 g2D) conv2d_17 (Conv2D) (None, 13, 13, 384) 885120 batch_normalization_17 (Bat (None, 13, 13, 384) 1536 chNormalization) conv2d_18 (Conv2D) (None, 13, 13, 384) 1327488 batch_normalization_18 (Bat (None, 13, 13, 384) 1536 chNormalization) conv2d_19 (Conv2D) (None, 13, 13, 256) 884992 batch_normalization_19 (Bat (None, 13, 13, 256) 1024 chNormalization) max_pooling2d_11 (MaxPoolin (None, 6, 6, 256) 0 g2D) flatten_3 (Flatten) (None, 9216) 0 dense_9 (Dense) (None, 4096) 37752832 dropout_6 (Dropout) (None, 4096) 0 dense_10 (Dense) (None, 4096) 16781312 dropout_7 (Dropout) (None, 4096) 0 dense_11 (Dense) (None, 10) 40970 ================================================================= Total params: 58,327,818 Trainable params: 58,325,066 Non-trainable params: 2,752 _________________________________________________________________ WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Epoch 1/25 51/51 [==============================] - ETA: 0s - loss: 3.6670 - accuracy: 0.3793 Epoch 1: val_accuracy improved from -inf to 0.38542, saving model to alex_2.h5 51/51 [==============================] - 49s 953ms/step - loss: 3.6670 - accuracy: 0.3793 - val_loss: 1.8499 - val_accuracy: 0.3854 Epoch 2/25 51/51 [==============================] - ETA: 0s - loss: 1.3486 - accuracy: 0.5748 Epoch 2: val_accuracy did not improve from 0.38542 51/51 [==============================] - 52s 1s/step - loss: 1.3486 - accuracy: 0.5748 - val_loss: 3.4816 - val_accuracy: 0.2578 Epoch 3/25 51/51 [==============================] - ETA: 0s - loss: 0.9585 - accuracy: 0.6458 Epoch 3: val_accuracy did not improve from 0.38542 51/51 [==============================] - 51s 1s/step - loss: 0.9585 - accuracy: 0.6458 - val_loss: 4.6736 - val_accuracy: 0.2578 Epoch 4/25 51/51 [==============================] - ETA: 0s - loss: 0.7698 - accuracy: 0.7126 Epoch 4: val_accuracy did not improve from 0.38542 51/51 [==============================] - 55s 1s/step - loss: 0.7698 - accuracy: 0.7126 - val_loss: 5.1900 - val_accuracy: 0.2500 Epoch 5/25 51/51 [==============================] - ETA: 0s - loss: 0.6196 - accuracy: 0.7770 Epoch 5: val_accuracy did not improve from 0.38542 51/51 [==============================] - 52s 1s/step - loss: 0.6196 - accuracy: 0.7770 - val_loss: 6.2598 - val_accuracy: 0.3359 Epoch 6/25 51/51 [==============================] - ETA: 0s - loss: 0.5028 - accuracy: 0.8235 Epoch 6: val_accuracy did not improve from 0.38542 51/51 [==============================] - 54s 1s/step - loss: 0.5028 - accuracy: 0.8235 - val_loss: 6.7278 - val_accuracy: 0.2708 Epoch 7/25 51/51 [==============================] - ETA: 0s - loss: 0.4281 - accuracy: 0.8425 Epoch 7: val_accuracy did not improve from 0.38542 51/51 [==============================] - 54s 1s/step - loss: 0.4281 - accuracy: 0.8425 - val_loss: 4.9290 - val_accuracy: 0.3542 Epoch 8/25 51/51 [==============================] 
- ETA: 0s - loss: 0.3045 - accuracy: 0.8977 Epoch 8: val_accuracy improved from 0.38542 to 0.45573, saving model to alex_2.h5 51/51 [==============================] - 53s 1s/step - loss: 0.3045 - accuracy: 0.8977 - val_loss: 2.6881 - val_accuracy: 0.4557 Epoch 9/25 51/51 [==============================] - ETA: 0s - loss: 0.2855 - accuracy: 0.8915 Epoch 9: val_accuracy improved from 0.45573 to 0.48177, saving model to alex_2.h5 51/51 [==============================] - 53s 1s/step - loss: 0.2855 - accuracy: 0.8915 - val_loss: 2.4350 - val_accuracy: 0.4818 Epoch 10/25 51/51 [==============================] - ETA: 0s - loss: 0.2387 - accuracy: 0.9148 Epoch 10: val_accuracy improved from 0.48177 to 0.59115, saving model to alex_2.h5 51/51 [==============================] - 53s 1s/step - loss: 0.2387 - accuracy: 0.9148 - val_loss: 1.2724 - val_accuracy: 0.5911 Epoch 11/25 51/51 [==============================] - ETA: 0s - loss: 0.2451 - accuracy: 0.9118 Epoch 11: val_accuracy improved from 0.59115 to 0.74479, saving model to alex_2.h5 51/51 [==============================] - 53s 1s/step - loss: 0.2451 - accuracy: 0.9118 - val_loss: 0.7184 - val_accuracy: 0.7448 Epoch 12/25 51/51 [==============================] - ETA: 0s - loss: 0.2065 - accuracy: 0.9271 Epoch 12: val_accuracy improved from 0.74479 to 0.75521, saving model to alex_2.h5 51/51 [==============================] - 53s 1s/step - loss: 0.2065 - accuracy: 0.9271 - val_loss: 0.6324 - val_accuracy: 0.7552 Epoch 13/25 51/51 [==============================] - ETA: 0s - loss: 0.1495 - accuracy: 0.9442 Epoch 13: val_accuracy improved from 0.75521 to 0.88542, saving model to alex_2.h5 51/51 [==============================] - 55s 1s/step - loss: 0.1495 - accuracy: 0.9442 - val_loss: 0.3196 - val_accuracy: 0.8854 Epoch 14/25 51/51 [==============================] - ETA: 0s - loss: 0.1121 - accuracy: 0.9620 Epoch 14: val_accuracy improved from 0.88542 to 0.93750, saving model to alex_2.h5 51/51 
[==============================] - 52s 1s/step - loss: 0.1121 - accuracy: 0.9620 - val_loss: 0.1828 - val_accuracy: 0.9375 Epoch 15/25 51/51 [==============================] - ETA: 0s - loss: 0.1123 - accuracy: 0.9626 Epoch 15: val_accuracy did not improve from 0.93750 51/51 [==============================] - 55s 1s/step - loss: 0.1123 - accuracy: 0.9626 - val_loss: 0.2040 - val_accuracy: 0.9271 Epoch 16/25 51/51 [==============================] - ETA: 0s - loss: 0.1076 - accuracy: 0.9614 Epoch 16: val_accuracy improved from 0.93750 to 0.94271, saving model to alex_2.h5 51/51 [==============================] - 53s 1s/step - loss: 0.1076 - accuracy: 0.9614 - val_loss: 0.1781 - val_accuracy: 0.9427 Epoch 17/25 51/51 [==============================] - ETA: 0s - loss: 0.1243 - accuracy: 0.9571 Epoch 17: val_accuracy did not improve from 0.94271 51/51 [==============================] - 50s 988ms/step - loss: 0.1243 - accuracy: 0.9571 - val_loss: 0.2918 - val_accuracy: 0.8984 Epoch 18/25 51/51 [==============================] - ETA: 0s - loss: 0.0914 - accuracy: 0.9706 Epoch 18: val_accuracy did not improve from 0.94271 51/51 [==============================] - 952s 19s/step - loss: 0.0914 - accuracy: 0.9706 - val_loss: 0.2769 - val_accuracy: 0.9036 Epoch 19/25 51/51 [==============================] - ETA: 0s - loss: 0.0683 - accuracy: 0.9761 Epoch 19: val_accuracy did not improve from 0.94271 51/51 [==============================] - 121s 2s/step - loss: 0.0683 - accuracy: 0.9761 - val_loss: 0.2512 - val_accuracy: 0.9036 Epoch 20/25 51/51 [==============================] - ETA: 0s - loss: 0.0546 - accuracy: 0.9841 Epoch 20: val_accuracy improved from 0.94271 to 0.96354, saving model to alex_2.h5 51/51 [==============================] - 167s 3s/step - loss: 0.0546 - accuracy: 0.9841 - val_loss: 0.1222 - val_accuracy: 0.9635 Epoch 21/25 51/51 [==============================] - ETA: 0s - loss: 0.0561 - accuracy: 0.9786 Epoch 21: val_accuracy did not improve from 0.96354 
51/51 [==============================] - 212s 4s/step - loss: 0.0561 - accuracy: 0.9786 - val_loss: 0.1749 - val_accuracy: 0.9349 Epoch 22/25 51/51 [==============================] - ETA: 0s - loss: 0.0399 - accuracy: 0.9902 Epoch 22: val_accuracy did not improve from 0.96354 51/51 [==============================] - 379s 7s/step - loss: 0.0399 - accuracy: 0.9902 - val_loss: 0.3205 - val_accuracy: 0.8958 Epoch 23/25 51/51 [==============================] - ETA: 0s - loss: 0.0587 - accuracy: 0.9804 Epoch 23: val_accuracy did not improve from 0.96354 51/51 [==============================] - 332s 7s/step - loss: 0.0587 - accuracy: 0.9804 - val_loss: 0.2606 - val_accuracy: 0.9036 Epoch 24/25 51/51 [==============================] - ETA: 0s - loss: 0.0629 - accuracy: 0.9804 Epoch 24: val_accuracy did not improve from 0.96354 51/51 [==============================] - 279s 6s/step - loss: 0.0629 - accuracy: 0.9804 - val_loss: 0.1527 - val_accuracy: 0.9531 Epoch 25/25 51/51 [==============================] - ETA: 0s - loss: 0.0471 - accuracy: 0.9853 Epoch 25: val_accuracy did not improve from 0.96354 51/51 [==============================] - 330s 7s/step - loss: 0.0471 - accuracy: 0.9853 - val_loss: 0.2199 - val_accuracy: 0.9297
8/8 [==============================] - 50s 7s/step - loss: 0.3729 - accuracy: 0.8828