Symulowanie-wizualne/sw_lab8.ipynb
2023-01-07 00:39:16 +01:00

644 KiB

Zadanie 8 - AlexNet + Dropout & Batch Normalization

Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop

Przygotowanie danych

from IPython.display import Image, display
import sys
import subprocess
import pkg_resources
import numpy as np

# AlexNet expects inputs of shape (227, 227, 3); images are resized to this.
newSize = (227, 227)

# Install any required package that is not already present in the environment.
required = {'scikit-image'}
installed = {pkg.key for pkg in pkg_resources.working_set}
missing = required - installed

if missing:
    subprocess.check_call(
        [sys.executable, '-m', 'pip', 'install', *missing],
        stdout=subprocess.DEVNULL,
    )

def load_train_data(input_dir):
    """Load training images from per-class subdirectories of *input_dir*.

    Each immediate subdirectory name is used as a class label. Every image
    is resized to ``newSize`` (227x227, as required by AlexNet) and scaled
    to the [0, 1] range.

    Returns a dict with keys:
        values           -- ndarray of shape (n, 227, 227, 3)
        categories_name  -- list of class (directory) names
        categories_count -- number of images found in each directory
        labels           -- per-image class label (directory name)
    """
    import numpy as np
    import os
    from skimage.io import imread
    import cv2 as cv
    from pathlib import Path

    image_dir = Path(input_dir)
    # Class names = names of the immediate subdirectories.
    categories_name = [f for f in os.listdir(image_dir)
                      if os.path.isdir(os.path.join(image_dir, f))]

    folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]

    train_img = []
    categories_count = []
    labels = []
    for direc in folders:
        count = 0
        for obj in direc.iterdir():
            if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':
                labels.append(os.path.basename(os.path.normpath(direc)))
                count += 1
                img = imread(obj)  # ndarray of shape (H, W[, depth])
                if img.ndim == 2:
                    # Grayscale image: replicate the single channel so the
                    # channel slice below does not fail on a 2-D array.
                    img = np.stack([img] * 3, axis=-1)
                img = img[:, :, :3]  # drop alpha channel if present
                img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)
                img = img / 255  # normalize pixel values to [0, 1]
                train_img.append(img)
        categories_count.append(count)

    return {
        "values": np.array(train_img),
        "categories_name": categories_name,
        "categories_count": categories_count,
        "labels": labels,
    }

def load_test_data(input_dir):
    """Load test images listed in ``test_labels.json`` next to *input_dir*.

    The JSON file (located one directory above *input_dir*) is a list of
    objects with ``filename`` and ``value`` (class label) fields, assumed
    to be grouped by class. Images are resized to ``newSize`` and scaled
    to [0, 1].

    Returns a dict with the same keys as ``load_train_data``.
    """
    import numpy as np
    import os
    from skimage.io import imread
    import cv2 as cv
    from pathlib import Path
    import json

    image_path = Path(input_dir)

    # Labels live in test_labels.json one directory above the image folder.
    labels_path = image_path.parents[0] / 'test_labels.json'
    objects = json.loads(labels_path.read_text())

    # Count consecutive runs of identical labels (entries are grouped by
    # class) and collect the distinct class names in order of appearance.
    categories_name = []
    categories_count = []
    count = 0
    c = objects[0]['value']
    for e in objects:
        if e['value'] != c:
            categories_count.append(count)
            c = e['value']
            count = 1
        else:
            count += 1
        if e['value'] not in categories_name:
            categories_name.append(e['value'])
    categories_count.append(count)

    test_img = []
    labels = []
    for e in objects:
        img = imread(image_path / e['filename'])  # (H, W[, depth]) ndarray
        if img.ndim == 2:
            # Grayscale image: replicate the channel to get an RGB array.
            img = np.stack([img] * 3, axis=-1)
        img = img[:, :, :3]  # drop alpha channel if present
        img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)
        img = img / 255  # normalize pixel values to [0, 1]
        test_img.append(img)
        labels.append(e['value'])

    return {
        "values": np.array(test_img),
        "categories_name": categories_name,
        "categories_count": categories_count,
        "labels": labels,
    }
# Data load
data_train = load_train_data("./train_test_sw/train_sw")
values_train = data_train['values']
labels_train = data_train['labels']

data_test = load_test_data("./train_test_sw/test_sw")
X_test = data_test['values']
y_test = data_test['labels']

# Hold out 20% of the training data for validation.
from sklearn.model_selection import train_test_split
X_train, X_validate, y_train, y_validate = train_test_split(values_train, labels_train, test_size=0.2, random_state=42)

# Fit the label encoder ONCE on the training labels, then reuse the same
# mapping for the validation and test splits. (The original cell duplicated
# this block and called fit_transform on every split, which can silently
# produce inconsistent label->integer mappings if the class sets differ.)
from sklearn.preprocessing import LabelEncoder
class_le = LabelEncoder()
y_train_enc = class_le.fit_transform(y_train)
y_validate_enc = class_le.transform(y_validate)
y_test_enc = class_le.transform(y_test)
import tensorflow as tf

# Wrap the numpy splits in tf.data pipelines.
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))
validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))
test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))

# Materialize the number of examples in each split.
train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()
test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()
validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()

print(f"Training data size: {train_ds_size}")
print(f"Test data size: {test_ds_size}")
print(f"Validation data size: {validation_ds_size}")
Training data size: 820
Test data size: 259
Validation data size: 206
# Shuffle and batch each split. Each dataset now shuffles with a buffer equal
# to its OWN cardinality — the original reused train_ds_size as the buffer
# for the test and validation splits, which is simply the wrong size for
# those datasets.
train_ds = (train_ds
                  .shuffle(buffer_size=train_ds_size)
                  .batch(batch_size=32, drop_remainder=True))
test_ds = (test_ds
                  .shuffle(buffer_size=test_ds_size)
                  .batch(batch_size=32, drop_remainder=True))
validation_ds = (validation_ds
                  .shuffle(buffer_size=validation_ds_size)
                  .batch(batch_size=32, drop_remainder=True))
from tensorflow import keras

Dropout

Do warstw spłaszczonych

# AlexNet with Dropout applied after the flattened (dense) layers only.
model_flat_drop = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(.5),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(.5),
    keras.layers.Dense(10, activation='softmax')
])
# `learning_rate` replaces the deprecated `lr` kwarg (see the absl warning
# emitted by the original cell).
model_flat_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=.001), metrics=['accuracy'])
model_flat_drop.summary()
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d (Conv2D)             (None, 55, 55, 96)        34944     
                                                                 
 max_pooling2d (MaxPooling2D  (None, 27, 27, 96)       0         
 )                                                               
                                                                 
 conv2d_1 (Conv2D)           (None, 27, 27, 256)       614656    
                                                                 
 max_pooling2d_1 (MaxPooling  (None, 13, 13, 256)      0         
 2D)                                                             
                                                                 
 conv2d_2 (Conv2D)           (None, 13, 13, 384)       885120    
                                                                 
 conv2d_3 (Conv2D)           (None, 13, 13, 384)       1327488   
                                                                 
 conv2d_4 (Conv2D)           (None, 13, 13, 256)       884992    
                                                                 
 max_pooling2d_2 (MaxPooling  (None, 6, 6, 256)        0         
 2D)                                                             
                                                                 
 flatten (Flatten)           (None, 9216)              0         
                                                                 
 dense (Dense)               (None, 4096)              37752832  
                                                                 
 dropout (Dropout)           (None, 4096)              0         
                                                                 
 dense_1 (Dense)             (None, 4096)              16781312  
                                                                 
 dropout_1 (Dropout)         (None, 4096)              0         
                                                                 
 dense_2 (Dense)             (None, 10)                40970     
                                                                 
=================================================================
Total params: 58,322,314
Trainable params: 58,322,314
Non-trainable params: 0
_________________________________________________________________
from keras.callbacks import ModelCheckpoint, EarlyStopping

# Save the best model by validation accuracy. `save_freq='epoch'` replaces
# the deprecated `period=1` argument (see the tensorflow warnings emitted
# by the original cell).
checkpoint = ModelCheckpoint("alex_1.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')

# `Model.fit` accepts tf.data datasets directly and infers steps per epoch
# from the dataset cardinality; `fit_generator` is deprecated.
alex1 = model_flat_drop.fit(
    train_ds,
    validation_data=validation_ds,
    epochs=25,
    callbacks=[checkpoint, early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/1946638494.py:6: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  alex1 = model_flat_drop.fit_generator(
Epoch 1/25
2023-01-06 21:33:12.260921: W tensorflow/tsl/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz
25/25 [==============================] - ETA: 0s - loss: 2.2671 - accuracy: 0.1963
Epoch 1: val_accuracy improved from -inf to 0.20312, saving model to alex_1.h5
25/25 [==============================] - 24s 939ms/step - loss: 2.2671 - accuracy: 0.1963 - val_loss: 2.2120 - val_accuracy: 0.2031
Epoch 2/25
25/25 [==============================] - ETA: 0s - loss: 2.0757 - accuracy: 0.1875
Epoch 2: val_accuracy improved from 0.20312 to 0.28125, saving model to alex_1.h5
25/25 [==============================] - 22s 899ms/step - loss: 2.0757 - accuracy: 0.1875 - val_loss: 1.7334 - val_accuracy: 0.2812
Epoch 3/25
25/25 [==============================] - ETA: 0s - loss: 1.7064 - accuracy: 0.2100
Epoch 3: val_accuracy did not improve from 0.28125
25/25 [==============================] - 23s 940ms/step - loss: 1.7064 - accuracy: 0.2100 - val_loss: 1.6128 - val_accuracy: 0.2656
Epoch 4/25
25/25 [==============================] - ETA: 0s - loss: 1.6449 - accuracy: 0.2537
Epoch 4: val_accuracy improved from 0.28125 to 0.34896, saving model to alex_1.h5
25/25 [==============================] - 23s 918ms/step - loss: 1.6449 - accuracy: 0.2537 - val_loss: 1.5930 - val_accuracy: 0.3490
Epoch 5/25
25/25 [==============================] - ETA: 0s - loss: 1.6596 - accuracy: 0.2275
Epoch 5: val_accuracy did not improve from 0.34896
25/25 [==============================] - 23s 928ms/step - loss: 1.6596 - accuracy: 0.2275 - val_loss: 1.5650 - val_accuracy: 0.2865
Epoch 6/25
25/25 [==============================] - ETA: 0s - loss: 1.6292 - accuracy: 0.2625
Epoch 6: val_accuracy did not improve from 0.34896
25/25 [==============================] - 23s 935ms/step - loss: 1.6292 - accuracy: 0.2625 - val_loss: 1.5573 - val_accuracy: 0.3021
Epoch 7/25
25/25 [==============================] - ETA: 0s - loss: 1.6197 - accuracy: 0.2562
Epoch 7: val_accuracy did not improve from 0.34896
25/25 [==============================] - 23s 929ms/step - loss: 1.6197 - accuracy: 0.2562 - val_loss: 1.5328 - val_accuracy: 0.3125
Epoch 8/25
25/25 [==============================] - ETA: 0s - loss: 1.5907 - accuracy: 0.2975
Epoch 8: val_accuracy improved from 0.34896 to 0.36458, saving model to alex_1.h5
25/25 [==============================] - 24s 943ms/step - loss: 1.5907 - accuracy: 0.2975 - val_loss: 1.4958 - val_accuracy: 0.3646
Epoch 9/25
25/25 [==============================] - ETA: 0s - loss: 1.5715 - accuracy: 0.2962
Epoch 9: val_accuracy improved from 0.36458 to 0.40104, saving model to alex_1.h5
25/25 [==============================] - 24s 944ms/step - loss: 1.5715 - accuracy: 0.2962 - val_loss: 1.4821 - val_accuracy: 0.4010
Epoch 10/25
25/25 [==============================] - ETA: 0s - loss: 1.5357 - accuracy: 0.3162
Epoch 10: val_accuracy did not improve from 0.40104
25/25 [==============================] - 23s 937ms/step - loss: 1.5357 - accuracy: 0.3162 - val_loss: 1.4562 - val_accuracy: 0.3958
Epoch 11/25
25/25 [==============================] - ETA: 0s - loss: 1.5030 - accuracy: 0.3262
Epoch 11: val_accuracy improved from 0.40104 to 0.45833, saving model to alex_1.h5
25/25 [==============================] - 24s 970ms/step - loss: 1.5030 - accuracy: 0.3262 - val_loss: 1.4106 - val_accuracy: 0.4583
Epoch 12/25
25/25 [==============================] - ETA: 0s - loss: 1.4862 - accuracy: 0.3613
Epoch 12: val_accuracy improved from 0.45833 to 0.53125, saving model to alex_1.h5
25/25 [==============================] - 25s 1s/step - loss: 1.4862 - accuracy: 0.3613 - val_loss: 1.3597 - val_accuracy: 0.5312
Epoch 13/25
25/25 [==============================] - ETA: 0s - loss: 1.4194 - accuracy: 0.4162
Epoch 13: val_accuracy did not improve from 0.53125
25/25 [==============================] - 24s 974ms/step - loss: 1.4194 - accuracy: 0.4162 - val_loss: 1.3095 - val_accuracy: 0.4583
Epoch 14/25
25/25 [==============================] - ETA: 0s - loss: 1.3418 - accuracy: 0.4437
Epoch 14: val_accuracy did not improve from 0.53125
25/25 [==============================] - 24s 959ms/step - loss: 1.3418 - accuracy: 0.4437 - val_loss: 1.2787 - val_accuracy: 0.4792
Epoch 15/25
25/25 [==============================] - ETA: 0s - loss: 1.3059 - accuracy: 0.4675
Epoch 15: val_accuracy did not improve from 0.53125
25/25 [==============================] - 24s 951ms/step - loss: 1.3059 - accuracy: 0.4675 - val_loss: 1.2374 - val_accuracy: 0.4635
Epoch 16/25
25/25 [==============================] - ETA: 0s - loss: 1.2688 - accuracy: 0.4725
Epoch 16: val_accuracy did not improve from 0.53125
25/25 [==============================] - 24s 955ms/step - loss: 1.2688 - accuracy: 0.4725 - val_loss: 1.2178 - val_accuracy: 0.4583
Epoch 17/25
25/25 [==============================] - ETA: 0s - loss: 1.2209 - accuracy: 0.4875
Epoch 17: val_accuracy did not improve from 0.53125
25/25 [==============================] - 24s 958ms/step - loss: 1.2209 - accuracy: 0.4875 - val_loss: 1.2793 - val_accuracy: 0.3958
Epoch 18/25
25/25 [==============================] - ETA: 0s - loss: 1.1457 - accuracy: 0.5150
Epoch 18: val_accuracy improved from 0.53125 to 0.55729, saving model to alex_1.h5
25/25 [==============================] - 24s 980ms/step - loss: 1.1457 - accuracy: 0.5150 - val_loss: 1.0978 - val_accuracy: 0.5573
Epoch 19/25
25/25 [==============================] - ETA: 0s - loss: 1.1318 - accuracy: 0.5063
Epoch 19: val_accuracy did not improve from 0.55729
25/25 [==============================] - 27s 1s/step - loss: 1.1318 - accuracy: 0.5063 - val_loss: 1.0764 - val_accuracy: 0.5104
Epoch 20/25
25/25 [==============================] - ETA: 0s - loss: 1.1289 - accuracy: 0.5125
Epoch 20: val_accuracy improved from 0.55729 to 0.56771, saving model to alex_1.h5
25/25 [==============================] - 25s 1s/step - loss: 1.1289 - accuracy: 0.5125 - val_loss: 1.0067 - val_accuracy: 0.5677
Epoch 21/25
25/25 [==============================] - ETA: 0s - loss: 1.0175 - accuracy: 0.5638
Epoch 21: val_accuracy did not improve from 0.56771
25/25 [==============================] - 26s 1s/step - loss: 1.0175 - accuracy: 0.5638 - val_loss: 1.0095 - val_accuracy: 0.5625
Epoch 22/25
25/25 [==============================] - ETA: 0s - loss: 1.0559 - accuracy: 0.5288
Epoch 22: val_accuracy did not improve from 0.56771
25/25 [==============================] - 26s 1s/step - loss: 1.0559 - accuracy: 0.5288 - val_loss: 1.0557 - val_accuracy: 0.5208
Epoch 23/25
25/25 [==============================] - ETA: 0s - loss: 1.1151 - accuracy: 0.5412
Epoch 23: val_accuracy did not improve from 0.56771
25/25 [==============================] - 26s 1s/step - loss: 1.1151 - accuracy: 0.5412 - val_loss: 1.0837 - val_accuracy: 0.5052
Epoch 24/25
25/25 [==============================] - ETA: 0s - loss: 1.0158 - accuracy: 0.5625
Epoch 24: val_accuracy improved from 0.56771 to 0.58333, saving model to alex_1.h5
25/25 [==============================] - 28s 1s/step - loss: 1.0158 - accuracy: 0.5625 - val_loss: 0.9605 - val_accuracy: 0.5833
Epoch 25/25
25/25 [==============================] - ETA: 0s - loss: 0.9619 - accuracy: 0.5750
Epoch 25: val_accuracy did not improve from 0.58333
25/25 [==============================] - 28s 1s/step - loss: 0.9619 - accuracy: 0.5750 - val_loss: 1.4147 - val_accuracy: 0.3906
import matplotlib.pyplot as plt

# Plot the training curves recorded by the first run.
for metric in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(alex1.history[metric])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","Loss","Validation Loss"])
plt.show()
model_flat_drop.evaluate(test_ds)
8/8 [==============================] - 2s 218ms/step - loss: 1.4086 - accuracy: 0.4141
[1.4086337089538574, 0.4140625]

Do warstw maxpooling

# AlexNet with Dropout applied after each max-pooling layer instead of
# after the dense layers.
model_pool_drop = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
# `learning_rate` replaces the deprecated `lr` kwarg.
model_pool_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=.001), metrics=['accuracy'])
model_pool_drop.summary()
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_5 (Conv2D)           (None, 55, 55, 96)        34944     
                                                                 
 max_pooling2d_3 (MaxPooling  (None, 27, 27, 96)       0         
 2D)                                                             
                                                                 
 dropout_2 (Dropout)         (None, 27, 27, 96)        0         
                                                                 
 conv2d_6 (Conv2D)           (None, 27, 27, 256)       614656    
                                                                 
 max_pooling2d_4 (MaxPooling  (None, 13, 13, 256)      0         
 2D)                                                             
                                                                 
 dropout_3 (Dropout)         (None, 13, 13, 256)       0         
                                                                 
 conv2d_7 (Conv2D)           (None, 13, 13, 384)       885120    
                                                                 
 conv2d_8 (Conv2D)           (None, 13, 13, 384)       1327488   
                                                                 
 conv2d_9 (Conv2D)           (None, 13, 13, 256)       884992    
                                                                 
 max_pooling2d_5 (MaxPooling  (None, 6, 6, 256)        0         
 2D)                                                             
                                                                 
 dropout_4 (Dropout)         (None, 6, 6, 256)         0         
                                                                 
 flatten_1 (Flatten)         (None, 9216)              0         
                                                                 
 dense_3 (Dense)             (None, 4096)              37752832  
                                                                 
 dense_4 (Dense)             (None, 4096)              16781312  
                                                                 
 dense_5 (Dense)             (None, 10)                40970     
                                                                 
=================================================================
Total params: 58,322,314
Trainable params: 58,322,314
Non-trainable params: 0
_________________________________________________________________
# Save the best model by validation accuracy. `save_freq='epoch'` replaces
# the deprecated `period=1` argument.
checkpoint = ModelCheckpoint("alex_2.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')

# `Model.fit` accepts tf.data datasets directly; `fit_generator` is
# deprecated and the steps arguments are inferred from the dataset.
alex2 = model_pool_drop.fit(
    train_ds,
    validation_data=validation_ds,
    epochs=25,
    callbacks=[checkpoint, early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Epoch 1/25
/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/3758035572.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  alex2 = model_pool_drop.fit_generator(
25/25 [==============================] - ETA: 0s - loss: 2.0517 - accuracy: 0.1963
Epoch 1: val_accuracy improved from -inf to 0.26042, saving model to alex_2.h5
25/25 [==============================] - 24s 926ms/step - loss: 2.0517 - accuracy: 0.1963 - val_loss: 1.8585 - val_accuracy: 0.2604
Epoch 2/25
25/25 [==============================] - ETA: 0s - loss: 1.6898 - accuracy: 0.2300
Epoch 2: val_accuracy improved from 0.26042 to 0.30208, saving model to alex_2.h5
25/25 [==============================] - 23s 937ms/step - loss: 1.6898 - accuracy: 0.2300 - val_loss: 1.7242 - val_accuracy: 0.3021
Epoch 3/25
25/25 [==============================] - ETA: 0s - loss: 1.6539 - accuracy: 0.2275
Epoch 3: val_accuracy did not improve from 0.30208
25/25 [==============================] - 23s 942ms/step - loss: 1.6539 - accuracy: 0.2275 - val_loss: 1.7515 - val_accuracy: 0.2552
Epoch 4/25
25/25 [==============================] - ETA: 0s - loss: 1.6148 - accuracy: 0.2775
Epoch 4: val_accuracy did not improve from 0.30208
25/25 [==============================] - 24s 971ms/step - loss: 1.6148 - accuracy: 0.2775 - val_loss: 1.7084 - val_accuracy: 0.2812
Epoch 5/25
25/25 [==============================] - ETA: 0s - loss: 1.5876 - accuracy: 0.3013
Epoch 5: val_accuracy did not improve from 0.30208
25/25 [==============================] - 24s 947ms/step - loss: 1.5876 - accuracy: 0.3013 - val_loss: 1.6701 - val_accuracy: 0.2344
Epoch 6/25
25/25 [==============================] - ETA: 0s - loss: 1.5765 - accuracy: 0.2962
Epoch 6: val_accuracy improved from 0.30208 to 0.34896, saving model to alex_2.h5
25/25 [==============================] - 22s 894ms/step - loss: 1.5765 - accuracy: 0.2962 - val_loss: 1.6380 - val_accuracy: 0.3490
Epoch 7/25
25/25 [==============================] - ETA: 0s - loss: 1.5710 - accuracy: 0.2825
Epoch 7: val_accuracy improved from 0.34896 to 0.36979, saving model to alex_2.h5
25/25 [==============================] - 22s 865ms/step - loss: 1.5710 - accuracy: 0.2825 - val_loss: 1.6219 - val_accuracy: 0.3698
Epoch 8/25
25/25 [==============================] - ETA: 0s - loss: 1.5406 - accuracy: 0.3275
Epoch 8: val_accuracy did not improve from 0.36979
25/25 [==============================] - 22s 872ms/step - loss: 1.5406 - accuracy: 0.3275 - val_loss: 1.6149 - val_accuracy: 0.3646
Epoch 9/25
25/25 [==============================] - ETA: 0s - loss: 1.4844 - accuracy: 0.3537
Epoch 9: val_accuracy did not improve from 0.36979
25/25 [==============================] - 22s 879ms/step - loss: 1.4844 - accuracy: 0.3537 - val_loss: 1.5673 - val_accuracy: 0.3490
Epoch 10/25
25/25 [==============================] - ETA: 0s - loss: 1.4884 - accuracy: 0.3462
Epoch 10: val_accuracy improved from 0.36979 to 0.41146, saving model to alex_2.h5
25/25 [==============================] - 23s 911ms/step - loss: 1.4884 - accuracy: 0.3462 - val_loss: 1.5698 - val_accuracy: 0.4115
Epoch 11/25
25/25 [==============================] - ETA: 0s - loss: 1.4408 - accuracy: 0.3887
Epoch 11: val_accuracy did not improve from 0.41146
25/25 [==============================] - 22s 897ms/step - loss: 1.4408 - accuracy: 0.3887 - val_loss: 1.5205 - val_accuracy: 0.4115
Epoch 12/25
25/25 [==============================] - ETA: 0s - loss: 1.3852 - accuracy: 0.4250
Epoch 12: val_accuracy did not improve from 0.41146
25/25 [==============================] - 23s 905ms/step - loss: 1.3852 - accuracy: 0.4250 - val_loss: 1.5540 - val_accuracy: 0.3594
Epoch 13/25
25/25 [==============================] - ETA: 0s - loss: 1.3202 - accuracy: 0.4663
Epoch 13: val_accuracy did not improve from 0.41146
25/25 [==============================] - 23s 906ms/step - loss: 1.3202 - accuracy: 0.4663 - val_loss: 1.3669 - val_accuracy: 0.4115
Epoch 14/25
25/25 [==============================] - ETA: 0s - loss: 1.2614 - accuracy: 0.4700
Epoch 14: val_accuracy improved from 0.41146 to 0.44792, saving model to alex_2.h5
25/25 [==============================] - 23s 917ms/step - loss: 1.2614 - accuracy: 0.4700 - val_loss: 1.3723 - val_accuracy: 0.4479
Epoch 15/25
25/25 [==============================] - ETA: 0s - loss: 1.1812 - accuracy: 0.4900
Epoch 15: val_accuracy did not improve from 0.44792
25/25 [==============================] - 23s 931ms/step - loss: 1.1812 - accuracy: 0.4900 - val_loss: 1.4332 - val_accuracy: 0.3854
Epoch 16/25
25/25 [==============================] - ETA: 0s - loss: 1.1327 - accuracy: 0.5113
Epoch 16: val_accuracy did not improve from 0.44792
25/25 [==============================] - 23s 908ms/step - loss: 1.1327 - accuracy: 0.5113 - val_loss: 1.4481 - val_accuracy: 0.3802
Epoch 17/25
25/25 [==============================] - ETA: 0s - loss: 1.0848 - accuracy: 0.5462
Epoch 17: val_accuracy did not improve from 0.44792
25/25 [==============================] - 23s 915ms/step - loss: 1.0848 - accuracy: 0.5462 - val_loss: 1.6393 - val_accuracy: 0.3594
Epoch 18/25
25/25 [==============================] - ETA: 0s - loss: 1.1003 - accuracy: 0.5462
Epoch 18: val_accuracy did not improve from 0.44792
25/25 [==============================] - 23s 915ms/step - loss: 1.1003 - accuracy: 0.5462 - val_loss: 1.9934 - val_accuracy: 0.3333
Epoch 19/25
25/25 [==============================] - ETA: 0s - loss: 1.0956 - accuracy: 0.5437
Epoch 19: val_accuracy improved from 0.44792 to 0.47917, saving model to alex_2.h5
25/25 [==============================] - 24s 951ms/step - loss: 1.0956 - accuracy: 0.5437 - val_loss: 1.1398 - val_accuracy: 0.4792
Epoch 20/25
25/25 [==============================] - ETA: 0s - loss: 1.0014 - accuracy: 0.5688
Epoch 20: val_accuracy did not improve from 0.47917
25/25 [==============================] - 24s 976ms/step - loss: 1.0014 - accuracy: 0.5688 - val_loss: 1.2802 - val_accuracy: 0.4062
Epoch 21/25
25/25 [==============================] - ETA: 0s - loss: 1.1812 - accuracy: 0.5213
Epoch 21: val_accuracy did not improve from 0.47917
25/25 [==============================] - 25s 994ms/step - loss: 1.1812 - accuracy: 0.5213 - val_loss: 1.2117 - val_accuracy: 0.4219
Epoch 22/25
25/25 [==============================] - ETA: 0s - loss: 1.1199 - accuracy: 0.5362
Epoch 22: val_accuracy did not improve from 0.47917
25/25 [==============================] - 25s 1s/step - loss: 1.1199 - accuracy: 0.5362 - val_loss: 1.1858 - val_accuracy: 0.4531
Epoch 23/25
25/25 [==============================] - ETA: 0s - loss: 1.0079 - accuracy: 0.5700
Epoch 23: val_accuracy did not improve from 0.47917
25/25 [==============================] - 25s 1s/step - loss: 1.0079 - accuracy: 0.5700 - val_loss: 1.2529 - val_accuracy: 0.4219
Epoch 24/25
25/25 [==============================] - ETA: 0s - loss: 0.9996 - accuracy: 0.5750
Epoch 24: val_accuracy did not improve from 0.47917
25/25 [==============================] - 25s 1s/step - loss: 0.9996 - accuracy: 0.5750 - val_loss: 1.1984 - val_accuracy: 0.4427
Epoch 25/25
25/25 [==============================] - ETA: 0s - loss: 0.9713 - accuracy: 0.5825
Epoch 25: val_accuracy improved from 0.47917 to 0.51042, saving model to alex_2.h5
25/25 [==============================] - 25s 1s/step - loss: 0.9713 - accuracy: 0.5825 - val_loss: 1.0454 - val_accuracy: 0.5104
# Plot the training curves recorded by the second run.
for metric in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(alex2.history[metric])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","Loss","Validation Loss"])
plt.show()
model_pool_drop.evaluate(test_ds)
8/8 [==============================] - 2s 265ms/step - loss: 1.0271 - accuracy: 0.5391
[1.0271097421646118, 0.5390625]

Do warstw splotowych

# AlexNet with Dropout applied after every convolutional layer.
model_conv_drop = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
# `learning_rate` replaces the deprecated `lr` kwarg.
model_conv_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=.001), metrics=['accuracy'])
model_conv_drop.summary()
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential_2"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_10 (Conv2D)          (None, 55, 55, 96)        34944     
                                                                 
 dropout_5 (Dropout)         (None, 55, 55, 96)        0         
                                                                 
 max_pooling2d_6 (MaxPooling  (None, 27, 27, 96)       0         
 2D)                                                             
                                                                 
 conv2d_11 (Conv2D)          (None, 27, 27, 256)       614656    
                                                                 
 dropout_6 (Dropout)         (None, 27, 27, 256)       0         
                                                                 
 max_pooling2d_7 (MaxPooling  (None, 13, 13, 256)      0         
 2D)                                                             
                                                                 
 conv2d_12 (Conv2D)          (None, 13, 13, 384)       885120    
                                                                 
 dropout_7 (Dropout)         (None, 13, 13, 384)       0         
                                                                 
 conv2d_13 (Conv2D)          (None, 13, 13, 384)       1327488   
                                                                 
 dropout_8 (Dropout)         (None, 13, 13, 384)       0         
                                                                 
 conv2d_14 (Conv2D)          (None, 13, 13, 256)       884992    
                                                                 
 dropout_9 (Dropout)         (None, 13, 13, 256)       0         
                                                                 
 max_pooling2d_8 (MaxPooling  (None, 6, 6, 256)        0         
 2D)                                                             
                                                                 
 flatten_2 (Flatten)         (None, 9216)              0         
                                                                 
 dense_6 (Dense)             (None, 4096)              37752832  
                                                                 
 dense_7 (Dense)             (None, 4096)              16781312  
                                                                 
 dense_8 (Dense)             (None, 10)                40970     
                                                                 
=================================================================
Total params: 58,322,314
Trainable params: 58,322,314
Non-trainable params: 0
_________________________________________________________________
# Save the best weights (by validation accuracy) each epoch; the
# deprecated `period=1` kwarg is dropped — the default save_freq='epoch'
# is equivalent and avoids the TF warning.
checkpoint = ModelCheckpoint("alex_3.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')

# `Model.fit_generator` is deprecated; `Model.fit` accepts generators /
# tf.data datasets directly and returns the same History object.
alex3 = model_conv_drop.fit(
    train_ds,
    steps_per_epoch=len(train_ds),
    validation_data=validation_ds,
    validation_steps=len(validation_ds),
    epochs=25,
    callbacks=[checkpoint, early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Epoch 1/25
/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/3866647797.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  alex3 = model_conv_drop.fit_generator(
25/25 [==============================] - ETA: 0s - loss: 1.8090 - accuracy: 0.2450
Epoch 1: val_accuracy improved from -inf to 0.21354, saving model to alex_3.h5
25/25 [==============================] - 26s 1s/step - loss: 1.8090 - accuracy: 0.2450 - val_loss: 2.1443 - val_accuracy: 0.2135
Epoch 2/25
25/25 [==============================] - ETA: 0s - loss: 1.6573 - accuracy: 0.2738
Epoch 2: val_accuracy improved from 0.21354 to 0.40104, saving model to alex_3.h5
25/25 [==============================] - 25s 1s/step - loss: 1.6573 - accuracy: 0.2738 - val_loss: 2.1381 - val_accuracy: 0.4010
Epoch 3/25
25/25 [==============================] - ETA: 0s - loss: 1.5349 - accuracy: 0.3413
Epoch 3: val_accuracy did not improve from 0.40104
25/25 [==============================] - 25s 1s/step - loss: 1.5349 - accuracy: 0.3413 - val_loss: 2.0752 - val_accuracy: 0.2760
Epoch 4/25
25/25 [==============================] - ETA: 0s - loss: 1.4963 - accuracy: 0.3688
Epoch 4: val_accuracy did not improve from 0.40104
25/25 [==============================] - 25s 1s/step - loss: 1.4963 - accuracy: 0.3688 - val_loss: 2.0778 - val_accuracy: 0.2760
Epoch 5/25
25/25 [==============================] - ETA: 0s - loss: 1.3579 - accuracy: 0.4112
Epoch 5: val_accuracy improved from 0.40104 to 0.48958, saving model to alex_3.h5
25/25 [==============================] - 26s 1s/step - loss: 1.3579 - accuracy: 0.4112 - val_loss: 1.9411 - val_accuracy: 0.4896
Epoch 6/25
25/25 [==============================] - ETA: 0s - loss: 1.2882 - accuracy: 0.4512
Epoch 6: val_accuracy did not improve from 0.48958
25/25 [==============================] - 25s 1s/step - loss: 1.2882 - accuracy: 0.4512 - val_loss: 1.8212 - val_accuracy: 0.4323
Epoch 7/25
25/25 [==============================] - ETA: 0s - loss: 1.1601 - accuracy: 0.5163
Epoch 7: val_accuracy did not improve from 0.48958
25/25 [==============================] - 25s 1s/step - loss: 1.1601 - accuracy: 0.5163 - val_loss: 1.7429 - val_accuracy: 0.3802
Epoch 8/25
25/25 [==============================] - ETA: 0s - loss: 1.2260 - accuracy: 0.4950
Epoch 8: val_accuracy did not improve from 0.48958
25/25 [==============================] - 26s 1s/step - loss: 1.2260 - accuracy: 0.4950 - val_loss: 1.8061 - val_accuracy: 0.3490
Epoch 9/25
25/25 [==============================] - ETA: 0s - loss: 1.1188 - accuracy: 0.5200
Epoch 9: val_accuracy did not improve from 0.48958
25/25 [==============================] - 27s 1s/step - loss: 1.1188 - accuracy: 0.5200 - val_loss: 1.7995 - val_accuracy: 0.3177
Epoch 10/25
25/25 [==============================] - ETA: 0s - loss: 0.9879 - accuracy: 0.5950
Epoch 10: val_accuracy did not improve from 0.48958
25/25 [==============================] - 27s 1s/step - loss: 0.9879 - accuracy: 0.5950 - val_loss: 1.8887 - val_accuracy: 0.1875
Epoch 11/25
25/25 [==============================] - ETA: 0s - loss: 0.9848 - accuracy: 0.5800
Epoch 11: val_accuracy did not improve from 0.48958
25/25 [==============================] - 26s 1s/step - loss: 0.9848 - accuracy: 0.5800 - val_loss: 1.7492 - val_accuracy: 0.3073
Epoch 12/25
25/25 [==============================] - ETA: 0s - loss: 0.9861 - accuracy: 0.6100
Epoch 12: val_accuracy did not improve from 0.48958
25/25 [==============================] - 26s 1s/step - loss: 0.9861 - accuracy: 0.6100 - val_loss: 1.6876 - val_accuracy: 0.3646
Epoch 13/25
25/25 [==============================] - ETA: 0s - loss: 0.9351 - accuracy: 0.6075
Epoch 13: val_accuracy improved from 0.48958 to 0.51562, saving model to alex_3.h5
25/25 [==============================] - 27s 1s/step - loss: 0.9351 - accuracy: 0.6075 - val_loss: 1.5044 - val_accuracy: 0.5156
Epoch 14/25
25/25 [==============================] - ETA: 0s - loss: 0.9683 - accuracy: 0.6125
Epoch 14: val_accuracy did not improve from 0.51562
25/25 [==============================] - 28s 1s/step - loss: 0.9683 - accuracy: 0.6125 - val_loss: 1.5911 - val_accuracy: 0.4375
Epoch 15/25
25/25 [==============================] - ETA: 0s - loss: 0.9354 - accuracy: 0.6037
Epoch 15: val_accuracy did not improve from 0.51562
25/25 [==============================] - 28s 1s/step - loss: 0.9354 - accuracy: 0.6037 - val_loss: 1.6423 - val_accuracy: 0.3698
Epoch 16/25
25/25 [==============================] - ETA: 0s - loss: 0.8270 - accuracy: 0.6800
Epoch 16: val_accuracy did not improve from 0.51562
25/25 [==============================] - 30s 1s/step - loss: 0.8270 - accuracy: 0.6800 - val_loss: 1.6960 - val_accuracy: 0.2708
Epoch 17/25
25/25 [==============================] - ETA: 0s - loss: 0.8327 - accuracy: 0.6488
Epoch 17: val_accuracy did not improve from 0.51562
25/25 [==============================] - 30s 1s/step - loss: 0.8327 - accuracy: 0.6488 - val_loss: 1.6061 - val_accuracy: 0.3646
Epoch 18/25
25/25 [==============================] - ETA: 0s - loss: 0.8175 - accuracy: 0.6625
Epoch 18: val_accuracy did not improve from 0.51562
25/25 [==============================] - 27s 1s/step - loss: 0.8175 - accuracy: 0.6625 - val_loss: 1.5903 - val_accuracy: 0.4531
Epoch 19/25
25/25 [==============================] - ETA: 0s - loss: 0.7260 - accuracy: 0.7063
Epoch 19: val_accuracy did not improve from 0.51562
25/25 [==============================] - 29s 1s/step - loss: 0.7260 - accuracy: 0.7063 - val_loss: 1.4000 - val_accuracy: 0.4896
Epoch 20/25
25/25 [==============================] - ETA: 0s - loss: 0.7956 - accuracy: 0.6587
Epoch 20: val_accuracy did not improve from 0.51562
25/25 [==============================] - 28s 1s/step - loss: 0.7956 - accuracy: 0.6587 - val_loss: 1.6044 - val_accuracy: 0.4010
Epoch 21/25
25/25 [==============================] - ETA: 0s - loss: 0.8474 - accuracy: 0.6625
Epoch 21: val_accuracy did not improve from 0.51562
25/25 [==============================] - 28s 1s/step - loss: 0.8474 - accuracy: 0.6625 - val_loss: 1.5974 - val_accuracy: 0.3490
Epoch 22/25
25/25 [==============================] - ETA: 0s - loss: 0.6524 - accuracy: 0.7175
Epoch 22: val_accuracy did not improve from 0.51562
25/25 [==============================] - 27s 1s/step - loss: 0.6524 - accuracy: 0.7175 - val_loss: 1.5435 - val_accuracy: 0.3594
Epoch 23/25
25/25 [==============================] - ETA: 0s - loss: 0.8152 - accuracy: 0.6725
Epoch 23: val_accuracy did not improve from 0.51562
25/25 [==============================] - 26s 1s/step - loss: 0.8152 - accuracy: 0.6725 - val_loss: 1.8228 - val_accuracy: 0.2656
Epoch 24/25
25/25 [==============================] - ETA: 0s - loss: 0.8200 - accuracy: 0.6725
Epoch 24: val_accuracy did not improve from 0.51562
25/25 [==============================] - 29s 1s/step - loss: 0.8200 - accuracy: 0.6725 - val_loss: 1.5864 - val_accuracy: 0.3854
Epoch 25/25
25/25 [==============================] - ETA: 0s - loss: 0.7701 - accuracy: 0.6825
Epoch 25: val_accuracy did not improve from 0.51562
25/25 [==============================] - 27s 1s/step - loss: 0.7701 - accuracy: 0.6825 - val_loss: 1.4605 - val_accuracy: 0.5104
# Training curves for the conv-dropout variant: train/validation
# accuracy and loss, all on a single axis.
for key in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(alex3.history[key])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","Loss","Validation Loss"])
plt.show()
# Final hold-out evaluation: returns [loss, accuracy] on the test set.
model_conv_drop.evaluate(test_ds)
8/8 [==============================] - 2s 280ms/step - loss: 1.4843 - accuracy: 0.4570
[1.4843157529830933, 0.45703125]

Do warstw spłaszczonych i maxpooling

# AlexNet variant with Dropout(0.5) after each pooling stage AND after
# each fully-connected hidden layer. Input is a 227x227 RGB image;
# output is a 10-way softmax.
model_flat_pool_drop = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(.5),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(.5),
    keras.layers.Dense(10, activation='softmax')
])
# `lr` is deprecated in tf.keras optimizers — use `learning_rate`
# (silences the absl deprecation warning without changing behavior).
model_flat_pool_drop.compile(loss='sparse_categorical_crossentropy',
                             optimizer=tf.optimizers.SGD(learning_rate=.001),
                             metrics=['accuracy'])
model_flat_pool_drop.summary()
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential_3"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_15 (Conv2D)          (None, 55, 55, 96)        34944     
                                                                 
 max_pooling2d_9 (MaxPooling  (None, 27, 27, 96)       0         
 2D)                                                             
                                                                 
 dropout_10 (Dropout)        (None, 27, 27, 96)        0         
                                                                 
 conv2d_16 (Conv2D)          (None, 27, 27, 256)       614656    
                                                                 
 max_pooling2d_10 (MaxPoolin  (None, 13, 13, 256)      0         
 g2D)                                                            
                                                                 
 dropout_11 (Dropout)        (None, 13, 13, 256)       0         
                                                                 
 conv2d_17 (Conv2D)          (None, 13, 13, 384)       885120    
                                                                 
 conv2d_18 (Conv2D)          (None, 13, 13, 384)       1327488   
                                                                 
 conv2d_19 (Conv2D)          (None, 13, 13, 256)       884992    
                                                                 
 max_pooling2d_11 (MaxPoolin  (None, 6, 6, 256)        0         
 g2D)                                                            
                                                                 
 dropout_12 (Dropout)        (None, 6, 6, 256)         0         
                                                                 
 flatten_3 (Flatten)         (None, 9216)              0         
                                                                 
 dense_9 (Dense)             (None, 4096)              37752832  
                                                                 
 dropout_13 (Dropout)        (None, 4096)              0         
                                                                 
 dense_10 (Dense)            (None, 4096)              16781312  
                                                                 
 dropout_14 (Dropout)        (None, 4096)              0         
                                                                 
 dense_11 (Dense)            (None, 10)                40970     
                                                                 
=================================================================
Total params: 58,322,314
Trainable params: 58,322,314
Non-trainable params: 0
_________________________________________________________________
# Save the best weights (by validation accuracy) each epoch; the
# deprecated `period=1` kwarg is dropped — the default save_freq='epoch'
# is equivalent and avoids the TF warning.
checkpoint = ModelCheckpoint("alex_4.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')

# `Model.fit_generator` is deprecated; `Model.fit` accepts generators /
# tf.data datasets directly and returns the same History object.
alex4 = model_flat_pool_drop.fit(
    train_ds,
    steps_per_epoch=len(train_ds),
    validation_data=validation_ds,
    validation_steps=len(validation_ds),
    epochs=25,
    callbacks=[checkpoint, early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Epoch 1/25
/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/2334869237.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  alex4 = model_flat_pool_drop.fit_generator(
25/25 [==============================] - ETA: 0s - loss: 2.1044 - accuracy: 0.1750
Epoch 1: val_accuracy improved from -inf to 0.25000, saving model to alex_4.h5
25/25 [==============================] - 27s 1s/step - loss: 2.1044 - accuracy: 0.1750 - val_loss: 1.9644 - val_accuracy: 0.2500
Epoch 2/25
25/25 [==============================] - ETA: 0s - loss: 1.7691 - accuracy: 0.1875
Epoch 2: val_accuracy did not improve from 0.25000
25/25 [==============================] - 26s 1s/step - loss: 1.7691 - accuracy: 0.1875 - val_loss: 1.8190 - val_accuracy: 0.1979
Epoch 3/25
25/25 [==============================] - ETA: 0s - loss: 1.7062 - accuracy: 0.2113
Epoch 3: val_accuracy did not improve from 0.25000
25/25 [==============================] - 27s 1s/step - loss: 1.7062 - accuracy: 0.2113 - val_loss: 1.8115 - val_accuracy: 0.2083
Epoch 4/25
25/25 [==============================] - ETA: 0s - loss: 1.6706 - accuracy: 0.2362
Epoch 4: val_accuracy improved from 0.25000 to 0.30208, saving model to alex_4.h5
25/25 [==============================] - 26s 1s/step - loss: 1.6706 - accuracy: 0.2362 - val_loss: 1.7808 - val_accuracy: 0.3021
Epoch 5/25
25/25 [==============================] - ETA: 0s - loss: 1.6715 - accuracy: 0.2113
Epoch 5: val_accuracy improved from 0.30208 to 0.30729, saving model to alex_4.h5
25/25 [==============================] - 28s 1s/step - loss: 1.6715 - accuracy: 0.2113 - val_loss: 1.7774 - val_accuracy: 0.3073
Epoch 6/25
25/25 [==============================] - ETA: 0s - loss: 1.6512 - accuracy: 0.2425
Epoch 6: val_accuracy improved from 0.30729 to 0.32812, saving model to alex_4.h5
25/25 [==============================] - 27s 1s/step - loss: 1.6512 - accuracy: 0.2425 - val_loss: 1.7714 - val_accuracy: 0.3281
Epoch 7/25
25/25 [==============================] - ETA: 0s - loss: 1.6418 - accuracy: 0.2475
Epoch 7: val_accuracy did not improve from 0.32812
25/25 [==============================] - 27s 1s/step - loss: 1.6418 - accuracy: 0.2475 - val_loss: 1.7421 - val_accuracy: 0.2969
Epoch 8/25
25/25 [==============================] - ETA: 0s - loss: 1.5988 - accuracy: 0.2488
Epoch 8: val_accuracy did not improve from 0.32812
25/25 [==============================] - 27s 1s/step - loss: 1.5988 - accuracy: 0.2488 - val_loss: 1.7183 - val_accuracy: 0.3177
Epoch 9/25
25/25 [==============================] - ETA: 0s - loss: 1.5946 - accuracy: 0.2800
Epoch 9: val_accuracy improved from 0.32812 to 0.34896, saving model to alex_4.h5
25/25 [==============================] - 27s 1s/step - loss: 1.5946 - accuracy: 0.2800 - val_loss: 1.6653 - val_accuracy: 0.3490
Epoch 10/25
25/25 [==============================] - ETA: 0s - loss: 1.5646 - accuracy: 0.2875
Epoch 10: val_accuracy did not improve from 0.34896
25/25 [==============================] - 28s 1s/step - loss: 1.5646 - accuracy: 0.2875 - val_loss: 1.6476 - val_accuracy: 0.3490
Epoch 11/25
25/25 [==============================] - ETA: 0s - loss: 1.5359 - accuracy: 0.3200
Epoch 11: val_accuracy improved from 0.34896 to 0.45312, saving model to alex_4.h5
25/25 [==============================] - 28s 1s/step - loss: 1.5359 - accuracy: 0.3200 - val_loss: 1.5768 - val_accuracy: 0.4531
Epoch 12/25
25/25 [==============================] - ETA: 0s - loss: 1.4968 - accuracy: 0.3200
Epoch 12: val_accuracy did not improve from 0.45312
25/25 [==============================] - 27s 1s/step - loss: 1.4968 - accuracy: 0.3200 - val_loss: 1.5472 - val_accuracy: 0.3594
Epoch 13/25
25/25 [==============================] - ETA: 0s - loss: 1.4612 - accuracy: 0.3975
Epoch 13: val_accuracy did not improve from 0.45312
25/25 [==============================] - 27s 1s/step - loss: 1.4612 - accuracy: 0.3975 - val_loss: 1.4494 - val_accuracy: 0.4427
Epoch 14/25
25/25 [==============================] - ETA: 0s - loss: 1.3955 - accuracy: 0.4038
Epoch 14: val_accuracy did not improve from 0.45312
25/25 [==============================] - 27s 1s/step - loss: 1.3955 - accuracy: 0.4038 - val_loss: 1.4523 - val_accuracy: 0.3542
Epoch 15/25
25/25 [==============================] - ETA: 0s - loss: 1.3153 - accuracy: 0.4525
Epoch 15: val_accuracy did not improve from 0.45312
25/25 [==============================] - 28s 1s/step - loss: 1.3153 - accuracy: 0.4525 - val_loss: 1.3144 - val_accuracy: 0.4062
Epoch 16/25
25/25 [==============================] - ETA: 0s - loss: 1.2655 - accuracy: 0.4638
Epoch 16: val_accuracy did not improve from 0.45312
25/25 [==============================] - 26s 1s/step - loss: 1.2655 - accuracy: 0.4638 - val_loss: 1.2121 - val_accuracy: 0.4479
Epoch 17/25
25/25 [==============================] - ETA: 0s - loss: 1.1774 - accuracy: 0.4900
Epoch 17: val_accuracy improved from 0.45312 to 0.47917, saving model to alex_4.h5
25/25 [==============================] - 26s 1s/step - loss: 1.1774 - accuracy: 0.4900 - val_loss: 1.1340 - val_accuracy: 0.4792
Epoch 18/25
25/25 [==============================] - ETA: 0s - loss: 1.1709 - accuracy: 0.4875
Epoch 18: val_accuracy did not improve from 0.47917
25/25 [==============================] - 26s 1s/step - loss: 1.1709 - accuracy: 0.4875 - val_loss: 1.1360 - val_accuracy: 0.4635
Epoch 19/25
25/25 [==============================] - ETA: 0s - loss: 1.1127 - accuracy: 0.5125
Epoch 19: val_accuracy improved from 0.47917 to 0.48958, saving model to alex_4.h5
25/25 [==============================] - 26s 1s/step - loss: 1.1127 - accuracy: 0.5125 - val_loss: 1.1156 - val_accuracy: 0.4896
Epoch 20/25
25/25 [==============================] - ETA: 0s - loss: 1.0822 - accuracy: 0.5263
Epoch 20: val_accuracy improved from 0.48958 to 0.54167, saving model to alex_4.h5
25/25 [==============================] - 26s 1s/step - loss: 1.0822 - accuracy: 0.5263 - val_loss: 0.9865 - val_accuracy: 0.5417
Epoch 21/25
25/25 [==============================] - ETA: 0s - loss: 1.1573 - accuracy: 0.5063
Epoch 21: val_accuracy did not improve from 0.54167
25/25 [==============================] - 26s 1s/step - loss: 1.1573 - accuracy: 0.5063 - val_loss: 1.5426 - val_accuracy: 0.3490
Epoch 22/25
25/25 [==============================] - ETA: 0s - loss: 1.0643 - accuracy: 0.5400
Epoch 22: val_accuracy did not improve from 0.54167
25/25 [==============================] - 26s 1s/step - loss: 1.0643 - accuracy: 0.5400 - val_loss: 1.1197 - val_accuracy: 0.4896
Epoch 23/25
25/25 [==============================] - ETA: 0s - loss: 1.0817 - accuracy: 0.5512
Epoch 23: val_accuracy improved from 0.54167 to 0.56771, saving model to alex_4.h5
25/25 [==============================] - 28s 1s/step - loss: 1.0817 - accuracy: 0.5512 - val_loss: 1.0690 - val_accuracy: 0.5677
Epoch 24/25
25/25 [==============================] - ETA: 0s - loss: 1.0167 - accuracy: 0.5600
Epoch 24: val_accuracy did not improve from 0.56771
25/25 [==============================] - 28s 1s/step - loss: 1.0167 - accuracy: 0.5600 - val_loss: 1.0323 - val_accuracy: 0.5208
Epoch 25/25
25/25 [==============================] - ETA: 0s - loss: 1.1168 - accuracy: 0.5537
Epoch 25: val_accuracy did not improve from 0.56771
25/25 [==============================] - 29s 1s/step - loss: 1.1168 - accuracy: 0.5537 - val_loss: 1.1679 - val_accuracy: 0.4948
# Training curves for the pool+dense-dropout variant: train/validation
# accuracy and loss, all on a single axis.
for key in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(alex4.history[key])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","Loss","Validation Loss"])
plt.show()
# Final hold-out evaluation: returns [loss, accuracy] on the test set.
model_flat_pool_drop.evaluate(test_ds)
8/8 [==============================] - 3s 321ms/step - loss: 1.2209 - accuracy: 0.5000
[1.220850944519043, 0.5]

Do warstw spłaszczonych i splotowych

# AlexNet variant with Dropout(0.5) after every convolutional layer AND
# after each fully-connected hidden layer. Input is a 227x227 RGB image;
# output is a 10-way softmax.
model_flat_conv_drop = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(.5),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(.5),
    keras.layers.Dense(10, activation='softmax')
])
# `lr` is deprecated in tf.keras optimizers — use `learning_rate`
# (silences the absl deprecation warning without changing behavior).
model_flat_conv_drop.compile(loss='sparse_categorical_crossentropy',
                             optimizer=tf.optimizers.SGD(learning_rate=.001),
                             metrics=['accuracy'])
model_flat_conv_drop.summary()
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential_4"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_20 (Conv2D)          (None, 55, 55, 96)        34944     
                                                                 
 dropout_15 (Dropout)        (None, 55, 55, 96)        0         
                                                                 
 max_pooling2d_12 (MaxPoolin  (None, 27, 27, 96)       0         
 g2D)                                                            
                                                                 
 conv2d_21 (Conv2D)          (None, 27, 27, 256)       614656    
                                                                 
 dropout_16 (Dropout)        (None, 27, 27, 256)       0         
                                                                 
 max_pooling2d_13 (MaxPoolin  (None, 13, 13, 256)      0         
 g2D)                                                            
                                                                 
 conv2d_22 (Conv2D)          (None, 13, 13, 384)       885120    
                                                                 
 dropout_17 (Dropout)        (None, 13, 13, 384)       0         
                                                                 
 conv2d_23 (Conv2D)          (None, 13, 13, 384)       1327488   
                                                                 
 dropout_18 (Dropout)        (None, 13, 13, 384)       0         
                                                                 
 conv2d_24 (Conv2D)          (None, 13, 13, 256)       884992    
                                                                 
 dropout_19 (Dropout)        (None, 13, 13, 256)       0         
                                                                 
 max_pooling2d_14 (MaxPoolin  (None, 6, 6, 256)        0         
 g2D)                                                            
                                                                 
 flatten_4 (Flatten)         (None, 9216)              0         
                                                                 
 dense_12 (Dense)            (None, 4096)              37752832  
                                                                 
 dropout_20 (Dropout)        (None, 4096)              0         
                                                                 
 dense_13 (Dense)            (None, 4096)              16781312  
                                                                 
 dropout_21 (Dropout)        (None, 4096)              0         
                                                                 
 dense_14 (Dense)            (None, 10)                40970     
                                                                 
=================================================================
Total params: 58,322,314
Trainable params: 58,322,314
Non-trainable params: 0
_________________________________________________________________
# Save the best weights (by validation accuracy) each epoch; the
# deprecated `period=1` kwarg is dropped — the default save_freq='epoch'
# is equivalent and avoids the TF warning.
checkpoint = ModelCheckpoint("alex_5.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')

# `Model.fit_generator` is deprecated; `Model.fit` accepts generators /
# tf.data datasets directly and returns the same History object.
alex5 = model_flat_conv_drop.fit(
    train_ds,
    steps_per_epoch=len(train_ds),
    validation_data=validation_ds,
    validation_steps=len(validation_ds),
    epochs=25,
    callbacks=[checkpoint, early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Epoch 1/25
/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/1544533144.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  alex5 = model_flat_conv_drop.fit_generator(
25/25 [==============================] - ETA: 0s - loss: 1.8865 - accuracy: 0.2087
Epoch 1: val_accuracy improved from -inf to 0.31771, saving model to alex_5.h5
25/25 [==============================] - 31s 1s/step - loss: 1.8865 - accuracy: 0.2087 - val_loss: 2.1611 - val_accuracy: 0.3177
Epoch 2/25
25/25 [==============================] - ETA: 0s - loss: 1.6987 - accuracy: 0.2250
Epoch 2: val_accuracy did not improve from 0.31771
25/25 [==============================] - 33s 1s/step - loss: 1.6987 - accuracy: 0.2250 - val_loss: 2.1324 - val_accuracy: 0.1823
Epoch 3/25
25/25 [==============================] - ETA: 0s - loss: 1.6349 - accuracy: 0.2675
Epoch 3: val_accuracy did not improve from 0.31771
25/25 [==============================] - 29s 1s/step - loss: 1.6349 - accuracy: 0.2675 - val_loss: 2.0670 - val_accuracy: 0.3125
Epoch 4/25
25/25 [==============================] - ETA: 0s - loss: 1.5613 - accuracy: 0.3212
Epoch 4: val_accuracy improved from 0.31771 to 0.34896, saving model to alex_5.h5
25/25 [==============================] - 29s 1s/step - loss: 1.5613 - accuracy: 0.3212 - val_loss: 2.0176 - val_accuracy: 0.3490
Epoch 5/25
25/25 [==============================] - ETA: 0s - loss: 1.4594 - accuracy: 0.3587
Epoch 5: val_accuracy did not improve from 0.34896
25/25 [==============================] - 30s 1s/step - loss: 1.4594 - accuracy: 0.3587 - val_loss: 1.9236 - val_accuracy: 0.3177
Epoch 6/25
25/25 [==============================] - ETA: 0s - loss: 1.3418 - accuracy: 0.4050
Epoch 6: val_accuracy improved from 0.34896 to 0.38021, saving model to alex_5.h5
25/25 [==============================] - 30s 1s/step - loss: 1.3418 - accuracy: 0.4050 - val_loss: 1.8750 - val_accuracy: 0.3802
Epoch 7/25
25/25 [==============================] - ETA: 0s - loss: 1.3014 - accuracy: 0.4437
Epoch 7: val_accuracy did not improve from 0.38021
25/25 [==============================] - 29s 1s/step - loss: 1.3014 - accuracy: 0.4437 - val_loss: 2.0340 - val_accuracy: 0.1979
Epoch 8/25
25/25 [==============================] - ETA: 0s - loss: 1.2022 - accuracy: 0.4638
Epoch 8: val_accuracy improved from 0.38021 to 0.44271, saving model to alex_5.h5
25/25 [==============================] - 29s 1s/step - loss: 1.2022 - accuracy: 0.4638 - val_loss: 1.7184 - val_accuracy: 0.4427
Epoch 9/25
25/25 [==============================] - ETA: 0s - loss: 1.1867 - accuracy: 0.4712
Epoch 9: val_accuracy did not improve from 0.44271
25/25 [==============================] - 27s 1s/step - loss: 1.1867 - accuracy: 0.4712 - val_loss: 1.8339 - val_accuracy: 0.3385
Epoch 10/25
25/25 [==============================] - ETA: 0s - loss: 1.0586 - accuracy: 0.5225
Epoch 10: val_accuracy improved from 0.44271 to 0.44792, saving model to alex_5.h5
25/25 [==============================] - 30s 1s/step - loss: 1.0586 - accuracy: 0.5225 - val_loss: 1.6957 - val_accuracy: 0.4479
Epoch 11/25
25/25 [==============================] - ETA: 0s - loss: 1.1329 - accuracy: 0.4988
Epoch 11: val_accuracy did not improve from 0.44792
25/25 [==============================] - 31s 1s/step - loss: 1.1329 - accuracy: 0.4988 - val_loss: 1.7963 - val_accuracy: 0.3646
Epoch 12/25
25/25 [==============================] - ETA: 0s - loss: 1.0527 - accuracy: 0.5387
Epoch 12: val_accuracy did not improve from 0.44792
25/25 [==============================] - 33s 1s/step - loss: 1.0527 - accuracy: 0.5387 - val_loss: 1.7027 - val_accuracy: 0.4062
Epoch 13/25
25/25 [==============================] - ETA: 0s - loss: 1.1811 - accuracy: 0.5063
Epoch 13: val_accuracy did not improve from 0.44792
25/25 [==============================] - 30s 1s/step - loss: 1.1811 - accuracy: 0.5063 - val_loss: 1.7790 - val_accuracy: 0.3542
Epoch 14/25
25/25 [==============================] - ETA: 0s - loss: 1.0314 - accuracy: 0.5450
Epoch 14: val_accuracy did not improve from 0.44792
25/25 [==============================] - 28s 1s/step - loss: 1.0314 - accuracy: 0.5450 - val_loss: 1.6602 - val_accuracy: 0.4323
Epoch 15/25
25/25 [==============================] - ETA: 0s - loss: 1.0199 - accuracy: 0.5663
Epoch 15: val_accuracy did not improve from 0.44792
25/25 [==============================] - 28s 1s/step - loss: 1.0199 - accuracy: 0.5663 - val_loss: 1.7097 - val_accuracy: 0.3542
Epoch 16/25
25/25 [==============================] - ETA: 0s - loss: 1.0358 - accuracy: 0.5525
Epoch 16: val_accuracy did not improve from 0.44792
25/25 [==============================] - 28s 1s/step - loss: 1.0358 - accuracy: 0.5525 - val_loss: 1.7355 - val_accuracy: 0.3177
Epoch 17/25
25/25 [==============================] - ETA: 0s - loss: 0.9676 - accuracy: 0.5875
Epoch 17: val_accuracy improved from 0.44792 to 0.54167, saving model to alex_5.h5
25/25 [==============================] - 28s 1s/step - loss: 0.9676 - accuracy: 0.5875 - val_loss: 1.5246 - val_accuracy: 0.5417
Epoch 18/25
25/25 [==============================] - ETA: 0s - loss: 0.9063 - accuracy: 0.5950
Epoch 18: val_accuracy did not improve from 0.54167
25/25 [==============================] - 28s 1s/step - loss: 0.9063 - accuracy: 0.5950 - val_loss: 1.5602 - val_accuracy: 0.4688
Epoch 19/25
25/25 [==============================] - ETA: 0s - loss: 0.9411 - accuracy: 0.6250
Epoch 19: val_accuracy did not improve from 0.54167
25/25 [==============================] - 28s 1s/step - loss: 0.9411 - accuracy: 0.6250 - val_loss: 1.7089 - val_accuracy: 0.2917
Epoch 20/25
25/25 [==============================] - ETA: 0s - loss: 0.8750 - accuracy: 0.6475
Epoch 20: val_accuracy did not improve from 0.54167
25/25 [==============================] - 28s 1s/step - loss: 0.8750 - accuracy: 0.6475 - val_loss: 1.7448 - val_accuracy: 0.2812
Epoch 21/25
25/25 [==============================] - ETA: 0s - loss: 0.8677 - accuracy: 0.6087
Epoch 21: val_accuracy did not improve from 0.54167
25/25 [==============================] - 28s 1s/step - loss: 0.8677 - accuracy: 0.6087 - val_loss: 1.5079 - val_accuracy: 0.5000
Epoch 22/25
25/25 [==============================] - ETA: 0s - loss: 0.8868 - accuracy: 0.6275
Epoch 22: val_accuracy did not improve from 0.54167
25/25 [==============================] - 28s 1s/step - loss: 0.8868 - accuracy: 0.6275 - val_loss: 1.6442 - val_accuracy: 0.3073
Epoch 23/25
25/25 [==============================] - ETA: 0s - loss: 0.8708 - accuracy: 0.6338
Epoch 23: val_accuracy did not improve from 0.54167
25/25 [==============================] - 29s 1s/step - loss: 0.8708 - accuracy: 0.6338 - val_loss: 1.6207 - val_accuracy: 0.3646
Epoch 24/25
25/25 [==============================] - ETA: 0s - loss: 0.7959 - accuracy: 0.6712
Epoch 24: val_accuracy did not improve from 0.54167
25/25 [==============================] - 29s 1s/step - loss: 0.7959 - accuracy: 0.6712 - val_loss: 1.6913 - val_accuracy: 0.3073
Epoch 25/25
25/25 [==============================] - ETA: 0s - loss: 0.8158 - accuracy: 0.6775
Epoch 25: val_accuracy did not improve from 0.54167
25/25 [==============================] - 29s 1s/step - loss: 0.8158 - accuracy: 0.6775 - val_loss: 1.4933 - val_accuracy: 0.4323
# Plot training curves for the flat+conv-dropout AlexNet variant.
# The chart shows accuracy AND loss, so the title says "metrics" —
# the original "Model accuracy" label was misleading.
for metric in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(alex5.history[metric])
plt.title("Model metrics")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","Loss","Validation Loss"])
plt.show()
# Evaluate the trained model on the held-out test set; the cell displays
# [loss, accuracy] (model was compiled with metrics=['accuracy']).
model_flat_conv_drop.evaluate(test_ds)
8/8 [==============================] - 2s 307ms/step - loss: 1.4823 - accuracy: 0.4531
[1.4823071956634521, 0.453125]

Do warstw maxpooling i splotowych

# AlexNet variant with Dropout(0.5) after every convolutional and
# max-pooling layer (experiment: dropout on pool + conv layers only;
# the dense layers are left without dropout).
model_pool_conv_drop = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
# `learning_rate` replaces the deprecated `lr` argument (see the absl
# warning this cell previously emitted).
model_pool_conv_drop.compile(loss='sparse_categorical_crossentropy',
                             optimizer=tf.optimizers.SGD(learning_rate=.001),
                             metrics=['accuracy'])
model_pool_conv_drop.summary()
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential_5"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_25 (Conv2D)          (None, 55, 55, 96)        34944     
                                                                 
 dropout_22 (Dropout)        (None, 55, 55, 96)        0         
                                                                 
 max_pooling2d_15 (MaxPoolin  (None, 27, 27, 96)       0         
 g2D)                                                            
                                                                 
 dropout_23 (Dropout)        (None, 27, 27, 96)        0         
                                                                 
 conv2d_26 (Conv2D)          (None, 27, 27, 256)       614656    
                                                                 
 dropout_24 (Dropout)        (None, 27, 27, 256)       0         
                                                                 
 max_pooling2d_16 (MaxPoolin  (None, 13, 13, 256)      0         
 g2D)                                                            
                                                                 
 dropout_25 (Dropout)        (None, 13, 13, 256)       0         
                                                                 
 conv2d_27 (Conv2D)          (None, 13, 13, 384)       885120    
                                                                 
 dropout_26 (Dropout)        (None, 13, 13, 384)       0         
                                                                 
 conv2d_28 (Conv2D)          (None, 13, 13, 384)       1327488   
                                                                 
 dropout_27 (Dropout)        (None, 13, 13, 384)       0         
                                                                 
 conv2d_29 (Conv2D)          (None, 13, 13, 256)       884992    
                                                                 
 dropout_28 (Dropout)        (None, 13, 13, 256)       0         
                                                                 
 max_pooling2d_17 (MaxPoolin  (None, 6, 6, 256)        0         
 g2D)                                                            
                                                                 
 dropout_29 (Dropout)        (None, 6, 6, 256)         0         
                                                                 
 flatten_5 (Flatten)         (None, 9216)              0         
                                                                 
 dense_15 (Dense)            (None, 4096)              37752832  
                                                                 
 dense_16 (Dense)            (None, 4096)              16781312  
                                                                 
 dense_17 (Dense)            (None, 10)                40970     
                                                                 
=================================================================
Total params: 58,322,314
Trainable params: 58,322,314
Non-trainable params: 0
_________________________________________________________________
# Save the best weights (by validation accuracy) and stop early if the
# metric stalls for 20 epochs. The deprecated `period=1` argument is
# dropped: the checkpoint callback already evaluates once per epoch.
checkpoint = ModelCheckpoint("alex_6.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')

# `Model.fit` accepts generators/datasets directly; `fit_generator` is
# deprecated (see the UserWarning this cell previously emitted).
alex6 = model_pool_conv_drop.fit(
    train_ds,
    steps_per_epoch=len(train_ds),
    validation_data=validation_ds,
    validation_steps=len(validation_ds),
    epochs=25,
    callbacks=[checkpoint, early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Epoch 1/25
/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/3120705445.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  alex6 = model_pool_conv_drop.fit_generator(
25/25 [==============================] - ETA: 0s - loss: 1.8171 - accuracy: 0.2288
Epoch 1: val_accuracy improved from -inf to 0.27604, saving model to alex_6.h5
25/25 [==============================] - 29s 1s/step - loss: 1.8171 - accuracy: 0.2288 - val_loss: 2.2332 - val_accuracy: 0.2760
Epoch 2/25
25/25 [==============================] - ETA: 0s - loss: 1.6441 - accuracy: 0.2512
Epoch 2: val_accuracy did not improve from 0.27604
25/25 [==============================] - 28s 1s/step - loss: 1.6441 - accuracy: 0.2512 - val_loss: 2.2203 - val_accuracy: 0.1823
Epoch 3/25
25/25 [==============================] - ETA: 0s - loss: 1.5645 - accuracy: 0.3013
Epoch 3: val_accuracy did not improve from 0.27604
25/25 [==============================] - 28s 1s/step - loss: 1.5645 - accuracy: 0.3013 - val_loss: 2.1670 - val_accuracy: 0.2240
Epoch 4/25
25/25 [==============================] - ETA: 0s - loss: 1.5076 - accuracy: 0.3237
Epoch 4: val_accuracy did not improve from 0.27604
25/25 [==============================] - 28s 1s/step - loss: 1.5076 - accuracy: 0.3237 - val_loss: 2.1759 - val_accuracy: 0.1875
Epoch 5/25
25/25 [==============================] - ETA: 0s - loss: 1.4085 - accuracy: 0.3913
Epoch 5: val_accuracy did not improve from 0.27604
25/25 [==============================] - 29s 1s/step - loss: 1.4085 - accuracy: 0.3913 - val_loss: 2.0652 - val_accuracy: 0.2083
Epoch 6/25
25/25 [==============================] - ETA: 0s - loss: 1.3140 - accuracy: 0.4263
Epoch 6: val_accuracy did not improve from 0.27604
25/25 [==============================] - 29s 1s/step - loss: 1.3140 - accuracy: 0.4263 - val_loss: 2.0968 - val_accuracy: 0.1875
Epoch 7/25
25/25 [==============================] - ETA: 0s - loss: 1.3008 - accuracy: 0.4275
Epoch 7: val_accuracy did not improve from 0.27604
25/25 [==============================] - 28s 1s/step - loss: 1.3008 - accuracy: 0.4275 - val_loss: 1.9457 - val_accuracy: 0.2760
Epoch 8/25
25/25 [==============================] - ETA: 0s - loss: 1.2462 - accuracy: 0.4700
Epoch 8: val_accuracy improved from 0.27604 to 0.34375, saving model to alex_6.h5
25/25 [==============================] - 29s 1s/step - loss: 1.2462 - accuracy: 0.4700 - val_loss: 1.8961 - val_accuracy: 0.3438
Epoch 9/25
25/25 [==============================] - ETA: 0s - loss: 1.2202 - accuracy: 0.4737
Epoch 9: val_accuracy did not improve from 0.34375
25/25 [==============================] - 29s 1s/step - loss: 1.2202 - accuracy: 0.4737 - val_loss: 2.0365 - val_accuracy: 0.1979
Epoch 10/25
25/25 [==============================] - ETA: 0s - loss: 1.1927 - accuracy: 0.4975
Epoch 10: val_accuracy did not improve from 0.34375
25/25 [==============================] - 30s 1s/step - loss: 1.1927 - accuracy: 0.4975 - val_loss: 2.0173 - val_accuracy: 0.2083
Epoch 11/25
25/25 [==============================] - ETA: 0s - loss: 1.1185 - accuracy: 0.5138
Epoch 11: val_accuracy did not improve from 0.34375
25/25 [==============================] - 29s 1s/step - loss: 1.1185 - accuracy: 0.5138 - val_loss: 1.8485 - val_accuracy: 0.3385
Epoch 12/25
25/25 [==============================] - ETA: 0s - loss: 1.1445 - accuracy: 0.5088
Epoch 12: val_accuracy did not improve from 0.34375
25/25 [==============================] - 29s 1s/step - loss: 1.1445 - accuracy: 0.5088 - val_loss: 1.8848 - val_accuracy: 0.2604
Epoch 13/25
25/25 [==============================] - ETA: 0s - loss: 1.1042 - accuracy: 0.5387
Epoch 13: val_accuracy did not improve from 0.34375
25/25 [==============================] - 29s 1s/step - loss: 1.1042 - accuracy: 0.5387 - val_loss: 1.9293 - val_accuracy: 0.2135
Epoch 14/25
25/25 [==============================] - ETA: 0s - loss: 1.0768 - accuracy: 0.5412
Epoch 14: val_accuracy did not improve from 0.34375
25/25 [==============================] - 29s 1s/step - loss: 1.0768 - accuracy: 0.5412 - val_loss: 1.9871 - val_accuracy: 0.1979
Epoch 15/25
25/25 [==============================] - ETA: 0s - loss: 1.0332 - accuracy: 0.5512
Epoch 15: val_accuracy did not improve from 0.34375
25/25 [==============================] - 30s 1s/step - loss: 1.0332 - accuracy: 0.5512 - val_loss: 1.9616 - val_accuracy: 0.1927
Epoch 16/25
25/25 [==============================] - ETA: 0s - loss: 1.0965 - accuracy: 0.5475
Epoch 16: val_accuracy did not improve from 0.34375
25/25 [==============================] - 35s 1s/step - loss: 1.0965 - accuracy: 0.5475 - val_loss: 1.8993 - val_accuracy: 0.2083
Epoch 17/25
25/25 [==============================] - ETA: 0s - loss: 1.0335 - accuracy: 0.5387
Epoch 17: val_accuracy did not improve from 0.34375
25/25 [==============================] - 31s 1s/step - loss: 1.0335 - accuracy: 0.5387 - val_loss: 1.9000 - val_accuracy: 0.2188
Epoch 18/25
25/25 [==============================] - ETA: 0s - loss: 1.0124 - accuracy: 0.5475
Epoch 18: val_accuracy did not improve from 0.34375
25/25 [==============================] - 32s 1s/step - loss: 1.0124 - accuracy: 0.5475 - val_loss: 1.9711 - val_accuracy: 0.1927
Epoch 19/25
25/25 [==============================] - ETA: 0s - loss: 1.0936 - accuracy: 0.5512
Epoch 19: val_accuracy did not improve from 0.34375
25/25 [==============================] - 31s 1s/step - loss: 1.0936 - accuracy: 0.5512 - val_loss: 1.9364 - val_accuracy: 0.1927
Epoch 20/25
25/25 [==============================] - ETA: 0s - loss: 0.9696 - accuracy: 0.5775
Epoch 20: val_accuracy did not improve from 0.34375
25/25 [==============================] - 31s 1s/step - loss: 0.9696 - accuracy: 0.5775 - val_loss: 1.8897 - val_accuracy: 0.1927
Epoch 21/25
25/25 [==============================] - ETA: 0s - loss: 1.0047 - accuracy: 0.5288
Epoch 21: val_accuracy did not improve from 0.34375
25/25 [==============================] - 29s 1s/step - loss: 1.0047 - accuracy: 0.5288 - val_loss: 1.8192 - val_accuracy: 0.2083
Epoch 22/25
25/25 [==============================] - ETA: 0s - loss: 0.9775 - accuracy: 0.5738
Epoch 22: val_accuracy did not improve from 0.34375
25/25 [==============================] - 29s 1s/step - loss: 0.9775 - accuracy: 0.5738 - val_loss: 1.9259 - val_accuracy: 0.1875
Epoch 23/25
25/25 [==============================] - ETA: 0s - loss: 0.9873 - accuracy: 0.5763
Epoch 23: val_accuracy did not improve from 0.34375
25/25 [==============================] - 29s 1s/step - loss: 0.9873 - accuracy: 0.5763 - val_loss: 1.9257 - val_accuracy: 0.1979
Epoch 24/25
25/25 [==============================] - ETA: 0s - loss: 0.9560 - accuracy: 0.5938
Epoch 24: val_accuracy did not improve from 0.34375
25/25 [==============================] - 29s 1s/step - loss: 0.9560 - accuracy: 0.5938 - val_loss: 1.8322 - val_accuracy: 0.2031
Epoch 25/25
25/25 [==============================] - ETA: 0s - loss: 0.9225 - accuracy: 0.6100
Epoch 25: val_accuracy did not improve from 0.34375
25/25 [==============================] - 29s 1s/step - loss: 0.9225 - accuracy: 0.6100 - val_loss: 1.7558 - val_accuracy: 0.2448
# Plot training curves for the pool+conv-dropout AlexNet variant.
# The chart shows accuracy AND loss, so the title says "metrics" —
# the original "Model accuracy" label was misleading.
for metric in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(alex6.history[metric])
plt.title("Model metrics")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","Loss","Validation Loss"])
plt.show()
# Evaluate the trained model on the held-out test set; the cell displays
# [loss, accuracy] (model was compiled with metrics=['accuracy']).
model_pool_conv_drop.evaluate(test_ds)
8/8 [==============================] - 2s 306ms/step - loss: 1.7711 - accuracy: 0.2227
[1.7710821628570557, 0.22265625]

Do warstw spłaszczonych, maxpooling i splotowych

# AlexNet variant with Dropout(0.5) after every layer group: conv,
# max-pooling AND the two hidden dense layers (the most aggressively
# regularized configuration in this experiment).
model_drop = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Dropout(.5),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Dropout(.5),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(.5),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(.5),
    keras.layers.Dense(10, activation='softmax')
])
# `learning_rate` replaces the deprecated `lr` argument (see the absl
# warning this cell previously emitted).
model_drop.compile(loss='sparse_categorical_crossentropy',
                   optimizer=tf.optimizers.SGD(learning_rate=.001),
                   metrics=['accuracy'])
model_drop.summary()
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential_6"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_30 (Conv2D)          (None, 55, 55, 96)        34944     
                                                                 
 dropout_30 (Dropout)        (None, 55, 55, 96)        0         
                                                                 
 max_pooling2d_18 (MaxPoolin  (None, 27, 27, 96)       0         
 g2D)                                                            
                                                                 
 dropout_31 (Dropout)        (None, 27, 27, 96)        0         
                                                                 
 conv2d_31 (Conv2D)          (None, 27, 27, 256)       614656    
                                                                 
 dropout_32 (Dropout)        (None, 27, 27, 256)       0         
                                                                 
 max_pooling2d_19 (MaxPoolin  (None, 13, 13, 256)      0         
 g2D)                                                            
                                                                 
 dropout_33 (Dropout)        (None, 13, 13, 256)       0         
                                                                 
 conv2d_32 (Conv2D)          (None, 13, 13, 384)       885120    
                                                                 
 dropout_34 (Dropout)        (None, 13, 13, 384)       0         
                                                                 
 conv2d_33 (Conv2D)          (None, 13, 13, 384)       1327488   
                                                                 
 dropout_35 (Dropout)        (None, 13, 13, 384)       0         
                                                                 
 conv2d_34 (Conv2D)          (None, 13, 13, 256)       884992    
                                                                 
 dropout_36 (Dropout)        (None, 13, 13, 256)       0         
                                                                 
 max_pooling2d_20 (MaxPoolin  (None, 6, 6, 256)        0         
 g2D)                                                            
                                                                 
 dropout_37 (Dropout)        (None, 6, 6, 256)         0         
                                                                 
 flatten_6 (Flatten)         (None, 9216)              0         
                                                                 
 dense_18 (Dense)            (None, 4096)              37752832  
                                                                 
 dropout_38 (Dropout)        (None, 4096)              0         
                                                                 
 dense_19 (Dense)            (None, 4096)              16781312  
                                                                 
 dropout_39 (Dropout)        (None, 4096)              0         
                                                                 
 dense_20 (Dense)            (None, 10)                40970     
                                                                 
=================================================================
Total params: 58,322,314
Trainable params: 58,322,314
Non-trainable params: 0
_________________________________________________________________
# Save the best weights (by validation accuracy) and stop early if the
# metric stalls for 20 epochs. The deprecated `period=1` argument is
# dropped: the checkpoint callback already evaluates once per epoch.
checkpoint = ModelCheckpoint("alex_7.h5", monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')

# `Model.fit` accepts generators/datasets directly; `fit_generator` is
# deprecated (see the UserWarning this cell previously emitted).
alex7 = model_drop.fit(
    train_ds,
    steps_per_epoch=len(train_ds),
    validation_data=validation_ds,
    validation_steps=len(validation_ds),
    epochs=25,
    callbacks=[checkpoint, early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Epoch 1/25
/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/2699219498.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  alex7 = model_drop.fit_generator(
25/25 [==============================] - ETA: 0s - loss: 1.9261 - accuracy: 0.2025
Epoch 1: val_accuracy improved from -inf to 0.18229, saving model to alex_7.h5
25/25 [==============================] - 30s 1s/step - loss: 1.9261 - accuracy: 0.2025 - val_loss: 2.2480 - val_accuracy: 0.1823
Epoch 2/25
25/25 [==============================] - ETA: 0s - loss: 1.7103 - accuracy: 0.1963
Epoch 2: val_accuracy improved from 0.18229 to 0.18750, saving model to alex_7.h5
25/25 [==============================] - 29s 1s/step - loss: 1.7103 - accuracy: 0.1963 - val_loss: 2.2290 - val_accuracy: 0.1875
Epoch 3/25
25/25 [==============================] - ETA: 0s - loss: 1.6472 - accuracy: 0.2362
Epoch 3: val_accuracy improved from 0.18750 to 0.19271, saving model to alex_7.h5
25/25 [==============================] - 29s 1s/step - loss: 1.6472 - accuracy: 0.2362 - val_loss: 2.1991 - val_accuracy: 0.1927
Epoch 4/25
25/25 [==============================] - ETA: 0s - loss: 1.5965 - accuracy: 0.2675
Epoch 4: val_accuracy did not improve from 0.19271
25/25 [==============================] - 29s 1s/step - loss: 1.5965 - accuracy: 0.2675 - val_loss: 2.1612 - val_accuracy: 0.1927
Epoch 5/25
25/25 [==============================] - ETA: 0s - loss: 1.5649 - accuracy: 0.2862
Epoch 5: val_accuracy did not improve from 0.19271
25/25 [==============================] - 29s 1s/step - loss: 1.5649 - accuracy: 0.2862 - val_loss: 2.1174 - val_accuracy: 0.1927
Epoch 6/25
25/25 [==============================] - ETA: 0s - loss: 1.4497 - accuracy: 0.3750
Epoch 6: val_accuracy improved from 0.19271 to 0.20312, saving model to alex_7.h5
25/25 [==============================] - 29s 1s/step - loss: 1.4497 - accuracy: 0.3750 - val_loss: 2.0352 - val_accuracy: 0.2031
Epoch 7/25
25/25 [==============================] - ETA: 0s - loss: 1.3833 - accuracy: 0.3787
Epoch 7: val_accuracy did not improve from 0.20312
25/25 [==============================] - 28s 1s/step - loss: 1.3833 - accuracy: 0.3787 - val_loss: 2.0280 - val_accuracy: 0.1771
Epoch 8/25
25/25 [==============================] - ETA: 0s - loss: 1.3506 - accuracy: 0.4025
Epoch 8: val_accuracy did not improve from 0.20312
25/25 [==============================] - 27s 1s/step - loss: 1.3506 - accuracy: 0.4025 - val_loss: 1.9642 - val_accuracy: 0.1979
Epoch 9/25
25/25 [==============================] - ETA: 0s - loss: 1.3013 - accuracy: 0.4212
Epoch 9: val_accuracy did not improve from 0.20312
25/25 [==============================] - 27s 1s/step - loss: 1.3013 - accuracy: 0.4212 - val_loss: 1.9955 - val_accuracy: 0.1927
Epoch 10/25
25/25 [==============================] - ETA: 0s - loss: 1.3089 - accuracy: 0.4387
Epoch 10: val_accuracy did not improve from 0.20312
25/25 [==============================] - 30s 1s/step - loss: 1.3089 - accuracy: 0.4387 - val_loss: 2.0652 - val_accuracy: 0.1875
Epoch 11/25
25/25 [==============================] - ETA: 0s - loss: 1.3030 - accuracy: 0.4400
Epoch 11: val_accuracy improved from 0.20312 to 0.20833, saving model to alex_7.h5
25/25 [==============================] - 29s 1s/step - loss: 1.3030 - accuracy: 0.4400 - val_loss: 1.9548 - val_accuracy: 0.2083
Epoch 12/25
25/25 [==============================] - ETA: 0s - loss: 1.1538 - accuracy: 0.4913
Epoch 12: val_accuracy did not improve from 0.20833
25/25 [==============================] - 28s 1s/step - loss: 1.1538 - accuracy: 0.4913 - val_loss: 1.8886 - val_accuracy: 0.2083
Epoch 13/25
25/25 [==============================] - ETA: 0s - loss: 1.1939 - accuracy: 0.4913
Epoch 13: val_accuracy did not improve from 0.20833
25/25 [==============================] - 27s 1s/step - loss: 1.1939 - accuracy: 0.4913 - val_loss: 1.9482 - val_accuracy: 0.1875
Epoch 14/25
25/25 [==============================] - ETA: 0s - loss: 1.1846 - accuracy: 0.4775
Epoch 14: val_accuracy did not improve from 0.20833
25/25 [==============================] - 27s 1s/step - loss: 1.1846 - accuracy: 0.4775 - val_loss: 2.0470 - val_accuracy: 0.1927
Epoch 15/25
25/25 [==============================] - ETA: 0s - loss: 1.1359 - accuracy: 0.5075
Epoch 15: val_accuracy did not improve from 0.20833
25/25 [==============================] - 29s 1s/step - loss: 1.1359 - accuracy: 0.5075 - val_loss: 1.9831 - val_accuracy: 0.1875
Epoch 16/25
25/25 [==============================] - ETA: 0s - loss: 1.1575 - accuracy: 0.4963
Epoch 16: val_accuracy did not improve from 0.20833
25/25 [==============================] - 96s 4s/step - loss: 1.1575 - accuracy: 0.4963 - val_loss: 1.9085 - val_accuracy: 0.2083
Epoch 17/25
25/25 [==============================] - ETA: 0s - loss: 1.1165 - accuracy: 0.5113
Epoch 17: val_accuracy did not improve from 0.20833
25/25 [==============================] - 110s 4s/step - loss: 1.1165 - accuracy: 0.5113 - val_loss: 1.9389 - val_accuracy: 0.1979
Epoch 18/25
25/25 [==============================] - ETA: 0s - loss: 1.1276 - accuracy: 0.5163
Epoch 18: val_accuracy did not improve from 0.20833
25/25 [==============================] - 107s 4s/step - loss: 1.1276 - accuracy: 0.5163 - val_loss: 1.9441 - val_accuracy: 0.1875
Epoch 19/25
25/25 [==============================] - ETA: 0s - loss: 1.1038 - accuracy: 0.5238
Epoch 19: val_accuracy did not improve from 0.20833
25/25 [==============================] - 69s 3s/step - loss: 1.1038 - accuracy: 0.5238 - val_loss: 2.0581 - val_accuracy: 0.1875
Epoch 20/25
25/25 [==============================] - ETA: 0s - loss: 1.1174 - accuracy: 0.5250
Epoch 20: val_accuracy did not improve from 0.20833
25/25 [==============================] - 68s 3s/step - loss: 1.1174 - accuracy: 0.5250 - val_loss: 1.9579 - val_accuracy: 0.1823
Epoch 21/25
25/25 [==============================] - ETA: 0s - loss: 1.0253 - accuracy: 0.5575
Epoch 21: val_accuracy did not improve from 0.20833
25/25 [==============================] - 69s 3s/step - loss: 1.0253 - accuracy: 0.5575 - val_loss: 1.9376 - val_accuracy: 0.1979
Epoch 22/25
25/25 [==============================] - ETA: 0s - loss: 1.1088 - accuracy: 0.5450
Epoch 22: val_accuracy did not improve from 0.20833
25/25 [==============================] - 72s 3s/step - loss: 1.1088 - accuracy: 0.5450 - val_loss: 2.0030 - val_accuracy: 0.1875
Epoch 23/25
25/25 [==============================] - ETA: 0s - loss: 1.0789 - accuracy: 0.5475
Epoch 23: val_accuracy did not improve from 0.20833
25/25 [==============================] - 59s 2s/step - loss: 1.0789 - accuracy: 0.5475 - val_loss: 1.9403 - val_accuracy: 0.1979
Epoch 24/25
25/25 [==============================] - ETA: 0s - loss: 1.0523 - accuracy: 0.5500
Epoch 24: val_accuracy did not improve from 0.20833
25/25 [==============================] - 56s 2s/step - loss: 1.0523 - accuracy: 0.5500 - val_loss: 2.0287 - val_accuracy: 0.1875
Epoch 25/25
25/25 [==============================] - ETA: 0s - loss: 1.0160 - accuracy: 0.5587
Epoch 25: val_accuracy did not improve from 0.20833
25/25 [==============================] - 52s 2s/step - loss: 1.0160 - accuracy: 0.5587 - val_loss: 1.9327 - val_accuracy: 0.1979
# Plot training curves for the fully dropout-regularized AlexNet variant.
# The chart shows accuracy AND loss, so the title says "metrics" —
# the original "Model accuracy" label was misleading.
for metric in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(alex7.history[metric])
plt.title("Model metrics")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","Loss","Validation Loss"])
plt.show()
model_drop.evaluate(test_ds)
8/8 [==============================] - 4s 534ms/step - loss: 1.9357 - accuracy: 0.2070
[1.9356722831726074, 0.20703125]

Batch Normalization

Bez dropoutu

# AlexNet-style CNN with a BatchNormalization layer after every convolution
# and no dropout in the classifier head (baseline for the dropout variant).
# Input is a (227, 227, 3) image; output is a 10-way softmax.
model_batch = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
# Labels are sparse integer class ids, hence sparse_categorical_crossentropy.
# 'learning_rate' replaces the deprecated 'lr' keyword (the run emitted an
# absl deprecation warning for it).
model_batch.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=.001), metrics=['accuracy'])
model_batch.summary()
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential_7"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_35 (Conv2D)          (None, 55, 55, 96)        34944     
                                                                 
 batch_normalization (BatchN  (None, 55, 55, 96)       384       
 ormalization)                                                   
                                                                 
 max_pooling2d_21 (MaxPoolin  (None, 27, 27, 96)       0         
 g2D)                                                            
                                                                 
 conv2d_36 (Conv2D)          (None, 27, 27, 256)       614656    
                                                                 
 batch_normalization_1 (Batc  (None, 27, 27, 256)      1024      
 hNormalization)                                                 
                                                                 
 max_pooling2d_22 (MaxPoolin  (None, 13, 13, 256)      0         
 g2D)                                                            
                                                                 
 conv2d_37 (Conv2D)          (None, 13, 13, 384)       885120    
                                                                 
 batch_normalization_2 (Batc  (None, 13, 13, 384)      1536      
 hNormalization)                                                 
                                                                 
 conv2d_38 (Conv2D)          (None, 13, 13, 384)       1327488   
                                                                 
 batch_normalization_3 (Batc  (None, 13, 13, 384)      1536      
 hNormalization)                                                 
                                                                 
 conv2d_39 (Conv2D)          (None, 13, 13, 256)       884992    
                                                                 
 batch_normalization_4 (Batc  (None, 13, 13, 256)      1024      
 hNormalization)                                                 
                                                                 
 max_pooling2d_23 (MaxPoolin  (None, 6, 6, 256)        0         
 g2D)                                                            
                                                                 
 flatten_7 (Flatten)         (None, 9216)              0         
                                                                 
 dense_21 (Dense)            (None, 4096)              37752832  
                                                                 
 dense_22 (Dense)            (None, 4096)              16781312  
                                                                 
 dense_23 (Dense)            (None, 10)                40970     
                                                                 
=================================================================
Total params: 58,327,818
Trainable params: 58,325,066
Non-trainable params: 2,752
_________________________________________________________________
# Keep the single best model (by validation accuracy) on disk.
# save_freq='epoch' replaces the deprecated 'period=1' argument, which only
# triggered a TensorFlow deprecation warning in the original run.
checkpoint = ModelCheckpoint("alex_8.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch')
# Stop early if validation accuracy has not improved for 20 epochs.
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')

# Model.fit accepts generators/datasets directly; fit_generator is deprecated
# (the original run emitted a UserWarning about it).
alex8 = model_batch.fit(
    train_ds,
    steps_per_epoch=len(train_ds),
    validation_data=validation_ds,
    validation_steps=len(validation_ds),
    epochs=25,
    callbacks=[checkpoint, early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Epoch 1/25
/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/2334374023.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  alex8 = model_batch.fit_generator(
25/25 [==============================] - ETA: 0s - loss: 3.5162 - accuracy: 0.4512
Epoch 1: val_accuracy improved from -inf to 0.20833, saving model to alex_8.h5
25/25 [==============================] - 51s 2s/step - loss: 3.5162 - accuracy: 0.4512 - val_loss: 2.1169 - val_accuracy: 0.2083
Epoch 2/25
25/25 [==============================] - ETA: 0s - loss: 0.6702 - accuracy: 0.7425
Epoch 2: val_accuracy did not improve from 0.20833
25/25 [==============================] - 52s 2s/step - loss: 0.6702 - accuracy: 0.7425 - val_loss: 2.1916 - val_accuracy: 0.1771
Epoch 3/25
25/25 [==============================] - ETA: 0s - loss: 0.3823 - accuracy: 0.8637
Epoch 3: val_accuracy did not improve from 0.20833
25/25 [==============================] - 52s 2s/step - loss: 0.3823 - accuracy: 0.8637 - val_loss: 2.5290 - val_accuracy: 0.1823
Epoch 4/25
25/25 [==============================] - ETA: 0s - loss: 0.2204 - accuracy: 0.9388
Epoch 4: val_accuracy did not improve from 0.20833
25/25 [==============================] - 52s 2s/step - loss: 0.2204 - accuracy: 0.9388 - val_loss: 3.1773 - val_accuracy: 0.1771
Epoch 5/25
25/25 [==============================] - ETA: 0s - loss: 0.1337 - accuracy: 0.9712
Epoch 5: val_accuracy did not improve from 0.20833
25/25 [==============================] - 53s 2s/step - loss: 0.1337 - accuracy: 0.9712 - val_loss: 3.4835 - val_accuracy: 0.1875
Epoch 6/25
25/25 [==============================] - ETA: 0s - loss: 0.0836 - accuracy: 0.9875
Epoch 6: val_accuracy did not improve from 0.20833
25/25 [==============================] - 52s 2s/step - loss: 0.0836 - accuracy: 0.9875 - val_loss: 4.0837 - val_accuracy: 0.1927
Epoch 7/25
25/25 [==============================] - ETA: 0s - loss: 0.0911 - accuracy: 0.9775
Epoch 7: val_accuracy improved from 0.20833 to 0.24479, saving model to alex_8.h5
25/25 [==============================] - 56s 2s/step - loss: 0.0911 - accuracy: 0.9775 - val_loss: 4.6900 - val_accuracy: 0.2448
Epoch 8/25
25/25 [==============================] - ETA: 0s - loss: 0.0658 - accuracy: 0.9862
Epoch 8: val_accuracy improved from 0.24479 to 0.28646, saving model to alex_8.h5
25/25 [==============================] - 52s 2s/step - loss: 0.0658 - accuracy: 0.9862 - val_loss: 4.7919 - val_accuracy: 0.2865
Epoch 9/25
25/25 [==============================] - ETA: 0s - loss: 0.0362 - accuracy: 0.9975
Epoch 9: val_accuracy improved from 0.28646 to 0.30729, saving model to alex_8.h5
25/25 [==============================] - 53s 2s/step - loss: 0.0362 - accuracy: 0.9975 - val_loss: 5.1122 - val_accuracy: 0.3073
Epoch 10/25
25/25 [==============================] - ETA: 0s - loss: 0.0309 - accuracy: 0.9962
Epoch 10: val_accuracy did not improve from 0.30729
25/25 [==============================] - 52s 2s/step - loss: 0.0309 - accuracy: 0.9962 - val_loss: 5.5180 - val_accuracy: 0.2760
Epoch 11/25
25/25 [==============================] - ETA: 0s - loss: 0.0250 - accuracy: 1.0000
Epoch 11: val_accuracy did not improve from 0.30729
25/25 [==============================] - 51s 2s/step - loss: 0.0250 - accuracy: 1.0000 - val_loss: 5.7030 - val_accuracy: 0.2969
Epoch 12/25
25/25 [==============================] - ETA: 0s - loss: 0.0243 - accuracy: 0.9962
Epoch 12: val_accuracy did not improve from 0.30729
25/25 [==============================] - 49s 2s/step - loss: 0.0243 - accuracy: 0.9962 - val_loss: 5.8668 - val_accuracy: 0.2917
Epoch 13/25
25/25 [==============================] - ETA: 0s - loss: 0.0163 - accuracy: 1.0000
Epoch 13: val_accuracy did not improve from 0.30729
25/25 [==============================] - 47s 2s/step - loss: 0.0163 - accuracy: 1.0000 - val_loss: 6.0192 - val_accuracy: 0.3021
Epoch 14/25
25/25 [==============================] - ETA: 0s - loss: 0.0121 - accuracy: 0.9987
Epoch 14: val_accuracy improved from 0.30729 to 0.32292, saving model to alex_8.h5
25/25 [==============================] - 45s 2s/step - loss: 0.0121 - accuracy: 0.9987 - val_loss: 5.2193 - val_accuracy: 0.3229
Epoch 15/25
25/25 [==============================] - ETA: 0s - loss: 0.0131 - accuracy: 1.0000
Epoch 15: val_accuracy did not improve from 0.32292
25/25 [==============================] - 43s 2s/step - loss: 0.0131 - accuracy: 1.0000 - val_loss: 5.9107 - val_accuracy: 0.3073
Epoch 16/25
25/25 [==============================] - ETA: 0s - loss: 0.0113 - accuracy: 1.0000
Epoch 16: val_accuracy did not improve from 0.32292
25/25 [==============================] - 43s 2s/step - loss: 0.0113 - accuracy: 1.0000 - val_loss: 5.8355 - val_accuracy: 0.2969
Epoch 17/25
25/25 [==============================] - ETA: 0s - loss: 0.0097 - accuracy: 1.0000
Epoch 17: val_accuracy did not improve from 0.32292
25/25 [==============================] - 45s 2s/step - loss: 0.0097 - accuracy: 1.0000 - val_loss: 5.1658 - val_accuracy: 0.3125
Epoch 18/25
25/25 [==============================] - ETA: 0s - loss: 0.0104 - accuracy: 0.9987
Epoch 18: val_accuracy did not improve from 0.32292
25/25 [==============================] - 44s 2s/step - loss: 0.0104 - accuracy: 0.9987 - val_loss: 4.9559 - val_accuracy: 0.3073
Epoch 19/25
25/25 [==============================] - ETA: 0s - loss: 0.0083 - accuracy: 1.0000
Epoch 19: val_accuracy improved from 0.32292 to 0.33333, saving model to alex_8.h5
25/25 [==============================] - 45s 2s/step - loss: 0.0083 - accuracy: 1.0000 - val_loss: 4.3347 - val_accuracy: 0.3333
Epoch 20/25
25/25 [==============================] - ETA: 0s - loss: 0.0076 - accuracy: 1.0000
Epoch 20: val_accuracy improved from 0.33333 to 0.36979, saving model to alex_8.h5
25/25 [==============================] - 46s 2s/step - loss: 0.0076 - accuracy: 1.0000 - val_loss: 3.3916 - val_accuracy: 0.3698
Epoch 21/25
25/25 [==============================] - ETA: 0s - loss: 0.0076 - accuracy: 1.0000
Epoch 21: val_accuracy improved from 0.36979 to 0.39062, saving model to alex_8.h5
25/25 [==============================] - 46s 2s/step - loss: 0.0076 - accuracy: 1.0000 - val_loss: 2.8197 - val_accuracy: 0.3906
Epoch 22/25
25/25 [==============================] - ETA: 0s - loss: 0.0056 - accuracy: 1.0000
Epoch 22: val_accuracy improved from 0.39062 to 0.45312, saving model to alex_8.h5
25/25 [==============================] - 45s 2s/step - loss: 0.0056 - accuracy: 1.0000 - val_loss: 2.2279 - val_accuracy: 0.4531
Epoch 23/25
25/25 [==============================] - ETA: 0s - loss: 0.0066 - accuracy: 1.0000
Epoch 23: val_accuracy improved from 0.45312 to 0.57292, saving model to alex_8.h5
25/25 [==============================] - 46s 2s/step - loss: 0.0066 - accuracy: 1.0000 - val_loss: 1.3994 - val_accuracy: 0.5729
Epoch 24/25
25/25 [==============================] - ETA: 0s - loss: 0.0052 - accuracy: 1.0000
Epoch 24: val_accuracy improved from 0.57292 to 0.63542, saving model to alex_8.h5
25/25 [==============================] - 49s 2s/step - loss: 0.0052 - accuracy: 1.0000 - val_loss: 1.2914 - val_accuracy: 0.6354
Epoch 25/25
25/25 [==============================] - ETA: 0s - loss: 0.0059 - accuracy: 1.0000
Epoch 25: val_accuracy improved from 0.63542 to 0.71354, saving model to alex_8.h5
25/25 [==============================] - 49s 2s/step - loss: 0.0059 - accuracy: 1.0000 - val_loss: 1.0022 - val_accuracy: 0.7135
# Plot the training curves of the batch-norm model: accuracy and loss for
# both the training and the validation set, one line per metric.
for metric in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(alex8.history[metric])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy", "Validation Accuracy", "Loss", "Validation Loss"])
plt.show()
model_batch.evaluate(test_ds)
8/8 [==============================] - 4s 557ms/step - loss: 0.8515 - accuracy: 0.7383
[0.8515095114707947, 0.73828125]

Z dropoutem

# Same AlexNet-style architecture with BatchNormalization after every
# convolution, plus 50% Dropout after each of the two hidden Dense layers.
# Input is a (227, 227, 3) image; output is a 10-way softmax.
model_batch_drop = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(.5),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dropout(.5),
    keras.layers.Dense(10, activation='softmax')
])
# Labels are sparse integer class ids, hence sparse_categorical_crossentropy.
# 'learning_rate' replaces the deprecated 'lr' keyword (the run emitted an
# absl deprecation warning for it).
model_batch_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=.001), metrics=['accuracy'])
model_batch_drop.summary()
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.
Model: "sequential_8"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_40 (Conv2D)          (None, 55, 55, 96)        34944     
                                                                 
 batch_normalization_5 (Batc  (None, 55, 55, 96)       384       
 hNormalization)                                                 
                                                                 
 max_pooling2d_24 (MaxPoolin  (None, 27, 27, 96)       0         
 g2D)                                                            
                                                                 
 conv2d_41 (Conv2D)          (None, 27, 27, 256)       614656    
                                                                 
 batch_normalization_6 (Batc  (None, 27, 27, 256)      1024      
 hNormalization)                                                 
                                                                 
 max_pooling2d_25 (MaxPoolin  (None, 13, 13, 256)      0         
 g2D)                                                            
                                                                 
 conv2d_42 (Conv2D)          (None, 13, 13, 384)       885120    
                                                                 
 batch_normalization_7 (Batc  (None, 13, 13, 384)      1536      
 hNormalization)                                                 
                                                                 
 conv2d_43 (Conv2D)          (None, 13, 13, 384)       1327488   
                                                                 
 batch_normalization_8 (Batc  (None, 13, 13, 384)      1536      
 hNormalization)                                                 
                                                                 
 conv2d_44 (Conv2D)          (None, 13, 13, 256)       884992    
                                                                 
 batch_normalization_9 (Batc  (None, 13, 13, 256)      1024      
 hNormalization)                                                 
                                                                 
 max_pooling2d_26 (MaxPoolin  (None, 6, 6, 256)        0         
 g2D)                                                            
                                                                 
 flatten_8 (Flatten)         (None, 9216)              0         
                                                                 
 dense_24 (Dense)            (None, 4096)              37752832  
                                                                 
 dropout_40 (Dropout)        (None, 4096)              0         
                                                                 
 dense_25 (Dense)            (None, 4096)              16781312  
                                                                 
 dropout_41 (Dropout)        (None, 4096)              0         
                                                                 
 dense_26 (Dense)            (None, 10)                40970     
                                                                 
=================================================================
Total params: 58,327,818
Trainable params: 58,325,066
Non-trainable params: 2,752
_________________________________________________________________
# Keep the single best model (by validation accuracy) on disk.
# save_freq='epoch' replaces the deprecated 'period=1' argument, which only
# triggered a TensorFlow deprecation warning in the original run.
checkpoint = ModelCheckpoint("alex_9.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch')
# Stop early if validation accuracy has not improved for 20 epochs.
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')

# Model.fit accepts generators/datasets directly; fit_generator is deprecated
# (the original run emitted a UserWarning about it).
alex9 = model_batch_drop.fit(
    train_ds,
    steps_per_epoch=len(train_ds),
    validation_data=validation_ds,
    validation_steps=len(validation_ds),
    epochs=25,
    callbacks=[checkpoint, early])
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
Epoch 1/25
/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/3373435413.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  alex9 = model_batch_drop.fit_generator(
25/25 [==============================] - ETA: 0s - loss: 5.1567 - accuracy: 0.3462
Epoch 1: val_accuracy improved from -inf to 0.39583, saving model to alex_9.h5
25/25 [==============================] - 53s 2s/step - loss: 5.1567 - accuracy: 0.3462 - val_loss: 1.8424 - val_accuracy: 0.3958
Epoch 2/25
25/25 [==============================] - ETA: 0s - loss: 1.5037 - accuracy: 0.5688
Epoch 2: val_accuracy did not improve from 0.39583
25/25 [==============================] - 48s 2s/step - loss: 1.5037 - accuracy: 0.5688 - val_loss: 2.2144 - val_accuracy: 0.2396
Epoch 3/25
25/25 [==============================] - ETA: 0s - loss: 0.9447 - accuracy: 0.6812
Epoch 3: val_accuracy did not improve from 0.39583
25/25 [==============================] - 45s 2s/step - loss: 0.9447 - accuracy: 0.6812 - val_loss: 3.3665 - val_accuracy: 0.1823
Epoch 4/25
25/25 [==============================] - ETA: 0s - loss: 0.7950 - accuracy: 0.7287
Epoch 4: val_accuracy did not improve from 0.39583
25/25 [==============================] - 45s 2s/step - loss: 0.7950 - accuracy: 0.7287 - val_loss: 4.1486 - val_accuracy: 0.3125
Epoch 5/25
25/25 [==============================] - ETA: 0s - loss: 0.7825 - accuracy: 0.7600
Epoch 5: val_accuracy did not improve from 0.39583
25/25 [==============================] - 44s 2s/step - loss: 0.7825 - accuracy: 0.7600 - val_loss: 5.0991 - val_accuracy: 0.2448
Epoch 6/25
25/25 [==============================] - ETA: 0s - loss: 0.4594 - accuracy: 0.8425
Epoch 6: val_accuracy did not improve from 0.39583
25/25 [==============================] - 45s 2s/step - loss: 0.4594 - accuracy: 0.8425 - val_loss: 5.7482 - val_accuracy: 0.1771
Epoch 7/25
25/25 [==============================] - ETA: 0s - loss: 0.4009 - accuracy: 0.8600
Epoch 7: val_accuracy did not improve from 0.39583
25/25 [==============================] - 48s 2s/step - loss: 0.4009 - accuracy: 0.8600 - val_loss: 7.0191 - val_accuracy: 0.2135
Epoch 8/25
25/25 [==============================] - ETA: 0s - loss: 0.2893 - accuracy: 0.9075
Epoch 8: val_accuracy did not improve from 0.39583
25/25 [==============================] - 49s 2s/step - loss: 0.2893 - accuracy: 0.9075 - val_loss: 7.8847 - val_accuracy: 0.1979
Epoch 9/25
25/25 [==============================] - ETA: 0s - loss: 0.2533 - accuracy: 0.8950
Epoch 9: val_accuracy did not improve from 0.39583
25/25 [==============================] - 47s 2s/step - loss: 0.2533 - accuracy: 0.8950 - val_loss: 8.0985 - val_accuracy: 0.2500
Epoch 10/25
25/25 [==============================] - ETA: 0s - loss: 0.2697 - accuracy: 0.9013
Epoch 10: val_accuracy did not improve from 0.39583
25/25 [==============================] - 50s 2s/step - loss: 0.2697 - accuracy: 0.9013 - val_loss: 8.7342 - val_accuracy: 0.2865
Epoch 11/25
25/25 [==============================] - ETA: 0s - loss: 0.2353 - accuracy: 0.9212
Epoch 11: val_accuracy did not improve from 0.39583
25/25 [==============================] - 48s 2s/step - loss: 0.2353 - accuracy: 0.9212 - val_loss: 8.8148 - val_accuracy: 0.3021
Epoch 12/25
25/25 [==============================] - ETA: 0s - loss: 0.1378 - accuracy: 0.9525
Epoch 12: val_accuracy did not improve from 0.39583
25/25 [==============================] - 47s 2s/step - loss: 0.1378 - accuracy: 0.9525 - val_loss: 7.8579 - val_accuracy: 0.3177
Epoch 13/25
25/25 [==============================] - ETA: 0s - loss: 0.1722 - accuracy: 0.9450
Epoch 13: val_accuracy did not improve from 0.39583
25/25 [==============================] - 47s 2s/step - loss: 0.1722 - accuracy: 0.9450 - val_loss: 7.5631 - val_accuracy: 0.3125
Epoch 14/25
25/25 [==============================] - ETA: 0s - loss: 0.1326 - accuracy: 0.9500
Epoch 14: val_accuracy did not improve from 0.39583
25/25 [==============================] - 48s 2s/step - loss: 0.1326 - accuracy: 0.9500 - val_loss: 7.8681 - val_accuracy: 0.2760
Epoch 15/25
25/25 [==============================] - ETA: 0s - loss: 0.1235 - accuracy: 0.9538
Epoch 15: val_accuracy did not improve from 0.39583
25/25 [==============================] - 46s 2s/step - loss: 0.1235 - accuracy: 0.9538 - val_loss: 8.4553 - val_accuracy: 0.3021
Epoch 16/25
25/25 [==============================] - ETA: 0s - loss: 0.0752 - accuracy: 0.9737
Epoch 16: val_accuracy did not improve from 0.39583
25/25 [==============================] - 44s 2s/step - loss: 0.0752 - accuracy: 0.9737 - val_loss: 6.6568 - val_accuracy: 0.3229
Epoch 17/25
25/25 [==============================] - ETA: 0s - loss: 0.0540 - accuracy: 0.9862
Epoch 17: val_accuracy did not improve from 0.39583
25/25 [==============================] - 46s 2s/step - loss: 0.0540 - accuracy: 0.9862 - val_loss: 6.9686 - val_accuracy: 0.3229
Epoch 18/25
25/25 [==============================] - ETA: 0s - loss: 0.0681 - accuracy: 0.9750
Epoch 18: val_accuracy did not improve from 0.39583
25/25 [==============================] - 45s 2s/step - loss: 0.0681 - accuracy: 0.9750 - val_loss: 5.2376 - val_accuracy: 0.3281
Epoch 19/25
25/25 [==============================] - ETA: 0s - loss: 0.0530 - accuracy: 0.9800
Epoch 19: val_accuracy improved from 0.39583 to 0.42708, saving model to alex_9.h5
25/25 [==============================] - 53s 2s/step - loss: 0.0530 - accuracy: 0.9800 - val_loss: 3.4478 - val_accuracy: 0.4271
Epoch 20/25
25/25 [==============================] - ETA: 0s - loss: 0.0605 - accuracy: 0.9850
Epoch 20: val_accuracy improved from 0.42708 to 0.44792, saving model to alex_9.h5
25/25 [==============================] - 50s 2s/step - loss: 0.0605 - accuracy: 0.9850 - val_loss: 2.8303 - val_accuracy: 0.4479
Epoch 21/25
25/25 [==============================] - ETA: 0s - loss: 0.0447 - accuracy: 0.9862
Epoch 21: val_accuracy improved from 0.44792 to 0.47396, saving model to alex_9.h5
25/25 [==============================] - 51s 2s/step - loss: 0.0447 - accuracy: 0.9862 - val_loss: 3.0949 - val_accuracy: 0.4740
Epoch 22/25
25/25 [==============================] - ETA: 0s - loss: 0.0601 - accuracy: 0.9825
Epoch 22: val_accuracy improved from 0.47396 to 0.70312, saving model to alex_9.h5
25/25 [==============================] - 78s 3s/step - loss: 0.0601 - accuracy: 0.9825 - val_loss: 1.2678 - val_accuracy: 0.7031
Epoch 23/25
25/25 [==============================] - ETA: 0s - loss: 0.0483 - accuracy: 0.9850
Epoch 23: val_accuracy improved from 0.70312 to 0.76562, saving model to alex_9.h5
25/25 [==============================] - 55s 2s/step - loss: 0.0483 - accuracy: 0.9850 - val_loss: 1.0314 - val_accuracy: 0.7656
Epoch 24/25
25/25 [==============================] - ETA: 0s - loss: 0.0412 - accuracy: 0.9862
Epoch 24: val_accuracy did not improve from 0.76562
25/25 [==============================] - 60s 2s/step - loss: 0.0412 - accuracy: 0.9862 - val_loss: 1.1687 - val_accuracy: 0.7083
Epoch 25/25
25/25 [==============================] - ETA: 0s - loss: 0.0650 - accuracy: 0.9725
Epoch 25: val_accuracy did not improve from 0.76562
25/25 [==============================] - 48s 2s/step - loss: 0.0650 - accuracy: 0.9725 - val_loss: 1.4878 - val_accuracy: 0.6719
# Plot the training curves of the batch-norm + dropout model: accuracy and
# loss for both the training and the validation set, one line per metric.
for metric in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(alex9.history[metric])
plt.title("Model accuracy")
plt.ylabel("Value")
plt.xlabel("Epoch")
plt.legend(["Accuracy", "Validation Accuracy", "Loss", "Validation Loss"])
plt.show()
model_batch_drop.evaluate(test_ds)
8/8 [==============================] - 4s 493ms/step - loss: 1.3864 - accuracy: 0.6953
[1.386448621749878, 0.6953125]