Symulowanie-wizualne/sw_lab8.ipynb

3179 lines
644 KiB
Plaintext
Raw Permalink Normal View History

2022-12-10 00:55:36 +01:00
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Zadanie 8 - Alexnet + Dropout & BatchRegularization\n",
"### Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Przygotowanie danych"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import Image, display"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "2fe63b50",
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"import subprocess\n",
"import pkg_resources\n",
"import numpy as np\n",
"\n",
"required = { 'scikit-image'}\n",
"installed = {pkg.key for pkg in pkg_resources.working_set}\n",
"missing = required - installed\n",
"# Alexnet requires images to be of dim = (227, 227, 3)\n",
"newSize = (227,227)\n",
"\n",
"if missing: \n",
" python = sys.executable\n",
" subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL)\n",
"\n",
"def load_train_data(input_dir):\n",
" import numpy as np\n",
" import pandas as pd\n",
" import os\n",
" from skimage.io import imread\n",
" import cv2 as cv\n",
" from pathlib import Path\n",
" import random\n",
" from shutil import copyfile, rmtree\n",
" import json\n",
"\n",
" import seaborn as sns\n",
" import matplotlib.pyplot as plt\n",
"\n",
" import matplotlib\n",
" \n",
" image_dir = Path(input_dir)\n",
" categories_name = []\n",
" for file in os.listdir(image_dir):\n",
" d = os.path.join(image_dir, file)\n",
" if os.path.isdir(d):\n",
" categories_name.append(file)\n",
"\n",
" folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]\n",
"\n",
" train_img = []\n",
" categories_count=[]\n",
" labels=[]\n",
" for i, direc in enumerate(folders):\n",
" count = 0\n",
" for obj in direc.iterdir():\n",
" if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':\n",
" labels.append(os.path.basename(os.path.normpath(direc)))\n",
" count += 1\n",
" img = imread(obj)#zwraca ndarry postaci xSize x ySize x colorDepth\n",
" img = img[:, :, :3]\n",
" img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)# zwraca ndarray\n",
" img = img / 255 #normalizacja\n",
" train_img.append(img)\n",
" categories_count.append(count)\n",
" X={}\n",
" X[\"values\"] = np.array(train_img)\n",
" X[\"categories_name\"] = categories_name\n",
" X[\"categories_count\"] = categories_count\n",
" X[\"labels\"]=labels\n",
" return X\n",
"\n",
"def load_test_data(input_dir):\n",
" import numpy as np\n",
" import pandas as pd\n",
" import os\n",
" from skimage.io import imread\n",
" import cv2 as cv\n",
" from pathlib import Path\n",
" import random\n",
" from shutil import copyfile, rmtree\n",
" import json\n",
"\n",
" import seaborn as sns\n",
" import matplotlib.pyplot as plt\n",
"\n",
" import matplotlib\n",
"\n",
" image_path = Path(input_dir)\n",
"\n",
" labels_path = image_path.parents[0] / 'test_labels.json'\n",
"\n",
" jsonString = labels_path.read_text()\n",
" objects = json.loads(jsonString)\n",
"\n",
" categories_name = []\n",
" categories_count=[]\n",
" count = 0\n",
" c = objects[0]['value']\n",
" for e in objects:\n",
" if e['value'] != c:\n",
" categories_count.append(count)\n",
" c = e['value']\n",
" count = 1\n",
" else:\n",
" count += 1\n",
" if not e['value'] in categories_name:\n",
" categories_name.append(e['value'])\n",
"\n",
" categories_count.append(count)\n",
" \n",
" test_img = []\n",
"\n",
" labels=[]\n",
" for e in objects:\n",
" p = image_path / e['filename']\n",
" img = imread(p)#zwraca ndarry postaci xSize x ySize x colorDepth\n",
" img = img[:, :, :3]\n",
" img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)# zwraca ndarray\n",
" img = img / 255#normalizacja\n",
" test_img.append(img)\n",
" labels.append(e['value'])\n",
"\n",
" X={}\n",
" X[\"values\"] = np.array(test_img)\n",
" X[\"categories_name\"] = categories_name\n",
" X[\"categories_count\"] = categories_count\n",
" X[\"labels\"]=labels\n",
" return X"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cc941c5a",
"metadata": {},
"outputs": [],
"source": [
"# Data load\n",
"data_train = load_train_data(\"./train_test_sw/train_sw\")\n",
"values_train = data_train['values']\n",
"labels_train = data_train['labels']\n",
"\n",
"data_test = load_test_data(\"./train_test_sw/test_sw\")\n",
"X_test = data_test['values']\n",
"y_test = data_test['labels']"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "25040ac9",
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "18d44949",
"metadata": {},
"outputs": [],
"source": [
"X_train, X_validate, y_train, y_validate = train_test_split(values_train, labels_train, test_size=0.2, random_state=42)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "a1fe47e6",
"metadata": {},
"outputs": [],
"source": [
2023-01-06 21:43:41 +01:00
"from sklearn.preprocessing import LabelEncoder\n",
"class_le = LabelEncoder()\n",
"y_train_enc = class_le.fit_transform(y_train)\n",
"y_validate_enc = class_le.fit_transform(y_validate)\n",
"y_test_enc = class_le.fit_transform(y_test)"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "d90af799",
"metadata": {},
"outputs": [],
"source": [
"class_le = LabelEncoder()\n",
"y_train_enc = class_le.fit_transform(y_train)\n",
"y_validate_enc = class_le.fit_transform(y_validate)\n",
"y_test_enc = class_le.fit_transform(y_test)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "c2323985",
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "dfe674dc",
"metadata": {},
"outputs": [],
"source": [
"train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))\n",
"validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))\n",
"test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "076c8ac9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Training data size: 820\n",
"Test data size: 259\n",
"Validation data size: 206\n"
]
}
],
"source": [
"train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()\n",
"test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()\n",
"validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()\n",
"print(\"Training data size:\", train_ds_size)\n",
"print(\"Test data size:\", test_ds_size)\n",
"print(\"Validation data size:\", validation_ds_size)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "07ebcd4a",
"metadata": {},
"outputs": [],
"source": [
"train_ds = (train_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
"test_ds = (test_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
"validation_ds = (validation_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
2023-01-06 21:43:41 +01:00
"from tensorflow import keras"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Dropout"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Do warstw spłaszczonych"
]
},
{
"cell_type": "code",
2023-01-06 21:43:41 +01:00
"execution_count": 13,
2022-12-10 00:55:36 +01:00
"metadata": {},
"outputs": [],
"source": [
"model_flat_drop = keras.models.Sequential([\n",
" keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Dense(10, activation='softmax')\n",
"])"
]
},
{
"cell_type": "code",
2023-01-06 21:43:41 +01:00
"execution_count": 14,
2022-12-10 00:55:36 +01:00
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" max_pooling2d (MaxPooling2D (None, 27, 27, 96) 0 \n",
" ) \n",
" \n",
" conv2d_1 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" max_pooling2d_1 (MaxPooling (None, 13, 13, 256) 0 \n",
" 2D) \n",
" \n",
" conv2d_2 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" conv2d_3 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" conv2d_4 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" max_pooling2d_2 (MaxPooling (None, 6, 6, 256) 0 \n",
" 2D) \n",
" \n",
" flatten (Flatten) (None, 9216) 0 \n",
" \n",
" dense (Dense) (None, 4096) 37752832 \n",
" \n",
" dropout (Dropout) (None, 4096) 0 \n",
" \n",
" dense_1 (Dense) (None, 4096) 16781312 \n",
" \n",
" dropout_1 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_2 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,322,314\n",
"Trainable params: 58,322,314\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"model_flat_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(lr=.001), metrics=['accuracy'])\n",
"model_flat_drop.summary()"
]
},
{
"cell_type": "code",
2023-01-06 21:43:41 +01:00
"execution_count": 15,
2022-12-10 00:55:36 +01:00
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
2023-01-06 21:43:41 +01:00
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n",
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/1946638494.py:6: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex1 = model_flat_drop.fit_generator(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-01-06 21:33:12.260921: W tensorflow/tsl/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz\n"
2022-12-10 00:55:36 +01:00
]
},
{
2023-01-06 21:43:41 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"25/25 [==============================] - ETA: 0s - loss: 2.2671 - accuracy: 0.1963\n",
"Epoch 1: val_accuracy improved from -inf to 0.20312, saving model to alex_1.h5\n",
"25/25 [==============================] - 24s 939ms/step - loss: 2.2671 - accuracy: 0.1963 - val_loss: 2.2120 - val_accuracy: 0.2031\n",
"Epoch 2/25\n",
"25/25 [==============================] - ETA: 0s - loss: 2.0757 - accuracy: 0.1875\n",
"Epoch 2: val_accuracy improved from 0.20312 to 0.28125, saving model to alex_1.h5\n",
"25/25 [==============================] - 22s 899ms/step - loss: 2.0757 - accuracy: 0.1875 - val_loss: 1.7334 - val_accuracy: 0.2812\n",
"Epoch 3/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.7064 - accuracy: 0.2100\n",
"Epoch 3: val_accuracy did not improve from 0.28125\n",
"25/25 [==============================] - 23s 940ms/step - loss: 1.7064 - accuracy: 0.2100 - val_loss: 1.6128 - val_accuracy: 0.2656\n",
"Epoch 4/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6449 - accuracy: 0.2537\n",
"Epoch 4: val_accuracy improved from 0.28125 to 0.34896, saving model to alex_1.h5\n",
"25/25 [==============================] - 23s 918ms/step - loss: 1.6449 - accuracy: 0.2537 - val_loss: 1.5930 - val_accuracy: 0.3490\n",
"Epoch 5/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6596 - accuracy: 0.2275\n",
"Epoch 5: val_accuracy did not improve from 0.34896\n",
"25/25 [==============================] - 23s 928ms/step - loss: 1.6596 - accuracy: 0.2275 - val_loss: 1.5650 - val_accuracy: 0.2865\n",
"Epoch 6/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6292 - accuracy: 0.2625\n",
"Epoch 6: val_accuracy did not improve from 0.34896\n",
"25/25 [==============================] - 23s 935ms/step - loss: 1.6292 - accuracy: 0.2625 - val_loss: 1.5573 - val_accuracy: 0.3021\n",
"Epoch 7/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6197 - accuracy: 0.2562\n",
"Epoch 7: val_accuracy did not improve from 0.34896\n",
"25/25 [==============================] - 23s 929ms/step - loss: 1.6197 - accuracy: 0.2562 - val_loss: 1.5328 - val_accuracy: 0.3125\n",
"Epoch 8/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5907 - accuracy: 0.2975\n",
"Epoch 8: val_accuracy improved from 0.34896 to 0.36458, saving model to alex_1.h5\n",
"25/25 [==============================] - 24s 943ms/step - loss: 1.5907 - accuracy: 0.2975 - val_loss: 1.4958 - val_accuracy: 0.3646\n",
"Epoch 9/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5715 - accuracy: 0.2962\n",
"Epoch 9: val_accuracy improved from 0.36458 to 0.40104, saving model to alex_1.h5\n",
"25/25 [==============================] - 24s 944ms/step - loss: 1.5715 - accuracy: 0.2962 - val_loss: 1.4821 - val_accuracy: 0.4010\n",
"Epoch 10/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5357 - accuracy: 0.3162\n",
"Epoch 10: val_accuracy did not improve from 0.40104\n",
"25/25 [==============================] - 23s 937ms/step - loss: 1.5357 - accuracy: 0.3162 - val_loss: 1.4562 - val_accuracy: 0.3958\n",
"Epoch 11/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5030 - accuracy: 0.3262\n",
"Epoch 11: val_accuracy improved from 0.40104 to 0.45833, saving model to alex_1.h5\n",
"25/25 [==============================] - 24s 970ms/step - loss: 1.5030 - accuracy: 0.3262 - val_loss: 1.4106 - val_accuracy: 0.4583\n",
"Epoch 12/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.4862 - accuracy: 0.3613\n",
"Epoch 12: val_accuracy improved from 0.45833 to 0.53125, saving model to alex_1.h5\n",
"25/25 [==============================] - 25s 1s/step - loss: 1.4862 - accuracy: 0.3613 - val_loss: 1.3597 - val_accuracy: 0.5312\n",
"Epoch 13/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.4194 - accuracy: 0.4162\n",
"Epoch 13: val_accuracy did not improve from 0.53125\n",
"25/25 [==============================] - 24s 974ms/step - loss: 1.4194 - accuracy: 0.4162 - val_loss: 1.3095 - val_accuracy: 0.4583\n",
"Epoch 14/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3418 - accuracy: 0.4437\n",
"Epoch 14: val_accuracy did not improve from 0.53125\n",
"25/25 [==============================] - 24s 959ms/step - loss: 1.3418 - accuracy: 0.4437 - val_loss: 1.2787 - val_accuracy: 0.4792\n",
"Epoch 15/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3059 - accuracy: 0.4675\n",
"Epoch 15: val_accuracy did not improve from 0.53125\n",
"25/25 [==============================] - 24s 951ms/step - loss: 1.3059 - accuracy: 0.4675 - val_loss: 1.2374 - val_accuracy: 0.4635\n",
"Epoch 16/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.2688 - accuracy: 0.4725\n",
"Epoch 16: val_accuracy did not improve from 0.53125\n",
"25/25 [==============================] - 24s 955ms/step - loss: 1.2688 - accuracy: 0.4725 - val_loss: 1.2178 - val_accuracy: 0.4583\n",
"Epoch 17/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.2209 - accuracy: 0.4875\n",
"Epoch 17: val_accuracy did not improve from 0.53125\n",
"25/25 [==============================] - 24s 958ms/step - loss: 1.2209 - accuracy: 0.4875 - val_loss: 1.2793 - val_accuracy: 0.3958\n",
"Epoch 18/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1457 - accuracy: 0.5150\n",
"Epoch 18: val_accuracy improved from 0.53125 to 0.55729, saving model to alex_1.h5\n",
"25/25 [==============================] - 24s 980ms/step - loss: 1.1457 - accuracy: 0.5150 - val_loss: 1.0978 - val_accuracy: 0.5573\n",
"Epoch 19/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1318 - accuracy: 0.5063\n",
"Epoch 19: val_accuracy did not improve from 0.55729\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.1318 - accuracy: 0.5063 - val_loss: 1.0764 - val_accuracy: 0.5104\n",
"Epoch 20/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1289 - accuracy: 0.5125\n",
"Epoch 20: val_accuracy improved from 0.55729 to 0.56771, saving model to alex_1.h5\n",
"25/25 [==============================] - 25s 1s/step - loss: 1.1289 - accuracy: 0.5125 - val_loss: 1.0067 - val_accuracy: 0.5677\n",
"Epoch 21/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0175 - accuracy: 0.5638\n",
"Epoch 21: val_accuracy did not improve from 0.56771\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.0175 - accuracy: 0.5638 - val_loss: 1.0095 - val_accuracy: 0.5625\n",
"Epoch 22/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0559 - accuracy: 0.5288\n",
"Epoch 22: val_accuracy did not improve from 0.56771\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.0559 - accuracy: 0.5288 - val_loss: 1.0557 - val_accuracy: 0.5208\n",
"Epoch 23/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1151 - accuracy: 0.5412\n",
"Epoch 23: val_accuracy did not improve from 0.56771\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.1151 - accuracy: 0.5412 - val_loss: 1.0837 - val_accuracy: 0.5052\n",
"Epoch 24/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0158 - accuracy: 0.5625\n",
"Epoch 24: val_accuracy improved from 0.56771 to 0.58333, saving model to alex_1.h5\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.0158 - accuracy: 0.5625 - val_loss: 0.9605 - val_accuracy: 0.5833\n",
"Epoch 25/25\n",
2023-01-07 00:39:16 +01:00
"25/25 [==============================] - ETA: 0s - loss: 0.9619 - accuracy: 0.5750\n",
"Epoch 25: val_accuracy did not improve from 0.58333\n",
"25/25 [==============================] - 28s 1s/step - loss: 0.9619 - accuracy: 0.5750 - val_loss: 1.4147 - val_accuracy: 0.3906\n"
2023-01-06 21:43:41 +01:00
]
2022-12-10 00:55:36 +01:00
}
],
"source": [
2023-01-06 21:43:41 +01:00
"from keras.callbacks import ModelCheckpoint, EarlyStopping\n",
"\n",
"checkpoint = ModelCheckpoint(\"alex_1.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)\n",
"early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"\n",
"alex1 = model_flat_drop.fit_generator(\n",
" steps_per_epoch=len(train_ds), \n",
" generator=train_ds, \n",
" validation_data= validation_ds, \n",
" validation_steps=len(validation_ds), \n",
" epochs=25, \n",
" callbacks=[checkpoint,early])"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 16,
2022-12-10 00:55:36 +01:00
"metadata": {},
"outputs": [
{
2023-01-07 00:39:16 +01:00
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAjwAAAHHCAYAAAC7soLdAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACZAUlEQVR4nOzdd1yVdf/H8dc5BzjspciSIYqKihN3bsuRluXKNDVH911qma27X1ba3mnZTiXLbWk7U9xbVNwTERCQIXtzzrl+fxw9ijg4ChzAz/N+8IBzzc8xb8+b7/UdKkVRFIQQQgghajG1pQsQQgghhKhsEniEEEIIUetJ4BFCCCFErSeBRwghhBC1ngQeIYQQQtR6EniEEEIIUetJ4BFCCCFErSeBRwghhBC1ngQeIYQQQtR6EniEEBVGpVIxa9Yss887d+4cKpWK8PDwCq9JCCFAAo8QtU54eDgqlQqVSsW2bdvK7FcUBT8/P1QqFYMGDbJAhUIIUfUk8AhRS9na2rJkyZIy2zdv3sz58+fRarUWqEoIISxDAo8QtdTAgQNZuXIlOp2u1PYlS5bQrl07vLy8LFTZ3SMvL8/SJQghLpHAI0QtNWrUKC5evMi6detM24qLi1m1ahWPPvrodc/Jy8vjueeew8/PD61WS5MmTfjoo49QFKXUcUVFRTz77LN4eHjg5OTEAw88wPnz5697zYSEBCZMmICnpydarZbmzZuzYMGC23pP6enpPP/884SGhuLo6IizszMDBgzg4MGDZY4tLCxk1qxZNG7cGFtbW7y9vXn44YeJjo42HWMwGJg7dy6hoaHY2tri4eFB//79iYyMBG7et+ja/kqzZs1CpVJx7NgxHn30Udzc3LjnnnsAOHToEOPHjycoKAhbW1u8vLyYMGECFy9evO6f18SJE/Hx8UGr1dKgQQOefPJJiouLOXv2LCqVik8//bTMeTt27EClUrF06VJz/1iFuCtYWboAIUTlCAwMpHPnzixdupQBAwYA8Pfff5OVlcUjjzzCZ599Vup4RVF44IEH2LhxIxMnTqR169asXbuWF154gYSEhFIfspMmTeKnn37i0UcfpUuXLmzYsIH777+/TA3Jycl06tQJlUrF1KlT8fDw4O+//2bixIlkZ2czffp0s97T2bNnWbNmDcOHD6dBgwYkJyfzzTff0KNHD44dO4aPjw8Aer2eQYMGERERwSOPPMIzzzxDTk4O69at48iRIzRs2BCAiRMnEh4ezoABA5g0aRI6nY6tW7eya9cuwsLCzKrtsuHDhxMcHMw777xjCorr1q3j7NmzPP7443h5eXH06FG+/fZbjh49yq5du1CpVAAkJibSoUMHMjMzeeKJJ2jatCkJCQmsWrWK/Px8goKC6Nq1K4sXL+bZZ58tdd/Fixfj5OTEgw8+eFt1C1HrKUKIWmXhwoUKoOzdu1eZN2+e4uTkpOTn5yuKoijDhw9XevXqpSiKogQEBCj333+/6bw1a9YogPLWW2+Vut6wYcMUlUqlnDlzRlEURYmKilIA5amnnip13KOPPqoAyuuvv27aNnHiRMXb21tJS0srdewjjzyiuLi4mOqKiYlRAGXhwoU3fW+FhYWKXq8vtS0mJkbRarXKG2+8Ydq2YMECBVA++eSTMtcwGAyKoijKhg0bFEB5+umnb3jMzeq69r2+/vrrCqCMGjWqzLGX3+fVli5dqgDKli1bTNvGjh2rqNVqZe/evTes6ZtvvlEA5fjx46Z9xcXFSt26dZVx48aVOU8IYSSPtISoxUaMGEFBQQF//PEHOTk5/PHHHzd8nPXXX3+h0Wh4+umnS21/7rnnUBSFv//+23QcUOa4a1trFEXh559/ZvDgwSiKQlpamumrX79+ZGVlsX//frPej1arRa02/rOl1+u5ePEijo6ONGnSpNS1fv75Z+rWrcu0adPKXONya8rPP/+MSqXi9ddfv+Ext+O///1vmW12dnamnwsLC0lLS6
NTp04AproNBgNr1qxh8ODB121dulzTiBEjsLW1ZfHixaZ9a9euJS0tjTFjxtx23ULUdhJ4hKjFPDw86Nu3L0uWLOGXX35Br9czbNiw6x4bGxuLj48PTk5OpbaHhISY9l/+rlarTY+FLmvSpEmp16mpqWRmZvLtt9/i4eFR6uvxxx8HICUlxaz3YzAY+PTTTwkODkar1VK3bl08PDw4dOgQWVlZpuOio6Np0qQJVlY3fmofHR2Nj48P7u7uZtVwKw0aNCizLT09nWeeeQZPT0/s7Ozw8PAwHXe57tTUVLKzs2nRosVNr+/q6srgwYNLjcBbvHgxvr6+9O7duwLfiRC1i/ThEaKWe/TRR5k8eTIXLlxgwIABuLq6Vsl9DQYDAGPGjGHcuHHXPaZly5ZmXfOdd97h1VdfZcKECbz55pu4u7ujVquZPn266X4V6UYtPXq9/obnXN2ac9mIESPYsWMHL7zwAq1bt8bR0RGDwUD//v1vq+6xY8eycuVKduzYQWhoKL/99htPPfWUqfVLCFGWBB4harmHHnqI//znP+zatYvly5ff8LiAgADWr19PTk5OqVaeEydOmPZf/m4wGEytKJedPHmy1PUuj+DS6/X07du3Qt7LqlWr6NWrF/Pnzy+1PTMzk7p165peN2zYkN27d1NSUoK1tfV1r9WwYUPWrl1Lenr6DVt53NzcTNe/2uXWrvLIyMggIiKC2bNn89prr5m2nz59utRxHh4eODs7c+TIkVtes3///nh4eLB48WI6duxIfn4+jz32WLlrEuJuJL8OCFHLOTo68tVXXzFr1iwGDx58w+MGDhyIXq9n3rx5pbZ/+umnqFQq00ivy9+vHeU1Z86cUq81Gg1Dhw7l559/vu6HeGpqqtnvRaPRlBkiv3LlShISEkptGzp0KGlpaWXeC2A6f+jQoSiKwuzZs294jLOzM3Xr1mXLli2l9n/55Zdm1Xz1NS+79s9LrVYzZMgQfv/9d9Ow+OvVBGBlZcWoUaNYsWIF4eHhhIaGmt1aJsTdRlp4hLgL3OiR0tUGDx5Mr169eOWVVzh37hytWrXi33//5ddff2X69OmmPjutW7dm1KhRfPnll2RlZdGlSxciIiI4c+ZMmWu+9957bNy4kY4dOzJ58mSaNWtGeno6+/fvZ/369aSnp5v1PgYNGsQbb7zB448/TpcuXTh8+DCLFy8mKCio1HFjx45l0aJFzJgxgz179tCtWzfy8vJYv349Tz31FA8++CC9evXiscce47PPPuP06dOmx0tbt26lV69eTJ06FTAOwX/vvfeYNGkSYWFhbNmyhVOnTpW7ZmdnZ7p3784HH3xASUkJvr6+/Pvvv8TExJQ59p133uHff/+lR48ePPHEE4SEhJCUlMTKlSvZtm1bqceRY8eO5bPPPmPjxo28//77Zv05CnFXstj4MCFEpbh6WPrNXDssXVEUJScnR3n22WcVHx8fxdraWgkODlY+/PBD05DoywoKCpSnn35aqVOnjuLg4KAMHjxYiY+PLzNUW1EUJTk5WZkyZYri5+enWFtbK15eXkqfPn2Ub7/91nSMOcPSn3vuOcXb21uxs7NTunbtquzcuVPp0aOH0qNHj1LH5ufnK6+88orSoEED032HDRumREdHm47R6XTKhx9+qDRt2lSxsbFRPDw8lAEDBij79u0rdZ2JEycqLi4uipOTkzJixAglJSXlhsPSU1NTy9R9/vx55aGHHlJcXV0VFxcXZfjw4UpiYuJ1/7xiY2OVsWPHKh4eHopWq1WCgoKUKVOmKEVFRWWu27x5c0WtVivnz5+/6Z+bEEJRVIpyTTurEEKIGqFNmza4u7sTERFh6VKEqPakD48QQtRAkZGRREVFMXbsWEuXIkSNIC08QghRgxw5coR9+/bx8ccfk5aWxtmzZ7G1tbV0WUJUe9LCI4QQNciqVat4/PHHKSkpYenSpRJ2hCgnaeERQgghRK0nLTxCCCGEqPUk8AghhBCi1rvrJh
40GAwkJibi5OR0RysiCyGEEKLqKIpCTk4OPj4+t7Vu3F0XeBITE/Hz87N0GUIIIYS4DfHx8dSvX9/s8+66wHN5UcT
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
2023-01-06 21:43:41 +01:00
}
],
"source": [
"import matplotlib.pyplot as plt\n",
"plt.plot(alex1.history[\"accuracy\"])\n",
"plt.plot(alex1.history['val_accuracy'])\n",
"plt.plot(alex1.history['loss'])\n",
"plt.plot(alex1.history['val_loss'])\n",
"plt.title(\"Model accuracy\")\n",
"plt.ylabel(\"Value\")\n",
"plt.xlabel(\"Epoch\")\n",
"plt.legend([\"Accuracy\",\"Validation Accuracy\",\"Loss\",\"Validation Loss\"])\n",
"plt.show()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 18,
2023-01-06 21:43:41 +01:00
"metadata": {},
"outputs": [
2022-12-10 00:55:36 +01:00
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 2s 218ms/step - loss: 1.4086 - accuracy: 0.4141\n"
2023-01-06 21:43:41 +01:00
]
2023-01-07 00:39:16 +01:00
},
{
"data": {
"text/plain": [
"[1.4086337089538574, 0.4140625]"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
2022-12-10 00:55:36 +01:00
}
],
"source": [
2023-01-07 00:39:16 +01:00
"model_flat_drop.evaluate(test_ds)"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Do warstw maxpooling"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 19,
2022-12-10 00:55:36 +01:00
"metadata": {},
2023-01-07 00:39:16 +01:00
"outputs": [],
2022-12-10 00:55:36 +01:00
"source": [
"model_pool_drop = keras.models.Sequential([\n",
" keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dense(10, activation='softmax')\n",
"])"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 20,
2022-12-10 00:55:36 +01:00
"metadata": {},
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_1\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d_5 (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" max_pooling2d_3 (MaxPooling (None, 27, 27, 96) 0 \n",
" 2D) \n",
" \n",
" dropout_2 (Dropout) (None, 27, 27, 96) 0 \n",
" \n",
" conv2d_6 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" max_pooling2d_4 (MaxPooling (None, 13, 13, 256) 0 \n",
" 2D) \n",
" \n",
" dropout_3 (Dropout) (None, 13, 13, 256) 0 \n",
" \n",
" conv2d_7 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" conv2d_8 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" conv2d_9 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" max_pooling2d_5 (MaxPooling (None, 6, 6, 256) 0 \n",
" 2D) \n",
" \n",
" dropout_4 (Dropout) (None, 6, 6, 256) 0 \n",
" \n",
" flatten_1 (Flatten) (None, 9216) 0 \n",
" \n",
" dense_3 (Dense) (None, 4096) 37752832 \n",
" \n",
" dense_4 (Dense) (None, 4096) 16781312 \n",
" \n",
" dense_5 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,322,314\n",
"Trainable params: 58,322,314\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
2022-12-10 00:55:36 +01:00
]
}
],
"source": [
"model_pool_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(lr=.001), metrics=['accuracy'])\n",
"model_pool_drop.summary()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 21,
2022-12-10 00:55:36 +01:00
"metadata": {},
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/3758035572.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex2 = model_pool_drop.fit_generator(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"25/25 [==============================] - ETA: 0s - loss: 2.0517 - accuracy: 0.1963\n",
"Epoch 1: val_accuracy improved from -inf to 0.26042, saving model to alex_2.h5\n",
"25/25 [==============================] - 24s 926ms/step - loss: 2.0517 - accuracy: 0.1963 - val_loss: 1.8585 - val_accuracy: 0.2604\n",
"Epoch 2/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6898 - accuracy: 0.2300\n",
"Epoch 2: val_accuracy improved from 0.26042 to 0.30208, saving model to alex_2.h5\n",
"25/25 [==============================] - 23s 937ms/step - loss: 1.6898 - accuracy: 0.2300 - val_loss: 1.7242 - val_accuracy: 0.3021\n",
"Epoch 3/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6539 - accuracy: 0.2275\n",
"Epoch 3: val_accuracy did not improve from 0.30208\n",
"25/25 [==============================] - 23s 942ms/step - loss: 1.6539 - accuracy: 0.2275 - val_loss: 1.7515 - val_accuracy: 0.2552\n",
"Epoch 4/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6148 - accuracy: 0.2775\n",
"Epoch 4: val_accuracy did not improve from 0.30208\n",
"25/25 [==============================] - 24s 971ms/step - loss: 1.6148 - accuracy: 0.2775 - val_loss: 1.7084 - val_accuracy: 0.2812\n",
"Epoch 5/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5876 - accuracy: 0.3013\n",
"Epoch 5: val_accuracy did not improve from 0.30208\n",
"25/25 [==============================] - 24s 947ms/step - loss: 1.5876 - accuracy: 0.3013 - val_loss: 1.6701 - val_accuracy: 0.2344\n",
"Epoch 6/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5765 - accuracy: 0.2962\n",
"Epoch 6: val_accuracy improved from 0.30208 to 0.34896, saving model to alex_2.h5\n",
"25/25 [==============================] - 22s 894ms/step - loss: 1.5765 - accuracy: 0.2962 - val_loss: 1.6380 - val_accuracy: 0.3490\n",
"Epoch 7/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5710 - accuracy: 0.2825\n",
"Epoch 7: val_accuracy improved from 0.34896 to 0.36979, saving model to alex_2.h5\n",
"25/25 [==============================] - 22s 865ms/step - loss: 1.5710 - accuracy: 0.2825 - val_loss: 1.6219 - val_accuracy: 0.3698\n",
"Epoch 8/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5406 - accuracy: 0.3275\n",
"Epoch 8: val_accuracy did not improve from 0.36979\n",
"25/25 [==============================] - 22s 872ms/step - loss: 1.5406 - accuracy: 0.3275 - val_loss: 1.6149 - val_accuracy: 0.3646\n",
"Epoch 9/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.4844 - accuracy: 0.3537\n",
"Epoch 9: val_accuracy did not improve from 0.36979\n",
"25/25 [==============================] - 22s 879ms/step - loss: 1.4844 - accuracy: 0.3537 - val_loss: 1.5673 - val_accuracy: 0.3490\n",
"Epoch 10/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.4884 - accuracy: 0.3462\n",
"Epoch 10: val_accuracy improved from 0.36979 to 0.41146, saving model to alex_2.h5\n",
"25/25 [==============================] - 23s 911ms/step - loss: 1.4884 - accuracy: 0.3462 - val_loss: 1.5698 - val_accuracy: 0.4115\n",
"Epoch 11/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.4408 - accuracy: 0.3887\n",
"Epoch 11: val_accuracy did not improve from 0.41146\n",
"25/25 [==============================] - 22s 897ms/step - loss: 1.4408 - accuracy: 0.3887 - val_loss: 1.5205 - val_accuracy: 0.4115\n",
"Epoch 12/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3852 - accuracy: 0.4250\n",
"Epoch 12: val_accuracy did not improve from 0.41146\n",
"25/25 [==============================] - 23s 905ms/step - loss: 1.3852 - accuracy: 0.4250 - val_loss: 1.5540 - val_accuracy: 0.3594\n",
"Epoch 13/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3202 - accuracy: 0.4663\n",
"Epoch 13: val_accuracy did not improve from 0.41146\n",
"25/25 [==============================] - 23s 906ms/step - loss: 1.3202 - accuracy: 0.4663 - val_loss: 1.3669 - val_accuracy: 0.4115\n",
"Epoch 14/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.2614 - accuracy: 0.4700\n",
"Epoch 14: val_accuracy improved from 0.41146 to 0.44792, saving model to alex_2.h5\n",
"25/25 [==============================] - 23s 917ms/step - loss: 1.2614 - accuracy: 0.4700 - val_loss: 1.3723 - val_accuracy: 0.4479\n",
"Epoch 15/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1812 - accuracy: 0.4900\n",
"Epoch 15: val_accuracy did not improve from 0.44792\n",
"25/25 [==============================] - 23s 931ms/step - loss: 1.1812 - accuracy: 0.4900 - val_loss: 1.4332 - val_accuracy: 0.3854\n",
"Epoch 16/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1327 - accuracy: 0.5113\n",
"Epoch 16: val_accuracy did not improve from 0.44792\n",
"25/25 [==============================] - 23s 908ms/step - loss: 1.1327 - accuracy: 0.5113 - val_loss: 1.4481 - val_accuracy: 0.3802\n",
"Epoch 17/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0848 - accuracy: 0.5462\n",
"Epoch 17: val_accuracy did not improve from 0.44792\n",
"25/25 [==============================] - 23s 915ms/step - loss: 1.0848 - accuracy: 0.5462 - val_loss: 1.6393 - val_accuracy: 0.3594\n",
"Epoch 18/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1003 - accuracy: 0.5462\n",
"Epoch 18: val_accuracy did not improve from 0.44792\n",
"25/25 [==============================] - 23s 915ms/step - loss: 1.1003 - accuracy: 0.5462 - val_loss: 1.9934 - val_accuracy: 0.3333\n",
"Epoch 19/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0956 - accuracy: 0.5437\n",
"Epoch 19: val_accuracy improved from 0.44792 to 0.47917, saving model to alex_2.h5\n",
"25/25 [==============================] - 24s 951ms/step - loss: 1.0956 - accuracy: 0.5437 - val_loss: 1.1398 - val_accuracy: 0.4792\n",
"Epoch 20/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0014 - accuracy: 0.5688\n",
"Epoch 20: val_accuracy did not improve from 0.47917\n",
"25/25 [==============================] - 24s 976ms/step - loss: 1.0014 - accuracy: 0.5688 - val_loss: 1.2802 - val_accuracy: 0.4062\n",
"Epoch 21/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1812 - accuracy: 0.5213\n",
"Epoch 21: val_accuracy did not improve from 0.47917\n",
"25/25 [==============================] - 25s 994ms/step - loss: 1.1812 - accuracy: 0.5213 - val_loss: 1.2117 - val_accuracy: 0.4219\n",
"Epoch 22/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1199 - accuracy: 0.5362\n",
"Epoch 22: val_accuracy did not improve from 0.47917\n",
"25/25 [==============================] - 25s 1s/step - loss: 1.1199 - accuracy: 0.5362 - val_loss: 1.1858 - val_accuracy: 0.4531\n",
"Epoch 23/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0079 - accuracy: 0.5700\n",
"Epoch 23: val_accuracy did not improve from 0.47917\n",
"25/25 [==============================] - 25s 1s/step - loss: 1.0079 - accuracy: 0.5700 - val_loss: 1.2529 - val_accuracy: 0.4219\n",
"Epoch 24/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9996 - accuracy: 0.5750\n",
"Epoch 24: val_accuracy did not improve from 0.47917\n",
"25/25 [==============================] - 25s 1s/step - loss: 0.9996 - accuracy: 0.5750 - val_loss: 1.1984 - val_accuracy: 0.4427\n",
"Epoch 25/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9713 - accuracy: 0.5825\n",
"Epoch 25: val_accuracy improved from 0.47917 to 0.51042, saving model to alex_2.h5\n",
"25/25 [==============================] - 25s 1s/step - loss: 0.9713 - accuracy: 0.5825 - val_loss: 1.0454 - val_accuracy: 0.5104\n"
2022-12-10 00:55:36 +01:00
]
}
],
"source": [
2023-01-06 21:43:41 +01:00
"# Save the best model (by validation accuracy) and stop early if it stalls.\n",
"# `period` was removed: it is deprecated and the default save_freq='epoch' is equivalent.\n",
"checkpoint = ModelCheckpoint(\"alex_2.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')\n",
"early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"\n",
"# Model.fit_generator is deprecated; Model.fit accepts generators directly.\n",
"alex2 = model_pool_drop.fit(\n",
"    train_ds,\n",
"    steps_per_epoch=len(train_ds),\n",
"    validation_data=validation_ds,\n",
"    validation_steps=len(validation_ds),\n",
"    epochs=25,\n",
"    callbacks=[checkpoint, early])"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 22,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAkUAAAHHCAYAAACx7iyPAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACx4UlEQVR4nOzdd3xT1fvA8U+SpnvTDaWDLVs2yBKUvWXJXooKMhQRUQH9/lyAgKK4gIqyZYvsLXsP2aUU6ITSprtpk/v7oxCplFFom7Q879crrzb3npzz3BKap+eeoVIURUEIIYQQ4hmnNncAQgghhBCWQJIiIYQQQggkKRJCCCGEACQpEkIIIYQAJCkSQgghhAAkKRJCCCGEACQpEkIIIYQAJCkSQgghhAAkKRJCCCGEACQpEkIUMpVKxeTJk/P8uqtXr6JSqQgJCcn3mIQQAiQpEuKZFBISgkqlQqVS8ffff993XlEU/P39UalUtG/f3gwRCiFE4ZOkSIhnmK2tLYsWLbrv+K5du7hx4wY2NjZmiEoIIcxDkiIhnmFt27Zl+fLlZGVl5Ti+aNEiatWqhY+Pj5kie3akpKSYOwQhxB2SFAnxDOvduzdxcXFs2bLFdEyv1/PHH3/w6quv5vqalJQU3nnnHfz9/bGxsaFChQpMmzYNRVFylMvIyGDMmDF4enri5OREx44duXHjRq51RkREMHjwYLy9vbGxsaFy5crMmzfvia7p9u3bvPvuu1StWhVHR0ecnZ1p06YNJ0+evK9seno6kydPpnz58tja2uLr60vXrl0JDQ01lTEajcyaNYuqVatia2uLp6cnrVu35siRI8DDxzr9d/zU5MmTUalUnD17lldffRU3NzdeeOEFAE6dOsXAgQMJDg7G1tYWHx8fBg8eTFxcXK4/ryFDhuDn54eNjQ1BQUG88cYb6PV6rly5gkqlYsaMGfe9bt++fahUKhYvXpzXH6sQzwQrcwcghDCfwMBAGjRowOLFi2nTpg0AGzZsQKfT0atXL7755psc5RVFoWPHjuzYsYMhQ4ZQo0YNNm3axLhx44iIiMjxQTx06FB+//13Xn31VRo2bMj27dtp167dfTHExMRQv359VCoVI0aMwNPTkw0bNjBkyBASExMZPXp0nq7pypUrrF69mu7duxMUFERMTAw//vgjTZs25ezZs/j5+QFgMBho374927Zto1evXowaNYqkpCS2bNnCmTNnKFOmDABDhgwhJCSENm3aMHToULKystizZw8HDhygdu3aeYrtru7du1OuXDk+++wzUzK5ZcsWrly5wqBBg/Dx8eGff/7hp59+4p9//uHAgQOoVCoAIiMjqVu3LgkJCbz22mtUrFiRiIgI/vjjD1JTUwkODqZRo0YsXLiQMWPG5Gh34cKFODk50alTpyeKW4hiTxFCPHPmz5+vAMrhw4eV2bNnK05OTkpqaqqiKIrSvXt3pXnz5oqiKEpAQIDSrl070+tWr16tAMr//ve/HPW98sorikqlUi5fvqwoiqKcOHFCAZQ333wzR7lXX31VAZRJkyaZjg0ZMkTx9fVVbt26laNsr169FBcXF1NcYWFhCqDMnz//odeWnp6uGAyGHMfCwsIUGxsb5ZNPPjEdmzdvngIoX3/99X11GI1GRVEUZfv27QqgvP322w8s87C4/nutkyZNUgCld+/e95W9e533Wrx4sQIou3fvNh3r37+/olarlcOHDz8wph9//FEBlHPnzpnO6fV6xcPDQxkwYMB9rxNCZJPbZ0I843r06EFaWhp//vknSUlJ/Pnnnw+8dfbXX3+h0Wh4++23cxx/5513UBSFDRs2mMoB95X7b6+PoiisWLGCDh06oCgKt27dMj1atWqFTqfj2LFjeboeGxsb1OrsX20Gg4G4uDgcHR2pUKFCjrpWrFiBh4cHI0eOvK+Ou70yK1asQKVSMWnSpAeWeRLDhw+/75idnZ3p+/T0dG7dukX9+vUBTHEbjU
ZWr15Nhw4dcu2luhtTjx49sLW1ZeHChaZzmzZt4tatW/Tt2/eJ4xaiuJOkSIhnnKenJy1btmTRokWsXLkSg8HAK6+8kmvZ8PBw/Pz8cHJyynG8UqVKpvN3v6rVatMtqLsqVKiQ4/nNmzdJSEjgp59+wtPTM8dj0KBBAMTGxubpeoxGIzNmzKBcuXLY2Njg4eGBp6cnp06dQqfTmcqFhoZSoUIFrKwePIogNDQUPz8/3N3d8xTDowQFBd137Pbt24waNQpvb2/s7Ozw9PQ0lbsb982bN0lMTKRKlSoPrd/V1ZUOHTrkmFm4cOFCSpYsyYsvvpiPVyJE8SJjioQQvPrqqwwbNozo6GjatGmDq6trobRrNBoB6Nu3LwMGDMi1TLVq1fJU52effcZHH33E4MGD+fTTT3F3d0etVjN69GhTe/npQT1GBoPhga+5t1forh49erBv3z7GjRtHjRo1cHR0xGg00rp16yeKu3///ixfvpx9+/ZRtWpV1q5dy5tvvmnqRRNC3E+SIiEEXbp04fXXX+fAgQMsXbr0geUCAgLYunUrSUlJOXqLzp8/bzp/96vRaDT1xtx14cKFHPXdnZlmMBho2bJlvlzLH3/8QfPmzZk7d26O4wkJCXh4eJielylThoMHD5KZmYlWq821rjJlyrBp0yZu3779wN4iNzc3U/33uttr9jji4+PZtm0bU6ZM4eOPPzYdv3TpUo5ynp6eODs7c+bMmUfW2bp1azw9PVm4cCH16tUjNTWVfv36PXZMQjyL5E8GIQSOjo7MmTOHyZMn06FDhweWa9u2LQaDgdmzZ+c4PmPGDFQqlWkG292v/529NnPmzBzPNRoN3bp1Y8WKFbl+0N+8eTPP16LRaO5bHmD58uVERETkONatWzdu3bp137UAptd369YNRVGYMmXKA8s4Ozvj4eHB7t27c5z//vvv8xTzvXXe9d+fl1qtpnPnzqxbt860JEBuMQFYWVnRu3dvli1bRkhICFWrVs1zr5sQzxrpKRJCADzw9tW9OnToQPPmzZk4cSJXr16levXqbN68mTVr1jB69GjTGKIaNWrQu3dvvv/+e3Q6HQ0bNmTbtm1cvnz5vjq/+OILduzYQb169Rg2bBjPPfcct2/f5tixY2zdupXbt2/n6Trat2/PJ598wqBBg2jYsCGnT59m4cKFBAcH5yjXv39/FixYwNixYzl06BCNGzcmJSWFrVu38uabb9KpUyeaN29Ov379+Oabb7h06ZLpVtaePXto3rw5I0aMALKXH/jiiy8YOnQotWvXZvfu3Vy8ePGxY3Z2dqZJkyZ89dVXZGZmUrJkSTZv3kxYWNh9ZT/77DM2b95M06ZNee2116hUqRJRUVEsX76cv//+O8etz/79+/PNN9+wY8cOvvzyyzz9HIV4Jplt3psQwmzunZL/MP+dkq8oipKUlKSMGTNG8fPzU7RarVKuXDll6tSppungd6WlpSlvv/22UqJECcXBwUHp0KGDcv369fumqSuKosTExChvvfWW4u/vr2i1WsXHx0dp0aKF8tNPP5nK5GVK/jvvvKP4+voqdnZ2SqNGjZT9+/crTZs2VZo2bZqjbGpqqjJx4kQlKCjI1O4rr7yihIaGmspkZWUpU6dOVSpWrKhYW1srnp6eSps2bZSjR4/mqGfIkCGKi4uL4uTkpPTo0UOJjY194JT8mzdv3hf3jRs3lC5duiiurq6Ki4uL0r17dyUyMjLXn1d4eLjSv39/xdPTU7GxsVGCg4OVt956S8nIyLiv3sqVKytqtVq5cePGQ39uQghFUSnKf/prhRBCFBs1a9bE3d2dbdu2mTsUISyejCkSQohi6siRI5w4cYL+/fubOxQhigTpKRJCiGLmzJkzHD16lOnTp3Pr1i2uXLmCra2tucMSwuJJT5EQQhQzf/zxB4MGDSIzM5PFixdLQiTEY5KeIiGEEEIIpKdICCGEEAKQpEgIIYQQApDFG3NlNBqJjIzEycnpqXbCFk
IIIUThURSFpKQk/Pz8nmifP0mKchEZGYm/v7+5wxBCCCHEE7h+/TqlSpXK8+skKcrF3Y0ur1+/jrOzs5mjEUIIIcT
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
2023-01-06 21:43:41 +01:00
}
],
"source": [
"# Training curves for model_pool_drop: accuracy and loss, train vs. validation.\n",
"plt.plot(alex2.history[\"accuracy\"])\n",
"plt.plot(alex2.history['val_accuracy'])\n",
"plt.plot(alex2.history['loss'])\n",
"plt.plot(alex2.history['val_loss'])\n",
"# Title fixed: the figure shows loss as well, not only accuracy.\n",
"plt.title(\"Training history\")\n",
"plt.ylabel(\"Value\")\n",
"plt.xlabel(\"Epoch\")\n",
"plt.legend([\"Accuracy\", \"Validation Accuracy\", \"Loss\", \"Validation Loss\"])\n",
"plt.show()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 23,
2023-01-06 21:43:41 +01:00
"metadata": {},
"outputs": [
2022-12-10 11:12:06 +01:00
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 2s 265ms/step - loss: 1.0271 - accuracy: 0.5391\n"
2023-01-06 21:43:41 +01:00
]
2023-01-07 00:39:16 +01:00
},
{
"data": {
"text/plain": [
"[1.0271097421646118, 0.5390625]"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
2022-12-10 11:12:06 +01:00
}
],
2022-12-10 00:55:36 +01:00
"source": [
2023-01-07 00:39:16 +01:00
"# Final evaluation of the dropout model on the held-out test set; returns [loss, accuracy].\n",
"model_pool_drop.evaluate(test_ds)"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Do warstw splotowych"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 24,
2022-12-10 00:55:36 +01:00
"metadata": {},
2023-01-07 00:39:16 +01:00
"outputs": [],
2022-12-10 00:55:36 +01:00
"source": [
"# AlexNet-style network with Dropout(0.5) placed directly after every\n",
"# convolutional layer (experiment: dropout on conv feature maps instead\n",
"# of the dense head). Input shape (227, 227, 3) matches AlexNet's expected size.\n",
"model_conv_drop = keras.models.Sequential([\n",
"    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),\n",
"    keras.layers.Dropout(.5),\n",
"    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
"    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"),\n",
"    keras.layers.Dropout(.5),\n",
"    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
"    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
"    keras.layers.Dropout(.5),\n",
"    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
"    keras.layers.Dropout(.5),\n",
"    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
"    keras.layers.Dropout(.5),\n",
"    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
"    keras.layers.Flatten(),\n",
"    keras.layers.Dense(4096, activation='relu'),\n",
"    keras.layers.Dense(4096, activation='relu'),\n",
"    keras.layers.Dense(10, activation='softmax')\n",
"])"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 25,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_2\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d_10 (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" dropout_5 (Dropout) (None, 55, 55, 96) 0 \n",
" \n",
" max_pooling2d_6 (MaxPooling (None, 27, 27, 96) 0 \n",
" 2D) \n",
" \n",
" conv2d_11 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" dropout_6 (Dropout) (None, 27, 27, 256) 0 \n",
" \n",
" max_pooling2d_7 (MaxPooling (None, 13, 13, 256) 0 \n",
" 2D) \n",
" \n",
" conv2d_12 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" dropout_7 (Dropout) (None, 13, 13, 384) 0 \n",
" \n",
" conv2d_13 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" dropout_8 (Dropout) (None, 13, 13, 384) 0 \n",
" \n",
" conv2d_14 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" dropout_9 (Dropout) (None, 13, 13, 256) 0 \n",
" \n",
" max_pooling2d_8 (MaxPooling (None, 6, 6, 256) 0 \n",
" 2D) \n",
" \n",
" flatten_2 (Flatten) (None, 9216) 0 \n",
" \n",
" dense_6 (Dense) (None, 4096) 37752832 \n",
" \n",
" dense_7 (Dense) (None, 4096) 16781312 \n",
" \n",
" dense_8 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,322,314\n",
"Trainable params: 58,322,314\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
2022-12-10 11:12:06 +01:00
]
}
],
2022-12-10 00:55:36 +01:00
"source": [
"# `lr` is deprecated in tf.keras optimizers; use `learning_rate` instead\n",
"# (the old spelling triggers the WARNING:absl message seen in the output above).\n",
"model_conv_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=.001), metrics=['accuracy'])\n",
"model_conv_drop.summary()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 26,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/3866647797.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex3 = model_conv_drop.fit_generator(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"25/25 [==============================] - ETA: 0s - loss: 1.8090 - accuracy: 0.2450\n",
"Epoch 1: val_accuracy improved from -inf to 0.21354, saving model to alex_3.h5\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.8090 - accuracy: 0.2450 - val_loss: 2.1443 - val_accuracy: 0.2135\n",
"Epoch 2/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6573 - accuracy: 0.2738\n",
"Epoch 2: val_accuracy improved from 0.21354 to 0.40104, saving model to alex_3.h5\n",
"25/25 [==============================] - 25s 1s/step - loss: 1.6573 - accuracy: 0.2738 - val_loss: 2.1381 - val_accuracy: 0.4010\n",
"Epoch 3/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5349 - accuracy: 0.3413\n",
"Epoch 3: val_accuracy did not improve from 0.40104\n",
"25/25 [==============================] - 25s 1s/step - loss: 1.5349 - accuracy: 0.3413 - val_loss: 2.0752 - val_accuracy: 0.2760\n",
"Epoch 4/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.4963 - accuracy: 0.3688\n",
"Epoch 4: val_accuracy did not improve from 0.40104\n",
"25/25 [==============================] - 25s 1s/step - loss: 1.4963 - accuracy: 0.3688 - val_loss: 2.0778 - val_accuracy: 0.2760\n",
"Epoch 5/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3579 - accuracy: 0.4112\n",
"Epoch 5: val_accuracy improved from 0.40104 to 0.48958, saving model to alex_3.h5\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.3579 - accuracy: 0.4112 - val_loss: 1.9411 - val_accuracy: 0.4896\n",
"Epoch 6/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.2882 - accuracy: 0.4512\n",
"Epoch 6: val_accuracy did not improve from 0.48958\n",
"25/25 [==============================] - 25s 1s/step - loss: 1.2882 - accuracy: 0.4512 - val_loss: 1.8212 - val_accuracy: 0.4323\n",
"Epoch 7/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1601 - accuracy: 0.5163\n",
"Epoch 7: val_accuracy did not improve from 0.48958\n",
"25/25 [==============================] - 25s 1s/step - loss: 1.1601 - accuracy: 0.5163 - val_loss: 1.7429 - val_accuracy: 0.3802\n",
"Epoch 8/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.2260 - accuracy: 0.4950\n",
"Epoch 8: val_accuracy did not improve from 0.48958\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.2260 - accuracy: 0.4950 - val_loss: 1.8061 - val_accuracy: 0.3490\n",
"Epoch 9/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1188 - accuracy: 0.5200\n",
"Epoch 9: val_accuracy did not improve from 0.48958\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.1188 - accuracy: 0.5200 - val_loss: 1.7995 - val_accuracy: 0.3177\n",
"Epoch 10/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9879 - accuracy: 0.5950\n",
"Epoch 10: val_accuracy did not improve from 0.48958\n",
"25/25 [==============================] - 27s 1s/step - loss: 0.9879 - accuracy: 0.5950 - val_loss: 1.8887 - val_accuracy: 0.1875\n",
"Epoch 11/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9848 - accuracy: 0.5800\n",
"Epoch 11: val_accuracy did not improve from 0.48958\n",
"25/25 [==============================] - 26s 1s/step - loss: 0.9848 - accuracy: 0.5800 - val_loss: 1.7492 - val_accuracy: 0.3073\n",
"Epoch 12/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9861 - accuracy: 0.6100\n",
"Epoch 12: val_accuracy did not improve from 0.48958\n",
"25/25 [==============================] - 26s 1s/step - loss: 0.9861 - accuracy: 0.6100 - val_loss: 1.6876 - val_accuracy: 0.3646\n",
"Epoch 13/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9351 - accuracy: 0.6075\n",
"Epoch 13: val_accuracy improved from 0.48958 to 0.51562, saving model to alex_3.h5\n",
"25/25 [==============================] - 27s 1s/step - loss: 0.9351 - accuracy: 0.6075 - val_loss: 1.5044 - val_accuracy: 0.5156\n",
"Epoch 14/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9683 - accuracy: 0.6125\n",
"Epoch 14: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 28s 1s/step - loss: 0.9683 - accuracy: 0.6125 - val_loss: 1.5911 - val_accuracy: 0.4375\n",
"Epoch 15/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9354 - accuracy: 0.6037\n",
"Epoch 15: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 28s 1s/step - loss: 0.9354 - accuracy: 0.6037 - val_loss: 1.6423 - val_accuracy: 0.3698\n",
"Epoch 16/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.8270 - accuracy: 0.6800\n",
"Epoch 16: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 30s 1s/step - loss: 0.8270 - accuracy: 0.6800 - val_loss: 1.6960 - val_accuracy: 0.2708\n",
"Epoch 17/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.8327 - accuracy: 0.6488\n",
"Epoch 17: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 30s 1s/step - loss: 0.8327 - accuracy: 0.6488 - val_loss: 1.6061 - val_accuracy: 0.3646\n",
"Epoch 18/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.8175 - accuracy: 0.6625\n",
"Epoch 18: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 27s 1s/step - loss: 0.8175 - accuracy: 0.6625 - val_loss: 1.5903 - val_accuracy: 0.4531\n",
"Epoch 19/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.7260 - accuracy: 0.7063\n",
"Epoch 19: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 29s 1s/step - loss: 0.7260 - accuracy: 0.7063 - val_loss: 1.4000 - val_accuracy: 0.4896\n",
"Epoch 20/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.7956 - accuracy: 0.6587\n",
"Epoch 20: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 28s 1s/step - loss: 0.7956 - accuracy: 0.6587 - val_loss: 1.6044 - val_accuracy: 0.4010\n",
"Epoch 21/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.8474 - accuracy: 0.6625\n",
"Epoch 21: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 28s 1s/step - loss: 0.8474 - accuracy: 0.6625 - val_loss: 1.5974 - val_accuracy: 0.3490\n",
"Epoch 22/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.6524 - accuracy: 0.7175\n",
"Epoch 22: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 27s 1s/step - loss: 0.6524 - accuracy: 0.7175 - val_loss: 1.5435 - val_accuracy: 0.3594\n",
"Epoch 23/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.8152 - accuracy: 0.6725\n",
"Epoch 23: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 26s 1s/step - loss: 0.8152 - accuracy: 0.6725 - val_loss: 1.8228 - val_accuracy: 0.2656\n",
"Epoch 24/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.8200 - accuracy: 0.6725\n",
"Epoch 24: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 29s 1s/step - loss: 0.8200 - accuracy: 0.6725 - val_loss: 1.5864 - val_accuracy: 0.3854\n",
"Epoch 25/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.7701 - accuracy: 0.6825\n",
"Epoch 25: val_accuracy did not improve from 0.51562\n",
"25/25 [==============================] - 27s 1s/step - loss: 0.7701 - accuracy: 0.6825 - val_loss: 1.4605 - val_accuracy: 0.5104\n"
2022-12-10 11:12:06 +01:00
]
}
],
2022-12-10 00:55:36 +01:00
"source": [
2023-01-06 21:43:41 +01:00
"# Save the best model (by validation accuracy) and stop early if it stalls.\n",
"# `period` was removed: it is deprecated and the default save_freq='epoch' is equivalent.\n",
"checkpoint = ModelCheckpoint(\"alex_3.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')\n",
"early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"\n",
"# Model.fit_generator is deprecated; Model.fit accepts generators directly.\n",
"alex3 = model_conv_drop.fit(\n",
"    train_ds,\n",
"    steps_per_epoch=len(train_ds),\n",
"    validation_data=validation_ds,\n",
"    validation_steps=len(validation_ds),\n",
"    epochs=25,\n",
"    callbacks=[checkpoint, early])"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 27,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAkUAAAHHCAYAAACx7iyPAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAAC8r0lEQVR4nOzdd3gUxRvA8e/epfeEdAih9yrSpQlKE+kdaQGVXqSIIEUpIlJEEAWByA/pTRSlhab0YkAEkRIgCWmk93K3vz8ip5HQk1zK+3mefcLtzs68GwL7Zmd2RlFVVUUIIYQQoojTGDsAIYQQQoj8QJIiIYQQQggkKRJCCCGEACQpEkIIIYQAJCkSQgghhAAkKRJCCCGEACQpEkIIIYQAJCkSQgghhAAkKRJCCCGEACQpEkLkMUVRmDlz5jOfd/v2bRRFwdfXN8djEkIIkKRIiCLJ19cXRVFQFIVff/31oeOqquLl5YWiKLzxxhtGiFAIIfKeJEVCFGEWFhZs2LDhof1Hjx4lKCgIc3NzI0QlhBDGIUmREEVYu3bt2Lp1KxkZGVn2b9iwgTp16uDu7m6kyIqOxMREY4cghPibJEVCFGG9e/cmMjKSAwcOGPalpaWxbds2+vTpk+05iYmJvPfee3h5eWFubk7FihX57LPPUFU1S7nU1FTGjRuHi4sLtra2vPnmmwQFBWVbZ3BwMIMHD8bNzQ1zc3OqVq3KmjVrnuuaoqKimDBhAtWrV8fGxgY7Ozvatm3LxYsXHyqbkpLCzJkzqVChAhYWFnh4eNClSxdu3rxpKKPX6/n888+pXr06FhYWuLi40KZNG86dOwc8fqzTf8dPzZw5E0VRuHLlCn369MHR0ZFXXnkFgEuXLjFw4EDKlCmDhYUF7u7uDB48mMjIyGy/Xz4+Pnh6emJubk7p0qUZNmwYaWlp3Lp1C0VRWLx48UPnnThxAkVR2Lhx47N+W4UoEkyMHYAQwnhKlSpFw4YN2bhxI23btgXg559/JjY2ll69erF06dIs5VVV5c033+Tw4cP4+PhQq1Yt9u3bx8SJEwkODs5yIx4yZAjr16+nT58+NGrUiEOHDtG+ffuHYggLC6NBgwYoisLIkSNxcXHh559/xsfHh7i4OMaOHftM13Tr1i127dpF9+7dKV26NGFhYXz99dc0a9aMK1eu4OnpCYBOp+ONN97Az8+PXr16MWbMGOLj4zlw4ACXL1+mbNmyAPj4+ODr60vbtm0ZMmQIGRkZ/PLLL5w6dYqXX375mWJ7oHv37pQvX565c+cakskDBw5w69YtBg0ahLu7O3/88QcrV67kjz/+4NSpUyiKAsC9e/eoV68eMTExvP3221SqVIng4GC2bdtGUlISZcqUoXHjxnz33XeMGzcuS7vfffcdtra2dOzY8bniFqLQU4UQRc7atWtVQD179qy6bNky1dbWVk1KSlJVVVW7d++utmjRQlVVVfX29lbbt29vOG/Xrl0qoM6ePTtLfd26dVMVRVFv3Lihqqqq+vv7q4A6fPjwLOX69OmjAuqMGTMM+3x8fFQPDw/1/v37Wcr26tVLtbe3N8QVEBCgAuratWsfe20pKSmqTqfLsi8gIEA1NzdXP/roI8O+NWvWqIC6aNGih+rQ6/WqqqrqoUOHVEAdPXr0I8s8Lq7/XuuMGTNUQO3du/dDZR9c579t3LhRBdRjx44Z9vXv31/VaDTq2bNnHxnT119/rQLq1atXDcfS0tJUZ2dndcCAAQ+dJ4TIJN1nQhRxPXr0IDk5mR9//JH4+Hh+/PHHR3ad/fTTT2i1WkaPHp1l/3vvvYeqqvz888+GcsBD5f771EdVVbZv306HDh1QVZX79+8bttatWxMbG8uFCxee6XrMzc3RaDL/a9PpdERGRmJjY0PFihWz1LV9+3acnZ0ZNWrUQ3U8eCqzfft2FEVhxowZjyzzPN59992H9llaWhr+nJKSwv3792nQoAGAIW69Xs+uXbvo0K
FDtk+pHsTUo0cPLCws+O677wzH9u3bx/379+nXr99zxy1EYSdJkRBFnIuLC61atWLDhg3s2LEDnU5Ht27dsi17584dPD09sbW1zbK/cuXKhuMPvmo0GkMX1AMVK1bM8jkiIoKYmBhWrlyJi4tLlm3QoEEAhIeHP9P16PV6Fi9eTPny5TE3N8fZ2RkXFxcuXbpEbGysodzNmzepWLEiJiaPHkVw8+ZNPD09cXJyeqYYnqR06dIP7YuKimLMmDG4ublhaWmJi4uLodyDuCMiIoiLi6NatWqPrd/BwYEOHTpkebPwu+++o3jx4rz66qs5eCVCFC4ypkgIQZ8+fRg6dCihoaG0bdsWBweHPGlXr9cD0K9fPwYMGJBtmRo1ajxTnXPnzuXDDz9k8ODBfPzxxzg5OaHRaBg7dqyhvZz0qCdGOp3ukef8+6nQAz169ODEiRNMnDiRWrVqYWNjg16vp02bNs8Vd//+/dm6dSsnTpygevXq7N69m+HDhxueogkhHiZJkRCCzp07884773Dq1Ck2b978yHLe3t4cPHiQ+Pj4LE+L/vzzT8PxB1/1er3hacwD165dy1LfgzfTdDodrVq1ypFr2bZtGy1atGD16tVZ9sfExODs7Gz4XLZsWU6fPk16ejqmpqbZ1lW2bFn27dtHVFTUI58WOTo6Gur/twdPzZ5GdHQ0fn5+zJo1i+nTpxv2X79+PUs5FxcX7OzsuHz58hPrbNOmDS4uLnz33XfUr1+fpKQk3nrrraeOSYiiSH5lEEJgY2PDihUrmDlzJh06dHhkuXbt2qHT6Vi2bFmW/YsXL0ZRFMMbbA++/vfttSVLlmT5rNVq6dq1K9u3b8/2Rh8REfHM16LVah+aHmDr1q0EBwdn2de1a1fu37//0LUAhvO7du2KqqrMmjXrkWXs7Oxwdnbm2LFjWY5/+eWXzxTzv+t84L/fL41GQ6dOnfjhhx8MUwJkFxOAiYkJvXv3ZsuWLfj6+lK9evVnfuomRFEjT4qEEACP7L76tw4dOtCiRQumTp3K7du3qVmzJvv37+f7779n7NixhjFEtWrVonfv3nz55ZfExsbSqFEj/Pz8uHHjxkN1fvLJJxw+fJj69eszdOhQqlSpQlRUFBcuXODgwYNERUU903W88cYbfPTRRwwaNIhGjRrx+++/891331GmTJks5fr378+6desYP348Z86coUmTJiQmJnLw4EGGDx9Ox44dadGiBW+99RZLly7l+vXrhq6sX375hRYtWjBy5Eggc/qBTz75hCFDhvDyyy9z7Ngx/vrrr6eO2c7OjqZNm/Lpp5+Snp5O8eLF2b9/PwEBAQ+VnTt3Lvv376dZs2a8/fbbVK5cmZCQELZu3cqvv/6apeuzf//+LF26lMOHDzN//vxn+j4KUSQZ7b03IYTR/PuV/Mf57yv5qqqq8fHx6rhx41RPT0/V1NRULV++vLpgwQLD6+APJCcnq6NHj1aLFSumWltbqx06dFADAwMfek1dVVU1LCxMHTFihOrl5aWampqq7u7uasuWLdWVK1cayjzLK/nvvfee6uHhoVpaWqqNGzdWT548qTZr1kxt1qxZlrJJSUnq1KlT1dKlSxva7datm3rz5k1DmYyMDHXBggVqpUqVVDMzM9XFxUVt27atev78+Sz1+Pj4qPb29qqtra3ao0cPNTw8/JGv5EdERDwUd1BQkNq5c2fVwcFBtbe3V7t3767eu3cv2+/XnTt31P79+6suLi6qubm5WqZMGXXEiBFqamrqQ/VWrVpV1Wg0alBQ0GO/b0IIVVVU9T/Pa4UQQhQatWvXxsnJCT8/P2OHIkS+J2OKhBCikDp37hz+/v7079/f2KEIUSDIkyIhhChkLl++zPnz51m4cCH379/n1q1bWFhYGDssIfI9eVIkhBCFzLZt2xg0aBDp6els3LhREiIhnpI8KRJCCCGEQJ4UCSGEEEIAkhQJIYQQQgAyeWO29Ho99+7dw9bW9oVWwhZCCCFE3l
FVlfj4eDw9PZ9rnT9JirJx7949vLy8jB2GEEIIIZ5DYGAgJUqUeObzJCnKxoOFLgMDA7GzszNyNEIIIYR4GnFxcXh
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
2023-01-06 21:43:41 +01:00
}
],
"source": [
"# Training curves for model_conv_drop: accuracy and loss, train vs. validation.\n",
"plt.plot(alex3.history[\"accuracy\"])\n",
"plt.plot(alex3.history['val_accuracy'])\n",
"plt.plot(alex3.history['loss'])\n",
"plt.plot(alex3.history['val_loss'])\n",
"# Title fixed: the figure shows loss as well, not only accuracy.\n",
"plt.title(\"Training history\")\n",
"plt.ylabel(\"Value\")\n",
"plt.xlabel(\"Epoch\")\n",
"plt.legend([\"Accuracy\", \"Validation Accuracy\", \"Loss\", \"Validation Loss\"])\n",
"plt.show()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 28,
2023-01-06 21:43:41 +01:00
"metadata": {},
"outputs": [
2022-12-10 11:12:06 +01:00
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 2s 280ms/step - loss: 1.4843 - accuracy: 0.4570\n"
2023-01-06 21:43:41 +01:00
]
2023-01-07 00:39:16 +01:00
},
{
"data": {
"text/plain": [
"[1.4843157529830933, 0.45703125]"
]
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
2022-12-10 11:12:06 +01:00
}
],
2022-12-10 00:55:36 +01:00
"source": [
2023-01-07 00:39:16 +01:00
"# Final evaluation of the conv-dropout model on the held-out test set; returns [loss, accuracy].\n",
"model_conv_drop.evaluate(test_ds)"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Do warstw spłaszczonych i maxpooling"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 29,
2022-12-10 00:55:36 +01:00
"metadata": {},
2023-01-07 00:39:16 +01:00
"outputs": [],
2022-12-10 00:55:36 +01:00
"source": [
"# AlexNet-style network with Dropout(0.5) after each max-pooling layer and\n",
"# after each dense layer (experiment: dropout on pooled feature maps and the\n",
"# flattened head, rather than after individual conv layers).\n",
"model_flat_pool_drop = keras.models.Sequential([\n",
"    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),\n",
"    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
"    keras.layers.Dropout(.5),\n",
"    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"),\n",
"    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
"    keras.layers.Dropout(.5),\n",
"    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
"    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
"    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
"    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
"    keras.layers.Dropout(.5),\n",
"    keras.layers.Flatten(),\n",
"    keras.layers.Dense(4096, activation='relu'),\n",
"    keras.layers.Dropout(.5),\n",
"    keras.layers.Dense(4096, activation='relu'),\n",
"    keras.layers.Dropout(.5),\n",
"    keras.layers.Dense(10, activation='softmax')\n",
"])"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 30,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_3\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d_15 (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" max_pooling2d_9 (MaxPooling (None, 27, 27, 96) 0 \n",
" 2D) \n",
" \n",
" dropout_10 (Dropout) (None, 27, 27, 96) 0 \n",
" \n",
" conv2d_16 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" max_pooling2d_10 (MaxPoolin (None, 13, 13, 256) 0 \n",
" g2D) \n",
" \n",
" dropout_11 (Dropout) (None, 13, 13, 256) 0 \n",
" \n",
" conv2d_17 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" conv2d_18 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" conv2d_19 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" max_pooling2d_11 (MaxPoolin (None, 6, 6, 256) 0 \n",
" g2D) \n",
" \n",
" dropout_12 (Dropout) (None, 6, 6, 256) 0 \n",
" \n",
" flatten_3 (Flatten) (None, 9216) 0 \n",
" \n",
" dense_9 (Dense) (None, 4096) 37752832 \n",
" \n",
" dropout_13 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_10 (Dense) (None, 4096) 16781312 \n",
" \n",
" dropout_14 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_11 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,322,314\n",
"Trainable params: 58,322,314\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
2022-12-10 11:12:06 +01:00
]
}
],
2022-12-10 00:55:36 +01:00
"source": [
"model_flat_pool_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(lr=.001), metrics=['accuracy'])\n",
"model_flat_pool_drop.summary()"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/2334869237.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex4 = model_flat_pool_drop.fit_generator(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"25/25 [==============================] - ETA: 0s - loss: 2.1044 - accuracy: 0.1750\n",
"Epoch 1: val_accuracy improved from -inf to 0.25000, saving model to alex_4.h5\n",
"25/25 [==============================] - 27s 1s/step - loss: 2.1044 - accuracy: 0.1750 - val_loss: 1.9644 - val_accuracy: 0.2500\n",
"Epoch 2/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.7691 - accuracy: 0.1875\n",
"Epoch 2: val_accuracy did not improve from 0.25000\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.7691 - accuracy: 0.1875 - val_loss: 1.8190 - val_accuracy: 0.1979\n",
"Epoch 3/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.7062 - accuracy: 0.2113\n",
"Epoch 3: val_accuracy did not improve from 0.25000\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.7062 - accuracy: 0.2113 - val_loss: 1.8115 - val_accuracy: 0.2083\n",
"Epoch 4/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6706 - accuracy: 0.2362\n",
"Epoch 4: val_accuracy improved from 0.25000 to 0.30208, saving model to alex_4.h5\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.6706 - accuracy: 0.2362 - val_loss: 1.7808 - val_accuracy: 0.3021\n",
"Epoch 5/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6715 - accuracy: 0.2113\n",
"Epoch 5: val_accuracy improved from 0.30208 to 0.30729, saving model to alex_4.h5\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.6715 - accuracy: 0.2113 - val_loss: 1.7774 - val_accuracy: 0.3073\n",
"Epoch 6/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6512 - accuracy: 0.2425\n",
"Epoch 6: val_accuracy improved from 0.30729 to 0.32812, saving model to alex_4.h5\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.6512 - accuracy: 0.2425 - val_loss: 1.7714 - val_accuracy: 0.3281\n",
"Epoch 7/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6418 - accuracy: 0.2475\n",
"Epoch 7: val_accuracy did not improve from 0.32812\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.6418 - accuracy: 0.2475 - val_loss: 1.7421 - val_accuracy: 0.2969\n",
"Epoch 8/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5988 - accuracy: 0.2488\n",
"Epoch 8: val_accuracy did not improve from 0.32812\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.5988 - accuracy: 0.2488 - val_loss: 1.7183 - val_accuracy: 0.3177\n",
"Epoch 9/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5946 - accuracy: 0.2800\n",
"Epoch 9: val_accuracy improved from 0.32812 to 0.34896, saving model to alex_4.h5\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.5946 - accuracy: 0.2800 - val_loss: 1.6653 - val_accuracy: 0.3490\n",
"Epoch 10/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5646 - accuracy: 0.2875\n",
"Epoch 10: val_accuracy did not improve from 0.34896\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.5646 - accuracy: 0.2875 - val_loss: 1.6476 - val_accuracy: 0.3490\n",
"Epoch 11/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5359 - accuracy: 0.3200\n",
"Epoch 11: val_accuracy improved from 0.34896 to 0.45312, saving model to alex_4.h5\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.5359 - accuracy: 0.3200 - val_loss: 1.5768 - val_accuracy: 0.4531\n",
"Epoch 12/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.4968 - accuracy: 0.3200\n",
"Epoch 12: val_accuracy did not improve from 0.45312\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.4968 - accuracy: 0.3200 - val_loss: 1.5472 - val_accuracy: 0.3594\n",
"Epoch 13/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.4612 - accuracy: 0.3975\n",
"Epoch 13: val_accuracy did not improve from 0.45312\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.4612 - accuracy: 0.3975 - val_loss: 1.4494 - val_accuracy: 0.4427\n",
"Epoch 14/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3955 - accuracy: 0.4038\n",
"Epoch 14: val_accuracy did not improve from 0.45312\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.3955 - accuracy: 0.4038 - val_loss: 1.4523 - val_accuracy: 0.3542\n",
"Epoch 15/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3153 - accuracy: 0.4525\n",
"Epoch 15: val_accuracy did not improve from 0.45312\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.3153 - accuracy: 0.4525 - val_loss: 1.3144 - val_accuracy: 0.4062\n",
"Epoch 16/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.2655 - accuracy: 0.4638\n",
"Epoch 16: val_accuracy did not improve from 0.45312\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.2655 - accuracy: 0.4638 - val_loss: 1.2121 - val_accuracy: 0.4479\n",
"Epoch 17/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1774 - accuracy: 0.4900\n",
"Epoch 17: val_accuracy improved from 0.45312 to 0.47917, saving model to alex_4.h5\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.1774 - accuracy: 0.4900 - val_loss: 1.1340 - val_accuracy: 0.4792\n",
"Epoch 18/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1709 - accuracy: 0.4875\n",
"Epoch 18: val_accuracy did not improve from 0.47917\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.1709 - accuracy: 0.4875 - val_loss: 1.1360 - val_accuracy: 0.4635\n",
"Epoch 19/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1127 - accuracy: 0.5125\n",
"Epoch 19: val_accuracy improved from 0.47917 to 0.48958, saving model to alex_4.h5\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.1127 - accuracy: 0.5125 - val_loss: 1.1156 - val_accuracy: 0.4896\n",
"Epoch 20/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0822 - accuracy: 0.5263\n",
"Epoch 20: val_accuracy improved from 0.48958 to 0.54167, saving model to alex_4.h5\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.0822 - accuracy: 0.5263 - val_loss: 0.9865 - val_accuracy: 0.5417\n",
"Epoch 21/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1573 - accuracy: 0.5063\n",
"Epoch 21: val_accuracy did not improve from 0.54167\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.1573 - accuracy: 0.5063 - val_loss: 1.5426 - val_accuracy: 0.3490\n",
"Epoch 22/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0643 - accuracy: 0.5400\n",
"Epoch 22: val_accuracy did not improve from 0.54167\n",
"25/25 [==============================] - 26s 1s/step - loss: 1.0643 - accuracy: 0.5400 - val_loss: 1.1197 - val_accuracy: 0.4896\n",
"Epoch 23/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0817 - accuracy: 0.5512\n",
"Epoch 23: val_accuracy improved from 0.54167 to 0.56771, saving model to alex_4.h5\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.0817 - accuracy: 0.5512 - val_loss: 1.0690 - val_accuracy: 0.5677\n",
"Epoch 24/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0167 - accuracy: 0.5600\n",
"Epoch 24: val_accuracy did not improve from 0.56771\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.0167 - accuracy: 0.5600 - val_loss: 1.0323 - val_accuracy: 0.5208\n",
"Epoch 25/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1168 - accuracy: 0.5537\n",
"Epoch 25: val_accuracy did not improve from 0.56771\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.1168 - accuracy: 0.5537 - val_loss: 1.1679 - val_accuracy: 0.4948\n"
]
}
],
"source": [
"checkpoint = ModelCheckpoint(\"alex_4.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch')\n",
"early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"\n",
"alex4 = model_flat_pool_drop.fit(\n",
"    train_ds,\n",
"    steps_per_epoch=len(train_ds),\n",
"    validation_data=validation_ds,\n",
"    validation_steps=len(validation_ds),\n",
"    epochs=25,\n",
"    callbacks=[checkpoint, early])"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAkUAAAHHCAYAAACx7iyPAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACp50lEQVR4nOzdd3QUVRvH8e/uJtn03iGQhF5CkRKKCAgSqtKL0psioIAVUQF9FQsoIHaRgEpHUGz03lvonZBAKumkbrI77x+BlTWhBJJsAs/nnD3JztyZeWZF9sedO3dUiqIoCCGEEEI84tTmLkAIIYQQoiyQUCSEEEIIgYQiIYQQQghAQpEQQgghBCChSAghhBACkFAkhBBCCAFIKBJCCCGEACQUCSGEEEIAEoqEEEIIIQAJRUKIUqZSqZg2bVqRt7t8+TIqlYrQ0NBir0kIIUBCkRCPpNDQUFQqFSqVip07dxZYrygKfn5+qFQqunbtaoYKhRCi9EkoEuIRZm1tzeLFiwss37ZtG1evXkWr1ZqhKiGEMA8JRUI8wjp37syKFSvIy8szWb548WIaNWqEt7e3mSp7dGRkZJi7BCHEDRKKhHiEDRgwgMTERDZs2GBcptPpWLlyJc8++2yh22RkZPDKK6/g5+eHVqulRo0azJw5E0VRTNrl5OQwceJEPDw8cHBw4Omnn+bq1auF7jMqKorhw4fj5eWFVqulTp06/Pjjj/d1TklJSbz66qsEBQVhb2+Po6MjnTp14ujRowXaZmdnM23aNKpXr461tTU+Pj707NmTixcvGtsYDAbmzJlDUFAQ1tbWeHh40LFjRw4ePAjceazTf8dPTZs2DZVKxalTp3j22WdxcXHh8ccfB+DYsWMMHTqUwMBArK2t8fb2Zvjw4SQmJhb6eY0YMQJfX1+0Wi0BAQGMGTMGnU7HpUuXUKlUfP755wW22717NyqViiVLlhT1YxXikWBh7gKEEObj7+9P8+bNWbJkCZ06dQLg77//JjU1lf79+zN37lyT9oqi8PTTT7NlyxZGjBhBgwYNWLduHa+99hpRUVEmX8QjR47k559/5tlnn6VFixZs3ryZLl26FKghLi6OZs2aoVKpGDduHB4eHvz999+MGDGCtLQ0JkyYUKRzunTpEmvWrKFPnz4EBAQQFxfHt99+S+vWrTl16hS+vr4A6PV6unbtyqZNm+jfvz8vv/wy169fZ8OGDZw4cYIqVaoAMGLECEJDQ+nUqRMjR44kLy+PHTt2sHfvXho3blyk2m7q06cP1apV48MPPzSGyQ0bNnDp0iWGDRuGt7c3J0+e5LvvvuPkyZPs3bsXlUoFQHR0NE2bNiUlJYXRo0dTs2ZNoqKiWLlyJZmZmQQGBtKyZUt++eUXJk6caHLcX375BQcHB5555pn7qluIh54ihHjkLFiwQAGUAwcOKPPmzVMcHByUzMxMRVEUpU+fPkrbtm0VRVGUypUrK126dDFut2bNGgVQ/ve//5nsr3fv3opKpVIuXLigKIqihIWFKYDy4osvmrR79tlnFUCZOnWqcdmIESMUHx8fJSEhwaRt//79FScnJ2Nd4eHhCqAsWLDgjueWnZ2t6PV6k2Xh4eGKVqtV3nvvPeOyH3/8UQGUzz77rMA+DAaDoiiKsnnzZgVQXnrppdu2uVNd/z3XqVOnKoAyYMCAAm1vnuetlixZogDK9u3bjcsGDx6sqNVq5cCBA7et6dtvv1UA5fTp08Z1Op1OcXd3V4YMGVJgOyFEPrl8JsQjrm/fvmRlZfHHH39w/fp1/vjjj9teOvvrr7/QaDS89NJLJstfeeUVFEXh77//NrYDCrT7b6+PoiisWrWKbt26oSgKCQkJxldISAipqakcPny4SOej1WpRq/P/atPr9SQmJmJvb0+NGjVM9rVq1Src3d0ZP358gX3c7JVZtWoVKpWKqVOn3rbN/XjhhRcKLLOxsTH+np2dTUJCAs2aNQMw1m0wGFizZg
3dunUrtJfqZk19+/bF2tqaX375xbhu3bp1JCQkMHDgwPuuW4iHnYQiIR5xHh4etG/fnsWLF/Prr7+i1+vp3bt3oW0jIiLw9fXFwcHBZHmtWrWM62/+VKvVxktQN9WoUcPk/bVr10hJSeG7777Dw8PD5DVs2DAA4uPji3Q+BoOBzz//nGrVqqHVanF3d8fDw4Njx46RmppqbHfx4kVq1KiBhcXtRxFcvHgRX19fXF1di1TD3QQEBBRYlpSUxMsvv4yXlxc2NjZ4eHgY292s+9q1a6SlpVG3bt077t/Z2Zlu3bqZ3Fn4yy+/UKFCBZ588sliPBMhHi4ypkgIwbPPPsuoUaOIjY2lU6dOODs7l8pxDQYDAAMHDmTIkCGFtqlXr16R9vnhhx/yzjvvMHz4cN5//31cXV1Rq9VMmDDBeLzidLseI71ef9ttbu0Vuqlv377s3r2b1157jQYNGmBvb4/BYKBjx473VffgwYNZsWIFu3fvJigoiN9//50XX3zR2IsmhChIQpEQgh49evD888+zd+9eli1bdtt2lStXZuPGjVy/ft2kt+jMmTPG9Td/GgwGY2/MTWfPnjXZ38070/R6Pe3bty+Wc1m5ciVt27Zl/vz5JstTUlJwd3c3vq9SpQr79u0jNzcXS0vLQvdVpUoV1q1bR1JS0m17i1xcXIz7v9XNXrN7kZyczKZNm5g+fTrvvvuucfn58+dN2nl4eODo6MiJEyfuus+OHTvi4eHBL7/8QnBwMJmZmQwaNOieaxLiUST/ZBBCYG9vz9dff820adPo1q3bbdt17twZvV7PvHnzTJZ//vnnqFQq4x1sN3/+9+612bNnm7zXaDT06tWLVatWFfpFf+3atSKfi0ajKTA9wIoVK4iKijJZ1qtXLxISEgqcC2DcvlevXiiKwvTp02/bxtHREXd3d7Zv326y/quvvipSzbfu86b/fl5qtZru3buzdu1a45QAhdUEYGFhwYABA1i+fDmhoaEEBQUVuddNiEeN9BQJIQBue/nqVt26daNt27ZMmTKFy5cvU79+fdavX89vv/3GhAkTjGOIGjRowIABA/jqq69ITU2lRYsWbNq0iQsXLhTY50cffcSWLVsIDg5m1KhR1K5dm6SkJA4fPszGjRtJSkoq0nl07dqV9957j2HDhtGiRQuOHz/OL7/8QmBgoEm7wYMHs2jRIiZNmsT+/ftp1aoVGRkZbNy4kRdffJFnnnmGtm3bMmjQIObOncv58+eNl7J27NhB27ZtGTduHJA//cBHH33EyJEjady4Mdu3b+fcuXP3XLOjoyNPPPEEn3zyCbm5uVSoUIH169cTHh5eoO2HH37I+vXrad26NaNHj6ZWrVrExMSwYsUKdu7caXLpc/DgwcydO5ctW7bw8ccfF+lzFOKRZLb73oQQZnPrLfl38t9b8hVFUa5fv65MnDhR8fX1VSwtLZVq1aopn376qfF28JuysrKUl156SXFzc1Ps7OyUbt26KVeuXClwm7qiKEpcXJwyduxYxc/PT7G0tFS8vb2Vdu3aKd99952xTVFuyX/llVcUHx8fxcbGRmnZsqWyZ88epXXr1krr1q1N2mZmZipTpkxRAgICjMft3bu3cvHiRWObvLw85dNPP1Vq1qypWFlZKR4eHkqnTp2UQ4cOmexnxIgRipOTk+Lg4KD07dtXiY+Pv+0t+deuXStQ99WrV5UePXoozs7OipOTk9KnTx8lOjq60M8rIiJCGTx4sOLh4aFotVolMDBQGTt2rJKTk1Ngv3Xq1FHUarVy9erVO35uQghFUSnKf/prhRBCPDQaNmyIq6srmzZtMncpQpR5MqZICCEeUgcPHiQsLIzBgwebuxQhygXpKRJCiIfMiRMnOHToELNmzSIhIYFLly5hbW1t7rKEKPOkp0gIIR4yK1euZNiwYeTm5rJkyRIJRELcI+kpEkIIIYRAeoqEEEIIIQAJRUIIIYQQgEzeWCiDwUB0dDQODg4P9CRsIYQQQp
QeRVG4fv06vr6+9/WcPwlFhYiOjsbPz8/cZQghhBDiPly5coWKFSsWeTsJRYW4+aDLK1eu4OjoaOZqhBBCCHEv0tL
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"plt.plot(alex4.history[\"accuracy\"])\n",
"plt.plot(alex4.history['val_accuracy'])\n",
"plt.plot(alex4.history['loss'])\n",
"plt.plot(alex4.history['val_loss'])\n",
"plt.title(\"Model accuracy\")\n",
"plt.ylabel(\"Value\")\n",
"plt.xlabel(\"Epoch\")\n",
"plt.legend([\"Accuracy\",\"Validation Accuracy\",\"Loss\",\"Validation Loss\"])\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 3s 321ms/step - loss: 1.2209 - accuracy: 0.5000\n"
]
},
{
"data": {
"text/plain": [
"[1.220850944519043, 0.5]"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model_flat_pool_drop.evaluate(test_ds)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Do warstw spłaszczonych i splotowych"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [],
"source": [
"model_flat_conv_drop = keras.models.Sequential([\n",
" keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Dense(10, activation='softmax')\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_4\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d_20 (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" dropout_15 (Dropout) (None, 55, 55, 96) 0 \n",
" \n",
" max_pooling2d_12 (MaxPoolin (None, 27, 27, 96) 0 \n",
" g2D) \n",
" \n",
" conv2d_21 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" dropout_16 (Dropout) (None, 27, 27, 256) 0 \n",
" \n",
" max_pooling2d_13 (MaxPoolin (None, 13, 13, 256) 0 \n",
" g2D) \n",
" \n",
" conv2d_22 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" dropout_17 (Dropout) (None, 13, 13, 384) 0 \n",
" \n",
" conv2d_23 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" dropout_18 (Dropout) (None, 13, 13, 384) 0 \n",
" \n",
" conv2d_24 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" dropout_19 (Dropout) (None, 13, 13, 256) 0 \n",
" \n",
" max_pooling2d_14 (MaxPoolin (None, 6, 6, 256) 0 \n",
" g2D) \n",
" \n",
" flatten_4 (Flatten) (None, 9216) 0 \n",
" \n",
" dense_12 (Dense) (None, 4096) 37752832 \n",
" \n",
" dropout_20 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_13 (Dense) (None, 4096) 16781312 \n",
" \n",
" dropout_21 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_14 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,322,314\n",
"Trainable params: 58,322,314\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"model_flat_conv_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=.001), metrics=['accuracy'])\n",
"model_flat_conv_drop.summary()"
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/1544533144.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex5 = model_flat_conv_drop.fit_generator(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"25/25 [==============================] - ETA: 0s - loss: 1.8865 - accuracy: 0.2087\n",
"Epoch 1: val_accuracy improved from -inf to 0.31771, saving model to alex_5.h5\n",
"25/25 [==============================] - 31s 1s/step - loss: 1.8865 - accuracy: 0.2087 - val_loss: 2.1611 - val_accuracy: 0.3177\n",
"Epoch 2/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6987 - accuracy: 0.2250\n",
"Epoch 2: val_accuracy did not improve from 0.31771\n",
"25/25 [==============================] - 33s 1s/step - loss: 1.6987 - accuracy: 0.2250 - val_loss: 2.1324 - val_accuracy: 0.1823\n",
"Epoch 3/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6349 - accuracy: 0.2675\n",
"Epoch 3: val_accuracy did not improve from 0.31771\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.6349 - accuracy: 0.2675 - val_loss: 2.0670 - val_accuracy: 0.3125\n",
"Epoch 4/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5613 - accuracy: 0.3212\n",
"Epoch 4: val_accuracy improved from 0.31771 to 0.34896, saving model to alex_5.h5\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.5613 - accuracy: 0.3212 - val_loss: 2.0176 - val_accuracy: 0.3490\n",
"Epoch 5/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.4594 - accuracy: 0.3587\n",
"Epoch 5: val_accuracy did not improve from 0.34896\n",
"25/25 [==============================] - 30s 1s/step - loss: 1.4594 - accuracy: 0.3587 - val_loss: 1.9236 - val_accuracy: 0.3177\n",
"Epoch 6/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3418 - accuracy: 0.4050\n",
"Epoch 6: val_accuracy improved from 0.34896 to 0.38021, saving model to alex_5.h5\n",
"25/25 [==============================] - 30s 1s/step - loss: 1.3418 - accuracy: 0.4050 - val_loss: 1.8750 - val_accuracy: 0.3802\n",
"Epoch 7/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3014 - accuracy: 0.4437\n",
"Epoch 7: val_accuracy did not improve from 0.38021\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.3014 - accuracy: 0.4437 - val_loss: 2.0340 - val_accuracy: 0.1979\n",
"Epoch 8/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.2022 - accuracy: 0.4638\n",
"Epoch 8: val_accuracy improved from 0.38021 to 0.44271, saving model to alex_5.h5\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.2022 - accuracy: 0.4638 - val_loss: 1.7184 - val_accuracy: 0.4427\n",
"Epoch 9/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1867 - accuracy: 0.4712\n",
"Epoch 9: val_accuracy did not improve from 0.44271\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.1867 - accuracy: 0.4712 - val_loss: 1.8339 - val_accuracy: 0.3385\n",
"Epoch 10/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0586 - accuracy: 0.5225\n",
"Epoch 10: val_accuracy improved from 0.44271 to 0.44792, saving model to alex_5.h5\n",
"25/25 [==============================] - 30s 1s/step - loss: 1.0586 - accuracy: 0.5225 - val_loss: 1.6957 - val_accuracy: 0.4479\n",
"Epoch 11/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1329 - accuracy: 0.4988\n",
"Epoch 11: val_accuracy did not improve from 0.44792\n",
"25/25 [==============================] - 31s 1s/step - loss: 1.1329 - accuracy: 0.4988 - val_loss: 1.7963 - val_accuracy: 0.3646\n",
"Epoch 12/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0527 - accuracy: 0.5387\n",
"Epoch 12: val_accuracy did not improve from 0.44792\n",
"25/25 [==============================] - 33s 1s/step - loss: 1.0527 - accuracy: 0.5387 - val_loss: 1.7027 - val_accuracy: 0.4062\n",
"Epoch 13/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1811 - accuracy: 0.5063\n",
"Epoch 13: val_accuracy did not improve from 0.44792\n",
"25/25 [==============================] - 30s 1s/step - loss: 1.1811 - accuracy: 0.5063 - val_loss: 1.7790 - val_accuracy: 0.3542\n",
"Epoch 14/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0314 - accuracy: 0.5450\n",
"Epoch 14: val_accuracy did not improve from 0.44792\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.0314 - accuracy: 0.5450 - val_loss: 1.6602 - val_accuracy: 0.4323\n",
"Epoch 15/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0199 - accuracy: 0.5663\n",
"Epoch 15: val_accuracy did not improve from 0.44792\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.0199 - accuracy: 0.5663 - val_loss: 1.7097 - val_accuracy: 0.3542\n",
"Epoch 16/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0358 - accuracy: 0.5525\n",
"Epoch 16: val_accuracy did not improve from 0.44792\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.0358 - accuracy: 0.5525 - val_loss: 1.7355 - val_accuracy: 0.3177\n",
"Epoch 17/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9676 - accuracy: 0.5875\n",
"Epoch 17: val_accuracy improved from 0.44792 to 0.54167, saving model to alex_5.h5\n",
"25/25 [==============================] - 28s 1s/step - loss: 0.9676 - accuracy: 0.5875 - val_loss: 1.5246 - val_accuracy: 0.5417\n",
"Epoch 18/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9063 - accuracy: 0.5950\n",
"Epoch 18: val_accuracy did not improve from 0.54167\n",
"25/25 [==============================] - 28s 1s/step - loss: 0.9063 - accuracy: 0.5950 - val_loss: 1.5602 - val_accuracy: 0.4688\n",
"Epoch 19/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9411 - accuracy: 0.6250\n",
"Epoch 19: val_accuracy did not improve from 0.54167\n",
"25/25 [==============================] - 28s 1s/step - loss: 0.9411 - accuracy: 0.6250 - val_loss: 1.7089 - val_accuracy: 0.2917\n",
"Epoch 20/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.8750 - accuracy: 0.6475\n",
"Epoch 20: val_accuracy did not improve from 0.54167\n",
"25/25 [==============================] - 28s 1s/step - loss: 0.8750 - accuracy: 0.6475 - val_loss: 1.7448 - val_accuracy: 0.2812\n",
"Epoch 21/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.8677 - accuracy: 0.6087\n",
"Epoch 21: val_accuracy did not improve from 0.54167\n",
"25/25 [==============================] - 28s 1s/step - loss: 0.8677 - accuracy: 0.6087 - val_loss: 1.5079 - val_accuracy: 0.5000\n",
"Epoch 22/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.8868 - accuracy: 0.6275\n",
"Epoch 22: val_accuracy did not improve from 0.54167\n",
"25/25 [==============================] - 28s 1s/step - loss: 0.8868 - accuracy: 0.6275 - val_loss: 1.6442 - val_accuracy: 0.3073\n",
"Epoch 23/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.8708 - accuracy: 0.6338\n",
"Epoch 23: val_accuracy did not improve from 0.54167\n",
"25/25 [==============================] - 29s 1s/step - loss: 0.8708 - accuracy: 0.6338 - val_loss: 1.6207 - val_accuracy: 0.3646\n",
"Epoch 24/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.7959 - accuracy: 0.6712\n",
"Epoch 24: val_accuracy did not improve from 0.54167\n",
"25/25 [==============================] - 29s 1s/step - loss: 0.7959 - accuracy: 0.6712 - val_loss: 1.6913 - val_accuracy: 0.3073\n",
"Epoch 25/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.8158 - accuracy: 0.6775\n",
"Epoch 25: val_accuracy did not improve from 0.54167\n",
"25/25 [==============================] - 29s 1s/step - loss: 0.8158 - accuracy: 0.6775 - val_loss: 1.4933 - val_accuracy: 0.4323\n"
]
}
],
"source": [
"checkpoint = ModelCheckpoint(\"alex_5.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch')\n",
"early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"\n",
"alex5 = model_flat_conv_drop.fit(\n",
"    train_ds,\n",
"    steps_per_epoch=len(train_ds),\n",
"    validation_data=validation_ds,\n",
"    validation_steps=len(validation_ds),\n",
"    epochs=25,\n",
"    callbacks=[checkpoint, early])"
]
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAkUAAAHHCAYAAACx7iyPAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAADDFUlEQVR4nOzdd3gUxRvA8e9dei8kIQkEQugdpQmIVClCBOlNOggCUhURpIhKrwIqNaCCSFV67yBVeg+BhFRI7+Vuf39E7kdMgISUS8L7eZ59yO3Ozrx7BO692dkZlaIoCkIIIYQQbzi1vgMQQgghhMgPJCkSQgghhECSIiGEEEIIQJIiIYQQQghAkiIhhBBCCECSIiGEEEIIQJIiIYQQQghAkiIhhBBCCECSIiGEEEIIQJIiIUQeU6lUTJ06NcvnPXz4EJVKhZeXV47HJIQQIEmREG8kLy8vVCoVKpWKkydPpjuuKApubm6oVCratm2rhwiFECLvSVIkxBvM1NSU9evXp9t/7NgxHj9+jImJiR6iEkII/ZCkSIg32AcffMCmTZtISUlJs3/9+vXUrFkTZ2dnPUX25oiNjdV3CEKIf0lSJMQbrHv37oSGhnLgwAHdvqSkJDZv3kyPHj0yPCc2NpaxY8fi5uaGiYkJ5cuXZ+7cuSiKkqZcYmIio0ePxtHRESsrKz788EMeP36cYZ3+/v7079+fokWLYmJiQuXKlVm9evVrXVNYWBjjxo2jatWqWFpaYm1tTevWrbly5Uq6sgkJCUydOpVy5cphamqKi4sLHTp0wNvbW1dGq9WyaNEiqlatiqmpKY6OjrRq1YoLFy4ALx/r9N/xU1OnTkWlUnHz5k169OiBnZ0d7777LgBXr16lb9++eHh4YGpqirOzM/379yc0NDTD92vAgAG4urpiYmJCqVKlGDp0KElJSTx48ACVSsWCBQvSnXf69GlUKhUbNmzI6tsqxBvBUN8BCCH0x93dnXr16rFhwwZat24NwJ49e4iMjKRbt24sXrw4TXlFUfjwww85cuQIAwYMoEaNGuzbt4/PP/8cf3//NB/EAwcO5Ndff6VHjx7Ur1+fw4cP06ZNm3QxBAcH884776BSqRg+fDiOjo7s2bOHAQMGEBUVxahRo7J0TQ8ePGD79u107tyZUqVKERwczM8//0yjRo24efMmrq6uAGg0Gtq2bcuhQ4fo1q0bI0eOJDo6mgMHDnD9+nVKly4NwIABA/Dy8qJ169YMHDiQlJQUTpw4wd9//02tWrWyFNsznTt3pmzZsnz//fe6ZPLAgQM8ePCAfv364ezszI0bN1i+fDk3btzg77//RqVSARAQEECdOnWIiIhg8ODBVKhQAX9/fzZv3kxcXBweHh40aNCA3377jdGjR6dp97fffsPKyop27dq9VtxCFHqKEOKNs2bNGgVQzp8/ryxZskSxsrJS4uLiFEVRlM6dOytNmjRRFEVRSpYsqbRp00Z33vbt2xVA+fbbb9PU16lTJ0WlUin3799XFEVRLl++rADKp59+mqZcjx49FECZMmWKbt+AAQMUFxcX5enTp2nKduvWTbGxsdHF5ePjowDKmjVrXnptCQkJikajSbPPx8dHMTExUb755hvdvtWrVyuAMn/+/HR1aLVaRVEU5fDhwwqgfPbZZy8s87K4/nutU6ZMUQCle/fu6co+u87nbdiwQQGU48eP6/b17t1bUavVyvnz518Y088//6wAyq1bt3THkpKSFAcHB6VPnz7pzhNCpJLbZ0K84bp06UJ8fDw7d+4kOjqanTt3vvDW2e7duzEwMOCzzz5Ls3/s2LEoisKePXt05YB05f7b66MoClu2bMHT0xNFUXj69Klua9myJZGRkVy6dClL12NiYoJanfpfm0ajITQ0FEtLS8qXL5+mri1btuDg4MCIESPS1fGsV2bLli2oVCqmTJnywjKvY8iQIen2mZmZ6X5OSEjg6dOnvPPOOwC6uLVaLdu3b8
fT0zPDXqpnMXXp0gVTU1N+++033bF9+/bx9OlTevXq9dpxC1HYSVIkxBvO0dGR5s2bs379erZu3YpGo6FTp04Zln306BGurq5YWVml2V+xYkXd8Wd/qtVq3S2oZ8qXL5/m9ZMnT4iIiGD58uU4Ojqm2fr16wdASEhIlq5Hq9WyYMECypYti4mJCQ4ODjg6OnL16lUiIyN15by9vSlfvjyGhi8eReDt7Y2rqyv29vZZiuFVSpUqlW5fWFgYI0eOpGjRopiZmeHo6Kgr9yzuJ0+eEBUVRZUqVV5av62tLZ6enmmeLPztt98oVqwYTZs2zcErEaJwkTFFQgh69OjBoEGDCAoKonXr1tja2uZJu1qtFoBevXrRp0+fDMtUq1YtS3V+//33fP311/Tv35/p06djb2+PWq1m1KhRuvZy0ot6jDQazQvPeb5X6JkuXbpw+vRpPv/8c2rUqIGlpSVarZZWrVq9Vty9e/dm06ZNnD59mqpVq/LXX3/x6aef6nrRhBDpSVIkhOCjjz7ik08+4e+//2bjxo0vLFeyZEkOHjxIdHR0mt6i27dv644/+1Or1ep6Y565c+dOmvqePZmm0Who3rx5jlzL5s2badKkCatWrUqzPyIiAgcHB93r0qVLc/bsWZKTkzEyMsqwrtKlS7Nv3z7CwsJe2FtkZ2enq/95z3rNMiM8PJxDhw4xbdo0Jk+erNt/7969NOUcHR2xtrbm+vXrr6yzVatWODo68ttvv1G3bl3i4uL4+OOPMx2TEG8i+coghMDS0pIff/yRqVOn4unp+cJyH3zwARqNhiVLlqTZv2DBAlQqle4Jtmd//vfptYULF6Z5bWBgQMeOHdmyZUuGH/RPnjzJ8rUYGBikmx5g06ZN+Pv7p9nXsWNHnj59mu5aAN35HTt2RFEUpk2b9sIy1tbWODg4cPz48TTHly1blqWYn6/zmf++X2q1mvbt27Njxw7dlAAZxQRgaGhI9+7d+eOPP/Dy8qJq1apZ7nUT4k0jPUVCCIAX3r56nqenJ02aNGHixIk8fPiQ6tWrs3//fv78809GjRqlG0NUo0YNunfvzrJly4iMjKR+/focOnSI+/fvp6tz5syZHDlyhLp16zJo0CAqVapEWFgYly5d4uDBg4SFhWXpOtq2bcs333xDv379qF+/PteuXeO3337Dw8MjTbnevXuzbt06xowZw7lz52jYsCGxsbEcPHiQTz/9lHbt2tGkSRM+/vhjFi9ezL1793S3sk6cOEGTJk0YPnw4kDr9wMyZMxk4cCC1atXi+PHj3L17N9MxW1tb89577zF79mySk5MpVqwY+/fvx8fHJ13Z77//nv3799OoUSMGDx5MxYoVCQwMZNOmTZw8eTLNrc/evXuzePFijhw5wqxZs7L0PgrxRtLbc29CCL15/pH8l/nvI/mKoijR0dHK6NGjFVdXV8XIyEgpW7asMmfOHN3j4M/Ex8crn332mVKkSBHFwsJC8fT0VPz8/NI9pq4oihIcHKwMGzZMcXNzU4yMjBRnZ2elWbNmyvLly3VlsvJI/tixYxUXFxfFzMxMadCggXLmzBmlUaNGSqNGjdKUjYuLUyZOnKiUKlVK126nTp0Ub29vXZmUlBRlzpw5SoUKFRRjY2PF0dFRad26tXLx4sU09QwYMECxsbFRrKyslC5duighISEvfCT/yZMn6eJ+/Pix8tFHHym2traKjY2N0rlzZyUgICDD9+vRo0dK7969FUdHR8XExETx8PBQhg0bpiQmJqart3LlyoparVYeP3780vdNCKEoKkX5T3+tEEKIQuOtt97C3t6eQ4cO6TsUIfI9GVMkhBCF1IULF7h8+TK9e/fWdyhCFAjSUySEEIXM9evXuXjxIvPmzePp06c8ePAAU1NTfYclRL4nPUVCCFHIbN68mX79+pGcnMyGDRskIRIik6SnSAghhBAC6SkSQgghhAD0nBTNmDGD2rVrY2VlhZOTE+3bt0834+1/rVixgoYNG2
JnZ4ednR3Nmzfn3Llzacr07dsXlUqVZmvVqlVuXooQQgghCji9Tt547Ngxhg0bRu3atUlJSeGrr76iRYsW3Lx5Ews
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
2023-01-06 21:43:41 +01:00
}
],
"source": [
"# Plot training/validation accuracy and loss for run `alex5` on one chart.\n",
"for metric in [\"accuracy\", \"val_accuracy\", \"loss\", \"val_loss\"]:\n",
"    plt.plot(alex5.history[metric])\n",
"plt.title(\"Model accuracy\")\n",
"plt.ylabel(\"Value\")\n",
"plt.xlabel(\"Epoch\")\n",
"plt.legend([\"Accuracy\",\"Validation Accuracy\",\"Loss\",\"Validation Loss\"])\n",
"plt.show()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 38,
2023-01-06 21:43:41 +01:00
"metadata": {},
"outputs": [
2022-12-10 11:12:06 +01:00
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 2s 307ms/step - loss: 1.4823 - accuracy: 0.4531\n"
2023-01-06 21:43:41 +01:00
]
2023-01-07 00:39:16 +01:00
},
{
"data": {
"text/plain": [
"[1.4823071956634521, 0.453125]"
]
},
"execution_count": 38,
"metadata": {},
"output_type": "execute_result"
2022-12-10 11:12:06 +01:00
}
],
2022-12-10 00:55:36 +01:00
"source": [
"# Evaluate the flatten+conv dropout model on the held-out test set;\n",
"# the last expression displays [loss, accuracy].\n",
"test_metrics = model_flat_conv_drop.evaluate(test_ds)\n",
"test_metrics"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Do warstw maxpooling i splotowych"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 39,
2022-12-10 00:55:36 +01:00
"metadata": {},
2023-01-07 00:39:16 +01:00
"outputs": [],
2022-12-10 00:55:36 +01:00
"source": [
"# AlexNet-style network with Dropout(0.5) after every conv and max-pooling layer\n",
"# (but not after the dense layers). Built incrementally with .add() for readability.\n",
"model_pool_conv_drop = keras.models.Sequential()\n",
"# Feature extractor: five conv layers with three max-pool stages, dropout throughout.\n",
"model_pool_conv_drop.add(keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)))\n",
"model_pool_conv_drop.add(keras.layers.Dropout(.5))\n",
"model_pool_conv_drop.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_pool_conv_drop.add(keras.layers.Dropout(.5))\n",
"model_pool_conv_drop.add(keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_pool_conv_drop.add(keras.layers.Dropout(.5))\n",
"model_pool_conv_drop.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_pool_conv_drop.add(keras.layers.Dropout(.5))\n",
"model_pool_conv_drop.add(keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_pool_conv_drop.add(keras.layers.Dropout(.5))\n",
"model_pool_conv_drop.add(keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_pool_conv_drop.add(keras.layers.Dropout(.5))\n",
"model_pool_conv_drop.add(keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_pool_conv_drop.add(keras.layers.Dropout(.5))\n",
"model_pool_conv_drop.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_pool_conv_drop.add(keras.layers.Dropout(.5))\n",
"# Classifier head: two fully-connected layers, 10-way softmax output.\n",
"model_pool_conv_drop.add(keras.layers.Flatten())\n",
"model_pool_conv_drop.add(keras.layers.Dense(4096, activation='relu'))\n",
"model_pool_conv_drop.add(keras.layers.Dense(4096, activation='relu'))\n",
"model_pool_conv_drop.add(keras.layers.Dense(10, activation='softmax'))"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 40,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_5\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d_25 (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" dropout_22 (Dropout) (None, 55, 55, 96) 0 \n",
" \n",
" max_pooling2d_15 (MaxPoolin (None, 27, 27, 96) 0 \n",
" g2D) \n",
" \n",
" dropout_23 (Dropout) (None, 27, 27, 96) 0 \n",
" \n",
" conv2d_26 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" dropout_24 (Dropout) (None, 27, 27, 256) 0 \n",
" \n",
" max_pooling2d_16 (MaxPoolin (None, 13, 13, 256) 0 \n",
" g2D) \n",
" \n",
" dropout_25 (Dropout) (None, 13, 13, 256) 0 \n",
" \n",
" conv2d_27 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" dropout_26 (Dropout) (None, 13, 13, 384) 0 \n",
" \n",
" conv2d_28 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" dropout_27 (Dropout) (None, 13, 13, 384) 0 \n",
" \n",
" conv2d_29 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" dropout_28 (Dropout) (None, 13, 13, 256) 0 \n",
" \n",
" max_pooling2d_17 (MaxPoolin (None, 6, 6, 256) 0 \n",
" g2D) \n",
" \n",
" dropout_29 (Dropout) (None, 6, 6, 256) 0 \n",
" \n",
" flatten_5 (Flatten) (None, 9216) 0 \n",
" \n",
" dense_15 (Dense) (None, 4096) 37752832 \n",
" \n",
" dense_16 (Dense) (None, 4096) 16781312 \n",
" \n",
" dense_17 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,322,314\n",
"Trainable params: 58,322,314\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
2022-12-10 11:12:06 +01:00
]
}
],
2022-12-10 00:55:36 +01:00
"source": [
"# Compile with SGD. Use `learning_rate=` — the `lr=` alias is deprecated and\n",
"# emitted \"WARNING:absl:`lr` is deprecated\" in the previous run.\n",
"model_pool_conv_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=.001), metrics=['accuracy'])\n",
"model_pool_conv_drop.summary()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 41,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/3120705445.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex6 = model_pool_conv_drop.fit_generator(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"25/25 [==============================] - ETA: 0s - loss: 1.8171 - accuracy: 0.2288\n",
"Epoch 1: val_accuracy improved from -inf to 0.27604, saving model to alex_6.h5\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.8171 - accuracy: 0.2288 - val_loss: 2.2332 - val_accuracy: 0.2760\n",
"Epoch 2/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6441 - accuracy: 0.2512\n",
"Epoch 2: val_accuracy did not improve from 0.27604\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.6441 - accuracy: 0.2512 - val_loss: 2.2203 - val_accuracy: 0.1823\n",
"Epoch 3/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5645 - accuracy: 0.3013\n",
"Epoch 3: val_accuracy did not improve from 0.27604\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.5645 - accuracy: 0.3013 - val_loss: 2.1670 - val_accuracy: 0.2240\n",
"Epoch 4/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5076 - accuracy: 0.3237\n",
"Epoch 4: val_accuracy did not improve from 0.27604\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.5076 - accuracy: 0.3237 - val_loss: 2.1759 - val_accuracy: 0.1875\n",
"Epoch 5/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.4085 - accuracy: 0.3913\n",
"Epoch 5: val_accuracy did not improve from 0.27604\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.4085 - accuracy: 0.3913 - val_loss: 2.0652 - val_accuracy: 0.2083\n",
"Epoch 6/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3140 - accuracy: 0.4263\n",
"Epoch 6: val_accuracy did not improve from 0.27604\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.3140 - accuracy: 0.4263 - val_loss: 2.0968 - val_accuracy: 0.1875\n",
"Epoch 7/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3008 - accuracy: 0.4275\n",
"Epoch 7: val_accuracy did not improve from 0.27604\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.3008 - accuracy: 0.4275 - val_loss: 1.9457 - val_accuracy: 0.2760\n",
"Epoch 8/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.2462 - accuracy: 0.4700\n",
"Epoch 8: val_accuracy improved from 0.27604 to 0.34375, saving model to alex_6.h5\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.2462 - accuracy: 0.4700 - val_loss: 1.8961 - val_accuracy: 0.3438\n",
"Epoch 9/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.2202 - accuracy: 0.4737\n",
"Epoch 9: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.2202 - accuracy: 0.4737 - val_loss: 2.0365 - val_accuracy: 0.1979\n",
"Epoch 10/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1927 - accuracy: 0.4975\n",
"Epoch 10: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 30s 1s/step - loss: 1.1927 - accuracy: 0.4975 - val_loss: 2.0173 - val_accuracy: 0.2083\n",
"Epoch 11/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1185 - accuracy: 0.5138\n",
"Epoch 11: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.1185 - accuracy: 0.5138 - val_loss: 1.8485 - val_accuracy: 0.3385\n",
"Epoch 12/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1445 - accuracy: 0.5088\n",
"Epoch 12: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.1445 - accuracy: 0.5088 - val_loss: 1.8848 - val_accuracy: 0.2604\n",
"Epoch 13/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1042 - accuracy: 0.5387\n",
"Epoch 13: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.1042 - accuracy: 0.5387 - val_loss: 1.9293 - val_accuracy: 0.2135\n",
"Epoch 14/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0768 - accuracy: 0.5412\n",
"Epoch 14: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.0768 - accuracy: 0.5412 - val_loss: 1.9871 - val_accuracy: 0.1979\n",
"Epoch 15/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0332 - accuracy: 0.5512\n",
"Epoch 15: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 30s 1s/step - loss: 1.0332 - accuracy: 0.5512 - val_loss: 1.9616 - val_accuracy: 0.1927\n",
"Epoch 16/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0965 - accuracy: 0.5475\n",
"Epoch 16: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 35s 1s/step - loss: 1.0965 - accuracy: 0.5475 - val_loss: 1.8993 - val_accuracy: 0.2083\n",
"Epoch 17/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0335 - accuracy: 0.5387\n",
"Epoch 17: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 31s 1s/step - loss: 1.0335 - accuracy: 0.5387 - val_loss: 1.9000 - val_accuracy: 0.2188\n",
"Epoch 18/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0124 - accuracy: 0.5475\n",
"Epoch 18: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 32s 1s/step - loss: 1.0124 - accuracy: 0.5475 - val_loss: 1.9711 - val_accuracy: 0.1927\n",
"Epoch 19/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0936 - accuracy: 0.5512\n",
"Epoch 19: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 31s 1s/step - loss: 1.0936 - accuracy: 0.5512 - val_loss: 1.9364 - val_accuracy: 0.1927\n",
"Epoch 20/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9696 - accuracy: 0.5775\n",
"Epoch 20: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 31s 1s/step - loss: 0.9696 - accuracy: 0.5775 - val_loss: 1.8897 - val_accuracy: 0.1927\n",
"Epoch 21/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0047 - accuracy: 0.5288\n",
"Epoch 21: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.0047 - accuracy: 0.5288 - val_loss: 1.8192 - val_accuracy: 0.2083\n",
"Epoch 22/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9775 - accuracy: 0.5738\n",
"Epoch 22: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 29s 1s/step - loss: 0.9775 - accuracy: 0.5738 - val_loss: 1.9259 - val_accuracy: 0.1875\n",
"Epoch 23/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9873 - accuracy: 0.5763\n",
"Epoch 23: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 29s 1s/step - loss: 0.9873 - accuracy: 0.5763 - val_loss: 1.9257 - val_accuracy: 0.1979\n",
"Epoch 24/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9560 - accuracy: 0.5938\n",
"Epoch 24: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 29s 1s/step - loss: 0.9560 - accuracy: 0.5938 - val_loss: 1.8322 - val_accuracy: 0.2031\n",
"Epoch 25/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9225 - accuracy: 0.6100\n",
"Epoch 25: val_accuracy did not improve from 0.34375\n",
"25/25 [==============================] - 29s 1s/step - loss: 0.9225 - accuracy: 0.6100 - val_loss: 1.7558 - val_accuracy: 0.2448\n"
2022-12-10 11:12:06 +01:00
]
}
],
2022-12-10 00:55:36 +01:00
"source": [
"# Save the best model (by validation accuracy) each epoch.\n",
"# `save_freq='epoch'` replaces the deprecated `period=1` argument.\n",
"checkpoint = ModelCheckpoint(\"alex_6.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch')\n",
"early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"\n",
"# `Model.fit` accepts generators/datasets directly; `fit_generator` is deprecated\n",
"# (it raised a UserWarning in the previous run).\n",
"alex6 = model_pool_conv_drop.fit(\n",
"    train_ds,\n",
"    steps_per_epoch=len(train_ds),\n",
"    validation_data=validation_ds,\n",
"    validation_steps=len(validation_ds),\n",
"    epochs=25,\n",
"    callbacks=[checkpoint, early])"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 42,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAjwAAAHHCAYAAAC7soLdAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACUIElEQVR4nOzdd3hTZRvH8W+Spuneu5QO9igbZAooiIAoQ0E2Mpwgw/XiAvdAxYFbhjhAhuAAUfYWGbJkQ1va0r1n0iTn/SMQqawW2qYN9+e6eqU5OTnnTozk1+c8Q6UoioIQQgghhB1T27oAIYQQQojKJoFHCCGEEHZPAo8QQggh7J4EHiGEEELYPQk8QgghhLB7EniEEEIIYfck8AghhBDC7kngEUIIIYTdk8AjhBBCCLsngUcIUWFUKhUzZ84s9/NiY2NRqVQsWLCgwmsSQgiQwCOE3VmwYAEqlQqVSsW2bdsueVxRFMLCwlCpVNx11102qFAIIaqeBB4h7JSTkxPff//9Jds3b95MQkICOp3OBlUJIYRtSOARwk716dOHpUuXYjQaS23//vvvad26NUFBQTaq7OZRUFBg6xKEEOdJ4BHCTg0dOpSMjAzWrl1r3WYwGFi2bBnDhg277HMKCgp44oknCAsLQ6fT0aBBA9555x0URSm1n16vZ+rUqfj7++Pu7s7dd99NQkLCZY+ZmJjI2LFjCQwMRKfT0aRJE+bNm3ddrykzM5Mnn3yS6Oho3Nzc8PDwoHfv3hw4cOCSfYuLi5k5cyb169fHycmJ4OBgBg4cyOnTp637mM1mPvjgA6Kjo3FycsLf358777yTPXv2AFfvW/Tf/kozZ85EpVJx5MgRhg0bhre3N507dwbg4MGDjBkzhqioKJycnAgKCmLs2LFkZGRc9v0aN24cISEh6HQ6IiMjeeSRRzAYDJw5cwaVSsXs2bMved6OHTtQqVQsWrSovG+rEDcFB1sXIISoHBEREXTo0IFFixbRu3dvAH777TdycnK4//77+fDDD0vtrygKd999Nxs3bmTcuHG0aNGC33//naeeeorExMRSX7Ljx4/n22+/ZdiwYXTs2JENGzbQt2/fS2pISUmhffv2qFQqJk6ciL+/P7/99hvjxo0jNzeXKVOmlOs1nTlzhpUrV3LfffcRGRlJSkoKn3/+OV27duXIkSOEhIQAYDKZuOuuu1i/fj33338/kydPJi8vj7Vr13L48GHq1KkDwLhx41iwYAG9e/dm/PjxGI1Gtm7dyp9//kmbNm3KVdsF9913H/Xq1eP111+3BsW1a9dy5swZHnjgAYKCgvjnn3/44osv+Oeff/jzzz9RqVQAnDt3jnbt2pGdnc2DDz5Iw4YNSUxMZNmyZRQWFhIVFUWnTp347rvvmDp1aqnzfvfdd7i7u3PPPfdcV91C2D1FCGFX5s+frwDK7t27lTlz5iju7u5KYWGhoiiKct999yndu3dXFEVRwsPDlb59+1qft3LlSgVQXn311VLHu/feexWVSqWcOnVKURRF2b9/vwIojz76aKn9hg0bpgDKjBkzrNvGjRunBAcHK+np6aX2vf/++xVPT09rXTExMQqgzJ8//6qvrbi4WDGZTKW2xcTEKDqdTnn55Zet2+bNm6cAynvvvXfJMcxms6IoirJhwwYFUB5//PEr7nO1uv77WmfMmKEAytChQy/Z98LrvNiiRYsUQNmyZYt126hRoxS1Wq3s3r37ijV9/vnnCqAcPXrU+pjBYFD8/PyU0aNHX/I8IYSFXNISwo4NHjyYoqIifv31V/Ly8vj111+veDlr9erVaDQaHn/88VLbn3jiCRRF4bfffrPuB1yy339baxRFYfny5fTr1w9FUUhPT7f+9OrVi5ycHPbt21eu16PT6VCrLf9smUwmMjIycHNzo0GDBqWOtXz5cvz8/Jg0adIlx7jQmrJ8+XJUKhUzZsy44j7X4+GHH75km7Ozs/X34uJi0tPTad++PY
C1brPZzMqVK+nXr99lW5cu1DR48GCcnJz47rvvrI/9/vvvpKenM2LEiOuuWwh7J4FHCDvm7+9Pjx49+P777/nxxx8xmUzce++9l903Li6OkJAQ3N3dS21v1KiR9fELt2q12npZ6IIGDRqUup+WlkZ2djZffPEF/v7+pX4eeOABAFJTU8v1esxmM7Nnz6ZevXrodDr8/Pzw9/fn4MGD5OTkWPc7ffo0DRo0wMHhylftT58+TUhICD4+PuWq4VoiIyMv2ZaZmcnkyZMJDAzE2dkZf39/634X6k5LSyM3N5emTZte9fheXl7069ev1Ai87777jtDQUG677bYKfCVC2BfpwyOEnRs2bBgTJkwgOTmZ3r174+XlVSXnNZvNAIwYMYLRo0dfdp9mzZqV65ivv/46L7zwAmPHjuWVV17Bx8cHtVrNlClTrOerSFdq6TGZTFd8zsWtORcMHjyYHTt28NRTT9GiRQvc3Nwwm83ceeed11X3qFGjWLp0KTt27CA6Opqff/6ZRx991Nr6JYS4lAQeIezcgAEDeOihh/jzzz/54YcfrrhfeHg469atIy8vr1Qrz7Fjx6yPX7g1m83WVpQLjh8/Xup4F0ZwmUwmevToUSGvZdmyZXTv3p25c+eW2p6dnY2fn5/1fp06ddi1axclJSVotdrLHqtOnTr8/vvvZGZmXrGVx9vb23r8i11o7SqLrKws1q9fz0svvcSLL75o3X7y5MlS+/n7++Ph4cHhw4evecw777wTf39/vvvuO2655RYKCwsZOXJkmWsS4mYkfw4IYefc3Nz49NNPmTlzJv369bvifn369MFkMjFnzpxS22fPno1KpbKO9Lpw+99RXu+//36p+xqNhkGDBrF8+fLLfomnpaWV+7VoNJpLhsgvXbqUxMTEUtsGDRpEenr6Ja8FsD5/0KBBKIrCSy+9dMV9PDw88PPzY8uWLaUe/+STT8pV88XHvOC/75daraZ///788ssv1mHxl6sJwMHBgaFDh7JkyRIWLFhAdHR0uVvLhLjZSAuPEDeBK11Suli/fv3o3r07zz33HLGxsTRv3pw//viDn376iSlTplj77LRo0YKhQ4fyySefkJOTQ8eOHVm/fj2nTp265JhvvvkmGzdu5JZbbmHChAk0btyYzMxM9u3bx7p168jMzCzX67jrrrt4+eWXeeCBB+jYsSOHDh3iu+++IyoqqtR+o0aNYuHChUybNo2//vqLLl26UFBQwLp163j00Ue555576N69OyNHjuTDDz/k5MmT1stLW7dupXv37kycOBGwDMF/8803GT9+PG3atGHLli2cOHGizDV7eHhw66238vbbb1NSUkJoaCh//PEHMTExl+z7+uuv88cff9C1a1cefPBBGjVqRFJSEkuXLmXbtm2lLkeOGjWKDz/8kI0bN/LWW2+V630U4qZks/FhQohKcfGw9Kv577B0RVGUvLw8ZerUqUpISIii1WqVevXqKbNmzbIOib6gqKhIefzxxxVfX1/F1dVV6devnxIfH3/JUG1FUZSUlBTlscceU8LCwhStVqsEBQUpt99+u/LFF19Y9ynPsPQnnnhCCQ4OVpydnZVOnTopO3fuVLp27ap07dq11L6FhYXKc889p0RGRlrPe++99yqnT5+27mM0GpVZs2YpDRs2VBwdHRV/f3+ld+/eyt69e0sdZ9y4cYqnp6fi7u6uDB48WElNTb3isPS0tLRL6k5ISFAGDBigeHl5KZ6ensp9992nnDt37rLvV1xcnDJq1CjF399f0el0SlRUlPLYY48per3+kuM2adJEUavVSkJCwlXfNyGEoqgU5T/trEIIIWqEli1b4uPjw/r1621dihDVnvThEUKIGmjPnj3s37+fUaNG2boUIWoEaeERQoga5PDhw+zdu5d3332X9PR0zpw5g5OTk63LEqLakxYeIYSoQZYtW8YDDzxASUkJixYtkrAjRBlJC48QQggh7J608AghhBDC7kngEUIIIYTdu+
kmHjSbzZw7dw53d/cbWhFZCCGEEFVHURTy8vIICQm5rnXjbrrAc+7cOcLCwmxdhhBCCCGuQ3x8PLVq1Sr38266wHN
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
2023-01-06 21:43:41 +01:00
}
],
"source": [
"# Plot training/validation accuracy and loss for run `alex6` on one chart.\n",
"for metric in [\"accuracy\", \"val_accuracy\", \"loss\", \"val_loss\"]:\n",
"    plt.plot(alex6.history[metric])\n",
"plt.title(\"Model accuracy\")\n",
"plt.ylabel(\"Value\")\n",
"plt.xlabel(\"Epoch\")\n",
"plt.legend([\"Accuracy\",\"Validation Accuracy\",\"Loss\",\"Validation Loss\"])\n",
"plt.show()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 43,
2023-01-06 21:43:41 +01:00
"metadata": {},
"outputs": [
2022-12-10 11:12:06 +01:00
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 2s 306ms/step - loss: 1.7711 - accuracy: 0.2227\n"
2023-01-06 21:43:41 +01:00
]
2023-01-07 00:39:16 +01:00
},
{
"data": {
"text/plain": [
"[1.7710821628570557, 0.22265625]"
]
},
"execution_count": 43,
"metadata": {},
"output_type": "execute_result"
2022-12-10 11:12:06 +01:00
}
],
2022-12-10 00:55:36 +01:00
"source": [
"# Evaluate the pool+conv dropout model on the held-out test set;\n",
"# the last expression displays [loss, accuracy].\n",
"test_metrics = model_pool_conv_drop.evaluate(test_ds)\n",
"test_metrics"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Do warstw spłaszczonych, maxpooling i splotowych"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 44,
2022-12-10 00:55:36 +01:00
"metadata": {},
2023-01-07 00:39:16 +01:00
"outputs": [],
2022-12-10 00:55:36 +01:00
"source": [
"# AlexNet-style network with Dropout(0.5) after every conv, max-pooling AND dense\n",
"# layer. Built incrementally with .add() for readability.\n",
"model_drop = keras.models.Sequential()\n",
"# Feature extractor: five conv layers with three max-pool stages, dropout throughout.\n",
"model_drop.add(keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)))\n",
"model_drop.add(keras.layers.Dropout(.5))\n",
"model_drop.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_drop.add(keras.layers.Dropout(.5))\n",
"model_drop.add(keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_drop.add(keras.layers.Dropout(.5))\n",
"model_drop.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_drop.add(keras.layers.Dropout(.5))\n",
"model_drop.add(keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_drop.add(keras.layers.Dropout(.5))\n",
"model_drop.add(keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_drop.add(keras.layers.Dropout(.5))\n",
"model_drop.add(keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_drop.add(keras.layers.Dropout(.5))\n",
"model_drop.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_drop.add(keras.layers.Dropout(.5))\n",
"# Classifier head: two fully-connected layers with dropout, 10-way softmax output.\n",
"model_drop.add(keras.layers.Flatten())\n",
"model_drop.add(keras.layers.Dense(4096, activation='relu'))\n",
"model_drop.add(keras.layers.Dropout(.5))\n",
"model_drop.add(keras.layers.Dense(4096, activation='relu'))\n",
"model_drop.add(keras.layers.Dropout(.5))\n",
"model_drop.add(keras.layers.Dense(10, activation='softmax'))"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 45,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_6\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d_30 (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" dropout_30 (Dropout) (None, 55, 55, 96) 0 \n",
" \n",
" max_pooling2d_18 (MaxPoolin (None, 27, 27, 96) 0 \n",
" g2D) \n",
" \n",
" dropout_31 (Dropout) (None, 27, 27, 96) 0 \n",
" \n",
" conv2d_31 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" dropout_32 (Dropout) (None, 27, 27, 256) 0 \n",
" \n",
" max_pooling2d_19 (MaxPoolin (None, 13, 13, 256) 0 \n",
" g2D) \n",
" \n",
" dropout_33 (Dropout) (None, 13, 13, 256) 0 \n",
" \n",
" conv2d_32 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" dropout_34 (Dropout) (None, 13, 13, 384) 0 \n",
" \n",
" conv2d_33 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" dropout_35 (Dropout) (None, 13, 13, 384) 0 \n",
" \n",
" conv2d_34 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" dropout_36 (Dropout) (None, 13, 13, 256) 0 \n",
" \n",
" max_pooling2d_20 (MaxPoolin (None, 6, 6, 256) 0 \n",
" g2D) \n",
" \n",
" dropout_37 (Dropout) (None, 6, 6, 256) 0 \n",
" \n",
" flatten_6 (Flatten) (None, 9216) 0 \n",
" \n",
" dense_18 (Dense) (None, 4096) 37752832 \n",
" \n",
" dropout_38 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_19 (Dense) (None, 4096) 16781312 \n",
" \n",
" dropout_39 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_20 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,322,314\n",
"Trainable params: 58,322,314\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"# Compile with SGD. Use `learning_rate=` — the `lr=` alias is deprecated and\n",
"# emitted \"WARNING:absl:`lr` is deprecated\" in the previous run.\n",
"model_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=.001), metrics=['accuracy'])\n",
"model_drop.summary()"
]
},
{
"cell_type": "code",
"execution_count": 46,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
2022-12-10 11:12:06 +01:00
]
2023-01-07 00:39:16 +01:00
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/2699219498.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex7 = model_drop.fit_generator(\n"
]
},
2022-12-10 11:12:06 +01:00
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"25/25 [==============================] - ETA: 0s - loss: 1.9261 - accuracy: 0.2025\n",
"Epoch 1: val_accuracy improved from -inf to 0.18229, saving model to alex_7.h5\n",
"25/25 [==============================] - 30s 1s/step - loss: 1.9261 - accuracy: 0.2025 - val_loss: 2.2480 - val_accuracy: 0.1823\n",
"Epoch 2/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.7103 - accuracy: 0.1963\n",
"Epoch 2: val_accuracy improved from 0.18229 to 0.18750, saving model to alex_7.h5\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.7103 - accuracy: 0.1963 - val_loss: 2.2290 - val_accuracy: 0.1875\n",
"Epoch 3/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.6472 - accuracy: 0.2362\n",
"Epoch 3: val_accuracy improved from 0.18750 to 0.19271, saving model to alex_7.h5\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.6472 - accuracy: 0.2362 - val_loss: 2.1991 - val_accuracy: 0.1927\n",
"Epoch 4/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5965 - accuracy: 0.2675\n",
"Epoch 4: val_accuracy did not improve from 0.19271\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.5965 - accuracy: 0.2675 - val_loss: 2.1612 - val_accuracy: 0.1927\n",
"Epoch 5/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5649 - accuracy: 0.2862\n",
"Epoch 5: val_accuracy did not improve from 0.19271\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.5649 - accuracy: 0.2862 - val_loss: 2.1174 - val_accuracy: 0.1927\n",
"Epoch 6/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.4497 - accuracy: 0.3750\n",
"Epoch 6: val_accuracy improved from 0.19271 to 0.20312, saving model to alex_7.h5\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.4497 - accuracy: 0.3750 - val_loss: 2.0352 - val_accuracy: 0.2031\n",
"Epoch 7/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3833 - accuracy: 0.3787\n",
"Epoch 7: val_accuracy did not improve from 0.20312\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.3833 - accuracy: 0.3787 - val_loss: 2.0280 - val_accuracy: 0.1771\n",
"Epoch 8/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3506 - accuracy: 0.4025\n",
"Epoch 8: val_accuracy did not improve from 0.20312\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.3506 - accuracy: 0.4025 - val_loss: 1.9642 - val_accuracy: 0.1979\n",
"Epoch 9/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3013 - accuracy: 0.4212\n",
"Epoch 9: val_accuracy did not improve from 0.20312\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.3013 - accuracy: 0.4212 - val_loss: 1.9955 - val_accuracy: 0.1927\n",
"Epoch 10/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3089 - accuracy: 0.4387\n",
"Epoch 10: val_accuracy did not improve from 0.20312\n",
"25/25 [==============================] - 30s 1s/step - loss: 1.3089 - accuracy: 0.4387 - val_loss: 2.0652 - val_accuracy: 0.1875\n",
"Epoch 11/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.3030 - accuracy: 0.4400\n",
"Epoch 11: val_accuracy improved from 0.20312 to 0.20833, saving model to alex_7.h5\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.3030 - accuracy: 0.4400 - val_loss: 1.9548 - val_accuracy: 0.2083\n",
"Epoch 12/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1538 - accuracy: 0.4913\n",
"Epoch 12: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 28s 1s/step - loss: 1.1538 - accuracy: 0.4913 - val_loss: 1.8886 - val_accuracy: 0.2083\n",
"Epoch 13/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1939 - accuracy: 0.4913\n",
"Epoch 13: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.1939 - accuracy: 0.4913 - val_loss: 1.9482 - val_accuracy: 0.1875\n",
"Epoch 14/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1846 - accuracy: 0.4775\n",
"Epoch 14: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 27s 1s/step - loss: 1.1846 - accuracy: 0.4775 - val_loss: 2.0470 - val_accuracy: 0.1927\n",
"Epoch 15/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1359 - accuracy: 0.5075\n",
"Epoch 15: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 29s 1s/step - loss: 1.1359 - accuracy: 0.5075 - val_loss: 1.9831 - val_accuracy: 0.1875\n",
"Epoch 16/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1575 - accuracy: 0.4963\n",
"Epoch 16: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 96s 4s/step - loss: 1.1575 - accuracy: 0.4963 - val_loss: 1.9085 - val_accuracy: 0.2083\n",
"Epoch 17/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1165 - accuracy: 0.5113\n",
"Epoch 17: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 110s 4s/step - loss: 1.1165 - accuracy: 0.5113 - val_loss: 1.9389 - val_accuracy: 0.1979\n",
"Epoch 18/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1276 - accuracy: 0.5163\n",
"Epoch 18: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 107s 4s/step - loss: 1.1276 - accuracy: 0.5163 - val_loss: 1.9441 - val_accuracy: 0.1875\n",
"Epoch 19/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1038 - accuracy: 0.5238\n",
"Epoch 19: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 69s 3s/step - loss: 1.1038 - accuracy: 0.5238 - val_loss: 2.0581 - val_accuracy: 0.1875\n",
"Epoch 20/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1174 - accuracy: 0.5250\n",
"Epoch 20: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 68s 3s/step - loss: 1.1174 - accuracy: 0.5250 - val_loss: 1.9579 - val_accuracy: 0.1823\n",
"Epoch 21/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0253 - accuracy: 0.5575\n",
"Epoch 21: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 69s 3s/step - loss: 1.0253 - accuracy: 0.5575 - val_loss: 1.9376 - val_accuracy: 0.1979\n",
"Epoch 22/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.1088 - accuracy: 0.5450\n",
"Epoch 22: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 72s 3s/step - loss: 1.1088 - accuracy: 0.5450 - val_loss: 2.0030 - val_accuracy: 0.1875\n",
"Epoch 23/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0789 - accuracy: 0.5475\n",
"Epoch 23: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 59s 2s/step - loss: 1.0789 - accuracy: 0.5475 - val_loss: 1.9403 - val_accuracy: 0.1979\n",
"Epoch 24/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0523 - accuracy: 0.5500\n",
"Epoch 24: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 56s 2s/step - loss: 1.0523 - accuracy: 0.5500 - val_loss: 2.0287 - val_accuracy: 0.1875\n",
"Epoch 25/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.0160 - accuracy: 0.5587\n",
"Epoch 25: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 52s 2s/step - loss: 1.0160 - accuracy: 0.5587 - val_loss: 1.9327 - val_accuracy: 0.1979\n"
2022-12-10 11:12:06 +01:00
]
}
],
2022-12-10 00:55:36 +01:00
"source": [
2023-01-06 21:43:41 +01:00
"# Save the best weights by validation accuracy. `period` is deprecated;\n",
"# the default save_freq='epoch' already evaluates the monitor once per epoch.\n",
"checkpoint = ModelCheckpoint(\"alex_7.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')\n",
"early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"\n",
"# `Model.fit_generator` is deprecated (see the UserWarning in the output\n",
"# below); `Model.fit` accepts generators/Sequences directly.\n",
"alex7 = model_drop.fit(\n",
"    train_ds,\n",
"    steps_per_epoch=len(train_ds),\n",
"    validation_data=validation_ds,\n",
"    validation_steps=len(validation_ds),\n",
"    epochs=25,\n",
"    callbacks=[checkpoint, early])"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 47,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAjwAAAHHCAYAAAC7soLdAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACKrElEQVR4nOzdd3hTZf/H8XeSpuneGwqUvUFApixF2cpQEFRAxvNTAQUciAvQx4lbcCuoj6AigltBRECWDFmyy2zp3k3bNOP+/ZESKS3QQtu06fd1XbmanHNyzjfpafLpfe5zbo1SSiGEEEII4cK0zi5ACCGEEKKySeARQgghhMuTwCOEEEIIlyeBRwghhBAuTwKPEEIIIVyeBB4hhBBCuDwJPEIIIYRweRJ4hBBCCOHyJPAIIYQQwuVJ4BFCVBiNRsO8efPK/byTJ0+i0WhYsmRJhdckhBAggUcIl7NkyRI0Gg0ajYY///yzxHylFNHR0Wg0GoYMGeKECoUQoupJ4BHCRXl4eLB06dIS09evX09cXBwGg8EJVQkhhHNI4BHCRQ0aNIjly5djsViKTV+6dCkdO3YkIiLCSZXVHkaj0dklCCGKSOARwkWNGTOGtLQ01qxZ45hWWFjI119/zdixY0t9jtFo5MEHHyQ6OhqDwUCzZs14+eWXUUoVW85kMjFz5kxCQ0Px9fXl5ptvJi4urtR1xsfHM3HiRMLDwzEYDLRq1YqPP/74il5Teno6Dz30EG3atMHHxwc/Pz8GDhzInj17SixbUFDAvHnzaNq0KR4eHkRGRjJixAhiY2Mdy9hsNt544w3atGmDh4cHoaGhDBgwgB07dgCX7lt0YX+lefPmodFoOHDgAGPHjiUwMJDrrrsOgL179zJhwgQaNmyIh4cHERERTJw4kbS0tFLfr0mTJhEVFYXBYCAmJoZ7772XwsJCjh8/jkaj4bXXXivxvM2bN6PRaFi2bFl531YhagU3ZxcghKgcDRo0oFu3bixbtoyBAwcC8PPPP5OVlcXtt9/Om2++WWx5pRQ333wz69atY9KkSbRv355ff/2Vhx9+mPj4+GJfspMnT+Z///sfY8eOpXv37vz+++8MHjy4RA1JSUl07doVjUbDtGnTCA0N5eeff2bSpElkZ2czY8aMcr2m48ePs2rVKm677TZiYmJISkrivffeo3fv3hw4cICoqCgArFYrQ4YMYe3atdx+++088MAD5OTksGbNGvbv30+jRo0AmDRpEkuWLGHgwIFMnjwZi8XCxo0b2bp1K506dSpXbefcdtttNGnShOeee84RFNesWcPx48e5++67iYiI4J9//uH999/nn3/+YevWrWg0GgDOnj1L586dyczM5D//+Q/NmzcnPj6er7/+mry8PBo2bEiPHj34/PPPmTlzZrHtfv755/j6+nLLLbdcUd1CuDwlhHApixcvVoDavn27WrhwofL19VV5eXlKKaVuu+021bdvX6WUUvXr11eDBw92PG/VqlUKUP/973+Lre/WW29VGo1GHTt2TCml1O7duxWg7rvvvmLLjR07VgFq7ty5jmmTJk1SkZGRKjU1tdiyt99+u/L393fUdeLECQWoxYsXX/K1FRQUKKvVWmzaiRMnlMFgUE8//bRj2scff6wA9eqrr5ZYh81mU0op9fvvvytA3X///Rdd5lJ1Xfha586dqwA1ZsyYEsuee53nW7ZsmQLUhg0bHNPGjRuntFqt2r59+0Vreu+99xSgDh486JhXWFioQkJC1Pjx40s8TwhhJ4e0hHBho0aNIj8/nx9++IGcnBx++OGHix7O+umnn9DpdNx///3Fpj/44IMopfj5558dywEllruwtUYpxYoVKxg6dChKKVJTUx23/v37k5WVxa5du8r1egwGA1qt/WPLarWSlpaGj48PzZo1K7auFStWEBISwvTp00us41xryooVK9BoNMydO/eiy1yJe+65p8Q0T09Px/2CggJSU1Pp2rUrgK
Num83GqlWrGDp0aKmtS+dqGjVqFB4eHnz++eeOeb/++iupqanceeedV1y3EK5OAo8QLiw0NJR+/fqxdOlSvvnmG6xWK7feemupy546dYqoqCh8fX2LTW/RooVj/rmfWq3WcVjonGbNmhV7nJKSQmZmJu+//z6hoaHFbnfffTcAycnJ5Xo9NpuN1157jSZNmmAwGAgJCSE0NJS9e/eSlZXlWC42NpZmzZrh5nbxo/axsbFERUURFBRUrhouJyYmpsS09PR0HnjgAcLDw/H09CQ0NNSx3Lm6U1JSyM7OpnXr1pdcf0BAAEOHDi12Bt7nn39OnTp1uP766yvwlQjhWqQPjxAubuzYsUyZMoXExEQGDhxIQEBAlWzXZrMBcOeddzJ+/PhSl2nbtm251vncc8/x5JNPMnHiRJ555hmCgoLQarXMmDHDsb2KdLGWHqvVetHnnN+ac86oUaPYvHkzDz/8MO3bt8fHxwebzcaAAQOuqO5x48axfPlyNm/eTJs2bfjuu++47777HK1fQoiSJPAI4eKGDx/O//3f/7F161a+/PLLiy5Xv359fvvtN3Jycoq18hw6dMgx/9xPm83maEU55/Dhw8XWd+4MLqvVSr9+/SrktXz99df07duXjz76qNj0zMxMQkJCHI8bNWrEtm3bMJvN6PX6UtfVqFEjfv31V9LT0y/ayhMYGOhY//nOtXaVRUZGBmvXrmX+/Pk89dRTjulHjx4ttlxoaCh+fn7s37//suscMGAAoaGhfP7553Tp0oW8vDzuuuuuMtckRG0k/w4I4eJ8fHx45513mDdvHkOHDr3ocoMGDcJqtbJw4cJi01977TU0Go3jTK9zPy88y+v1118v9lin0zFy5EhWrFhR6pd4SkpKuV+LTqcrcYr88uXLiY+PLzZt5MiRpKamlngtgOP5I0eORCnF/PnzL7qMn58fISEhbNiwodj8t99+u1w1n7/Ocy58v7RaLcOGDeP77793nBZfWk0Abm5ujBkzhq+++oolS5bQpk2bcreWCVHbSAuPELXAxQ4pnW/o0KH07duXxx9/nJMnT9KuXTtWr17Nt99+y4wZMxx9dtq3b8+YMWN4++23ycrKonv37qxdu5Zjx46VWOcLL7zAunXr6NKlC1OmTKFly5akp6eza9cufvvtN9LT08v1OoYMGcLTTz/N3XffTffu3dm3bx+ff/45DRs2LLbcuHHj+PTTT5k1axZ//fUXPXv2xGg08ttvv3Hfffdxyy230LdvX+666y7efPNNjh496ji8tHHjRvr27cu0adMA+yn4L7zwApMnT6ZTp05s2LCBI0eOlLlmPz8/evXqxUsvvYTZbKZOnTqsXr2aEydOlFj2ueeeY/Xq1fTu3Zv//Oc/tGjRgoSEBJYvX86ff/5Z7HDkuHHjePPNN1m3bh0vvvhiud5HIWolp50fJoSoFOefln4pF56WrpRSOTk5aubMmSoqKkrp9XrVpEkTtWDBAscp0efk5+er+++/XwUHBytvb281dOhQdebMmRKnaiulVFJSkpo6daqKjo5Wer1eRUREqBtuuEG9//77jmXKc1r6gw8+qCIjI5Wnp6fq0aOH2rJli+rdu7fq3bt3sWXz8vLU448/rmJiYhzbvfXWW1VsbKxjGYvFohYsWKCaN2+u3N3dVWhoqBo4cKDauXNnsfVMmjRJ+fv7K19fXzVq1CiVnJx80dPSU1JSStQdFxenhg8frgICApS/v7+67bbb1NmzZ0t9v06dOqXGjRunQkNDlcFgUA0bNlRTp05VJpOpxHpbtWqltFqtiouLu+T7JoRQSqPUBe2sQgghaoRrrrmGoKAg1q5d6+xShKj2pA+PEELUQDt27GD37t2MGzfO2aUIUSNIC48QQtQg+/fvZ+fOnbzyyiukpqZy/PhxPDw8nF2WENWetPAIIUQN8vXXX3P33XdjNptZtmyZhB0hykhaeIQQQgjh8qSFRwghhBAuTwKPEEIIIVxerbvwoM1m4+
zZs/j6+l7ViMhCCCGEqDpKKXJycoiKirqiceNqXeA5e/Ys0dHRzi5DCCGEEFfgzJkz1K1bt9zPq3WB59ygiGfOnMH
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
2023-01-06 21:43:41 +01:00
}
],
"source": [
"# Training curves for the dropout model: both accuracy and loss are drawn,\n",
"# so the title and y-axis label must not claim the figure is accuracy-only.\n",
"plt.plot(alex7.history[\"accuracy\"])\n",
"plt.plot(alex7.history['val_accuracy'])\n",
"plt.plot(alex7.history['loss'])\n",
"plt.plot(alex7.history['val_loss'])\n",
"plt.title(\"Model metrics\")\n",
"plt.ylabel(\"Value\")\n",
"plt.xlabel(\"Epoch\")\n",
"plt.legend([\"Accuracy\", \"Validation Accuracy\", \"Loss\", \"Validation Loss\"])\n",
"plt.show()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 48,
2023-01-06 21:43:41 +01:00
"metadata": {},
"outputs": [
2022-12-10 11:12:06 +01:00
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 4s 534ms/step - loss: 1.9357 - accuracy: 0.2070\n"
2023-01-06 21:43:41 +01:00
]
2023-01-07 00:39:16 +01:00
},
{
"data": {
"text/plain": [
"[1.9356722831726074, 0.20703125]"
]
},
"execution_count": 48,
"metadata": {},
"output_type": "execute_result"
2022-12-10 11:12:06 +01:00
}
],
2022-12-10 00:55:36 +01:00
"source": [
2023-01-07 00:39:16 +01:00
"model_drop.evaluate(test_ds)"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Batch Regularization"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Bez dropoutu"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 49,
2022-12-10 00:55:36 +01:00
"metadata": {},
2023-01-07 00:39:16 +01:00
"outputs": [],
2022-12-10 00:55:36 +01:00
"source": [
"# AlexNet with a BatchNormalization layer after every convolution and no\n",
"# dropout, built incrementally layer by layer. Input must be (227, 227, 3).\n",
"model_batch = keras.models.Sequential()\n",
"model_batch.add(keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)))\n",
"model_batch.add(keras.layers.BatchNormalization())\n",
"model_batch.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_batch.add(keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_batch.add(keras.layers.BatchNormalization())\n",
"model_batch.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_batch.add(keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_batch.add(keras.layers.BatchNormalization())\n",
"model_batch.add(keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_batch.add(keras.layers.BatchNormalization())\n",
"model_batch.add(keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_batch.add(keras.layers.BatchNormalization())\n",
"model_batch.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_batch.add(keras.layers.Flatten())\n",
"model_batch.add(keras.layers.Dense(4096, activation='relu'))\n",
"model_batch.add(keras.layers.Dense(4096, activation='relu'))\n",
"model_batch.add(keras.layers.Dense(10, activation='softmax'))"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 50,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_7\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d_35 (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" batch_normalization (BatchN (None, 55, 55, 96) 384 \n",
" ormalization) \n",
" \n",
" max_pooling2d_21 (MaxPoolin (None, 27, 27, 96) 0 \n",
" g2D) \n",
" \n",
" conv2d_36 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" batch_normalization_1 (Batc (None, 27, 27, 256) 1024 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_22 (MaxPoolin (None, 13, 13, 256) 0 \n",
" g2D) \n",
" \n",
" conv2d_37 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" batch_normalization_2 (Batc (None, 13, 13, 384) 1536 \n",
" hNormalization) \n",
" \n",
" conv2d_38 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" batch_normalization_3 (Batc (None, 13, 13, 384) 1536 \n",
" hNormalization) \n",
" \n",
" conv2d_39 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" batch_normalization_4 (Batc (None, 13, 13, 256) 1024 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_23 (MaxPoolin (None, 6, 6, 256) 0 \n",
" g2D) \n",
" \n",
" flatten_7 (Flatten) (None, 9216) 0 \n",
" \n",
" dense_21 (Dense) (None, 4096) 37752832 \n",
" \n",
" dense_22 (Dense) (None, 4096) 16781312 \n",
" \n",
" dense_23 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,327,818\n",
"Trainable params: 58,325,066\n",
"Non-trainable params: 2,752\n",
"_________________________________________________________________\n"
2022-12-10 11:12:06 +01:00
]
}
],
2022-12-10 00:55:36 +01:00
"source": [
"# The `lr` kwarg is deprecated (see the absl warning above); Keras optimizers\n",
"# take `learning_rate` instead.\n",
"model_batch.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=.001), metrics=['accuracy'])\n",
"model_batch.summary()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 51,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/2334374023.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex8 = model_batch.fit_generator(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"25/25 [==============================] - ETA: 0s - loss: 3.5162 - accuracy: 0.4512\n",
"Epoch 1: val_accuracy improved from -inf to 0.20833, saving model to alex_8.h5\n",
"25/25 [==============================] - 51s 2s/step - loss: 3.5162 - accuracy: 0.4512 - val_loss: 2.1169 - val_accuracy: 0.2083\n",
"Epoch 2/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.6702 - accuracy: 0.7425\n",
"Epoch 2: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 52s 2s/step - loss: 0.6702 - accuracy: 0.7425 - val_loss: 2.1916 - val_accuracy: 0.1771\n",
"Epoch 3/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.3823 - accuracy: 0.8637\n",
"Epoch 3: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 52s 2s/step - loss: 0.3823 - accuracy: 0.8637 - val_loss: 2.5290 - val_accuracy: 0.1823\n",
"Epoch 4/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.2204 - accuracy: 0.9388\n",
"Epoch 4: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 52s 2s/step - loss: 0.2204 - accuracy: 0.9388 - val_loss: 3.1773 - val_accuracy: 0.1771\n",
"Epoch 5/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.1337 - accuracy: 0.9712\n",
"Epoch 5: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 53s 2s/step - loss: 0.1337 - accuracy: 0.9712 - val_loss: 3.4835 - val_accuracy: 0.1875\n",
"Epoch 6/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0836 - accuracy: 0.9875\n",
"Epoch 6: val_accuracy did not improve from 0.20833\n",
"25/25 [==============================] - 52s 2s/step - loss: 0.0836 - accuracy: 0.9875 - val_loss: 4.0837 - val_accuracy: 0.1927\n",
"Epoch 7/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0911 - accuracy: 0.9775\n",
"Epoch 7: val_accuracy improved from 0.20833 to 0.24479, saving model to alex_8.h5\n",
"25/25 [==============================] - 56s 2s/step - loss: 0.0911 - accuracy: 0.9775 - val_loss: 4.6900 - val_accuracy: 0.2448\n",
"Epoch 8/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0658 - accuracy: 0.9862\n",
"Epoch 8: val_accuracy improved from 0.24479 to 0.28646, saving model to alex_8.h5\n",
"25/25 [==============================] - 52s 2s/step - loss: 0.0658 - accuracy: 0.9862 - val_loss: 4.7919 - val_accuracy: 0.2865\n",
"Epoch 9/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0362 - accuracy: 0.9975\n",
"Epoch 9: val_accuracy improved from 0.28646 to 0.30729, saving model to alex_8.h5\n",
"25/25 [==============================] - 53s 2s/step - loss: 0.0362 - accuracy: 0.9975 - val_loss: 5.1122 - val_accuracy: 0.3073\n",
"Epoch 10/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0309 - accuracy: 0.9962\n",
"Epoch 10: val_accuracy did not improve from 0.30729\n",
"25/25 [==============================] - 52s 2s/step - loss: 0.0309 - accuracy: 0.9962 - val_loss: 5.5180 - val_accuracy: 0.2760\n",
"Epoch 11/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0250 - accuracy: 1.0000\n",
"Epoch 11: val_accuracy did not improve from 0.30729\n",
"25/25 [==============================] - 51s 2s/step - loss: 0.0250 - accuracy: 1.0000 - val_loss: 5.7030 - val_accuracy: 0.2969\n",
"Epoch 12/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0243 - accuracy: 0.9962\n",
"Epoch 12: val_accuracy did not improve from 0.30729\n",
"25/25 [==============================] - 49s 2s/step - loss: 0.0243 - accuracy: 0.9962 - val_loss: 5.8668 - val_accuracy: 0.2917\n",
"Epoch 13/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0163 - accuracy: 1.0000\n",
"Epoch 13: val_accuracy did not improve from 0.30729\n",
"25/25 [==============================] - 47s 2s/step - loss: 0.0163 - accuracy: 1.0000 - val_loss: 6.0192 - val_accuracy: 0.3021\n",
"Epoch 14/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0121 - accuracy: 0.9987\n",
"Epoch 14: val_accuracy improved from 0.30729 to 0.32292, saving model to alex_8.h5\n",
"25/25 [==============================] - 45s 2s/step - loss: 0.0121 - accuracy: 0.9987 - val_loss: 5.2193 - val_accuracy: 0.3229\n",
"Epoch 15/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0131 - accuracy: 1.0000\n",
"Epoch 15: val_accuracy did not improve from 0.32292\n",
"25/25 [==============================] - 43s 2s/step - loss: 0.0131 - accuracy: 1.0000 - val_loss: 5.9107 - val_accuracy: 0.3073\n",
"Epoch 16/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0113 - accuracy: 1.0000\n",
"Epoch 16: val_accuracy did not improve from 0.32292\n",
"25/25 [==============================] - 43s 2s/step - loss: 0.0113 - accuracy: 1.0000 - val_loss: 5.8355 - val_accuracy: 0.2969\n",
"Epoch 17/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0097 - accuracy: 1.0000\n",
"Epoch 17: val_accuracy did not improve from 0.32292\n",
"25/25 [==============================] - 45s 2s/step - loss: 0.0097 - accuracy: 1.0000 - val_loss: 5.1658 - val_accuracy: 0.3125\n",
"Epoch 18/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0104 - accuracy: 0.9987\n",
"Epoch 18: val_accuracy did not improve from 0.32292\n",
"25/25 [==============================] - 44s 2s/step - loss: 0.0104 - accuracy: 0.9987 - val_loss: 4.9559 - val_accuracy: 0.3073\n",
"Epoch 19/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0083 - accuracy: 1.0000\n",
"Epoch 19: val_accuracy improved from 0.32292 to 0.33333, saving model to alex_8.h5\n",
"25/25 [==============================] - 45s 2s/step - loss: 0.0083 - accuracy: 1.0000 - val_loss: 4.3347 - val_accuracy: 0.3333\n",
"Epoch 20/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0076 - accuracy: 1.0000\n",
"Epoch 20: val_accuracy improved from 0.33333 to 0.36979, saving model to alex_8.h5\n",
"25/25 [==============================] - 46s 2s/step - loss: 0.0076 - accuracy: 1.0000 - val_loss: 3.3916 - val_accuracy: 0.3698\n",
"Epoch 21/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0076 - accuracy: 1.0000\n",
"Epoch 21: val_accuracy improved from 0.36979 to 0.39062, saving model to alex_8.h5\n",
"25/25 [==============================] - 46s 2s/step - loss: 0.0076 - accuracy: 1.0000 - val_loss: 2.8197 - val_accuracy: 0.3906\n",
"Epoch 22/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0056 - accuracy: 1.0000\n",
"Epoch 22: val_accuracy improved from 0.39062 to 0.45312, saving model to alex_8.h5\n",
"25/25 [==============================] - 45s 2s/step - loss: 0.0056 - accuracy: 1.0000 - val_loss: 2.2279 - val_accuracy: 0.4531\n",
"Epoch 23/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0066 - accuracy: 1.0000\n",
"Epoch 23: val_accuracy improved from 0.45312 to 0.57292, saving model to alex_8.h5\n",
"25/25 [==============================] - 46s 2s/step - loss: 0.0066 - accuracy: 1.0000 - val_loss: 1.3994 - val_accuracy: 0.5729\n",
"Epoch 24/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0052 - accuracy: 1.0000\n",
"Epoch 24: val_accuracy improved from 0.57292 to 0.63542, saving model to alex_8.h5\n",
"25/25 [==============================] - 49s 2s/step - loss: 0.0052 - accuracy: 1.0000 - val_loss: 1.2914 - val_accuracy: 0.6354\n",
"Epoch 25/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0059 - accuracy: 1.0000\n",
"Epoch 25: val_accuracy improved from 0.63542 to 0.71354, saving model to alex_8.h5\n",
"25/25 [==============================] - 49s 2s/step - loss: 0.0059 - accuracy: 1.0000 - val_loss: 1.0022 - val_accuracy: 0.7135\n"
2022-12-10 11:12:06 +01:00
]
}
],
2022-12-10 00:55:36 +01:00
"source": [
2023-01-06 21:43:41 +01:00
"# Save the best weights by validation accuracy. `period` is deprecated;\n",
"# the default save_freq='epoch' already evaluates the monitor once per epoch.\n",
"checkpoint = ModelCheckpoint(\"alex_8.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')\n",
"early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"\n",
"# `Model.fit_generator` is deprecated (see the UserWarning in the output\n",
"# below); `Model.fit` accepts generators/Sequences directly.\n",
"alex8 = model_batch.fit(\n",
"    train_ds,\n",
"    steps_per_epoch=len(train_ds),\n",
"    validation_data=validation_ds,\n",
"    validation_steps=len(validation_ds),\n",
"    epochs=25,\n",
"    callbacks=[checkpoint, early])"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 52,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAi8AAAHHCAYAAAB3K7g2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACKOElEQVR4nOzdd3wU1frH8c/sJtn0npAEQuhF+qWJqHRpoiDSe7UAClgQK9yfih1UFBUR9EoXQcBCkyZFEA1FEOktIb2XTbI7vz82WbIkQAJJJps873v3tdmZ2ZnvLmv2yZkz5yiqqqoIIYQQQtgJndYBhBBCCCGKQ4oXIYQQQtgVKV6EEEIIYVekeBFCCCGEXZHiRQghhBB2RYoXIYQQQtgVKV6EEEIIYVekeBFCCCGEXZHiRQghhBB2RYoXIUShFEVh1qxZxX7e+fPnURSFJUuWlHgmIYQAKV6EKNeWLFmCoigoisJvv/1WYL2qqoSGhqIoCg8++KAGCYUQouxJ8SKEHXB2dmbZsmUFlu/cuZPLly9jMBg0SCWEENqQ4kUIO9CrVy9Wr15NTk6OzfJly5bRsmVLgoKCNEpWeaSlpWkdQQiRS4oXIezAkCFDiIuLY8uWLdZlWVlZfPfddwwdOrTQ56SlpfHMM88QGhqKwWCgfv36vPfee1w/kbzRaGTatGkEBATg4eHBQw89xOXLlwvd55UrVxg7dixVqlTBYDDQqFEjvvrqq9t6TfHx8Tz77LM0adIEd3d3PD096dmzJ4cPHy6wbWZmJrNmzaJevXo4OzsTHBzMI488wpkzZ6zbmM1mPvzwQ5o0aYKzszMBAQH06NGDP/74A7h5X5zr+/fMmjULRVE4fvw4Q4cOxcfHh3vvvReAI0eOMHr0aGrVqoWzszNBQUGMHTuWuLi4Qt+vcePGERISgsFgoGbNmjzxxBNkZWVx9uxZFEVh7ty5BZ63d+9eFEVh+fLlxX1bhagUHLQOIIS4tRo1atCuXTuWL19Oz549Afj5559JSkpi8ODBfPTRRzbbq6rKQw89xPbt2xk3bhzNmzdn06ZNPPfcc1y5csXmC3P8+PF8++23DB06lHvuuYdff/2V3r17F8gQFRXF3XffjaIoTJ48mYCAAH7++WfGjRtHcnIyU6dOLdZrOnv2LOvWrWPAgAHUrFmTqKgoPv/8czp06MDx48cJCQkBwGQy8eCDD7Jt2zYGDx7M008/TUpKClu2bOHYsWPUrl0bgHHjxrFkyRJ69uzJ+PHjycnJYffu3ezfv59WrVoVK1ueAQMGULduXd58801r0bdlyxbOnj3LmDFjCAoK4u+//+aLL77g77//Zv/+/SiKAkBERARt2rQhMTGRiRMn0qBBA65cucJ3331Heno6tWrVon379ixdupRp06bZHHfp0qV4eHjw8MMP31ZuISo8VQhRbi1evFgF1IMHD6rz589XPTw81PT0dFVVVXXAgAFqp06dVFVV1bCwMLV3797W561bt04F1Ndff91mf48++qiqKIp6+vRpVVVVNTw8XAXUJ5980ma7oUOHqoD62muvWZeNGzdODQ4OVmNjY222HTx4sOrl5WXNde7cORVQFy9efNPXlpmZqZpMJptl586dUw0Gg/rf//7Xuuyrr75SAfWDDz4osA+z2ayqqqr++uuvKqA+9dRTN9zmZrmuf62vvfaaCqhDhgwpsG3e68xv+fLlKqDu2rXLumzkyJGqTqdTDx48eMNMn3/+uQqoJ06csK7LyspS/f391VGjRhV4nhDCQk4bCWEnBg4cSEZGBhs3biQlJYWNGzfe8JTRTz/9hF6v56mnnrJZ/swzz6CqKj///LN1O6DAdte3oqiqypo1a+jTpw+qqhIbG2u9de/enaSkJP78889ivR6DwYBOZ/kVZDKZiIuLw93dnfr169vsa82aNfj7+zNlypQC+8hr5VizZg2KovDaa6/dcJvb8fjjjxdY5uLiYv05MzOT2NhY7r
77bgBrbrPZzLp16+jTp0+hrT55mQYOHIizszNLly61rtu0aROxsbEMHz78tnMLUdFJ8SKEnQgICKBr164sW7aM77//HpPJxKOPPlrothcuXCAkJAQPDw+b5Q0bNrSuz7vX6XTWUy956tevb/M4JiaGxMREvvjiCwICAmxuY8aMASA6OrpYr8dsNjN37lzq1q2LwWDA39+fgIAAjhw5QlJSknW7M2fOUL9+fRwcbnyW+8yZM4SEhODr61usDLdSs2bNAsvi4+N5+umnqVKlCi4uLgQEBFi3y8sdExNDcnIyjRs3vun+vb296dOnj82VZEuXLqVq1ap07ty5BF+JEBWL9HkRwo4MHTqUCRMmcPXqVXr27Im3t3eZHNdsNgMwfPhwRo0aVeg2TZs2LdY+33zzTV555RXGjh3L//3f/+Hr64tOp2Pq1KnW45WkG7XAmEymGz4nfytLnoEDB7J3716ee+45mjdvjru7O2azmR49etxW7pEjR7J69Wr27t1LkyZNWL9+PU8++aS1VUoIUZAUL0LYkX79+vHYY4+xf/9+Vq5cecPtwsLC2Lp1KykpKTatL//88491fd692Wy2tm7kOXnypM3+8q5EMplMdO3atURey3fffUenTp1YtGiRzfLExET8/f2tj2vXrs3vv/9OdnY2jo6Ohe6rdu3abNq0ifj4+Bu2vvj4+Fj3n19eK1RRJCQksG3bNmbPns2rr75qXX7q1Cmb7QICAvD09OTYsWO33GePHj0ICAhg6dKltG3blvT0dEaMGFHkTEJURlLaC2FH3N3dWbBgAbNmzaJPnz433K5Xr16YTCbmz59vs3zu3LkoimK9Yinv/vqrlebNm2fzWK/X079/f9asWVPoF3JMTEyxX4tery9w2fbq1au5cuWKzbL+/fsTGxtb4LUA1uf3798fVVWZPXv2Dbfx9PTE39+fXbt22az/9NNPi5U5/z7zXP9+6XQ6+vbty4YNG6yXaheWCcDBwYEhQ4awatUqlixZQpMmTYrdiiVEZSMtL0LYmRudtsmvT58+dOrUiZdeeonz58/TrFkzNm/ezA8//MDUqVOtfVyaN2/OkCFD+PTTT0lKSuKee+5h27ZtnD59usA+33rrLbZv307btm2ZMGECd911F/Hx8fz5559s3bqV+Pj4Yr2OBx98kP/+97+MGTOGe+65h6NHj7J06VJq1apls93IkSP55ptvmD59OgcOHOC+++4jLS2NrVu38uSTT/Lwww/TqVMnRowYwUcffcSpU6esp3B2795Np06dmDx5MmC5LPytt95i/PjxtGrVil27dvHvv/8WObOnpyf3338/77zzDtnZ2VStWpXNmzdz7ty5Atu++eabbN68mQ4dOjBx4kQaNmxIZGQkq1ev5rfffrM55Tdy5Eg++ugjtm/fzttvv12s91GISkmz65yEELeU/1Lpm7n+UmlVVdWUlBR12rRpakhIiOro6KjWrVtXfffdd62X6ebJyMhQn3rqKdXPz091c3NT+/Tpo166dKnA5cOqqqpRUVHqpEmT1NDQUNXR0VENCgpSu3Tpon7xxRfWbYpzqfQzzzyjBgcHqy4uLmr79u3Vffv2qR06dFA7dOhgs216err60ksvqTVr1rQe99FHH1XPnDlj3SYnJ0d999131QYNGqhOTk5qQECA2rNnT/XQoUM2+xk3bpzq5eWlenh4qAMHDlSjo6NveKl0TExMgdyXL19W+/Xrp3p7e6teXl7qgAED1IiIiELfrwsXLqgjR45UAwICVIPBoNaqVUudNGmSajQaC+y3UaNGqk6nUy9fvnzT900IoaqKql7X/imEEKLMtWjRAl9fX7Zt26Z1FCHKPenzIoQQGvvjjz8IDw9n5MiRWkcRwi5Iy4sQQmjk2LFjHDp0iPfff5/Y2FjOnj2Ls7Oz1rGEKPek5UUIITTy3XffMWbMGLKzs1m+fLkULkIUkbS8CCGEEMKuSMuLEEIIIeyKFC9CCCGEsCt2PUid2W
wmIiICDw+PO5o5VgghhBBlR1VVUlJSCAkJua15vOy6eImIiCA0NFTrGEIIIYS4DZcuXaJatWrFfp5dFy95E85dunQ
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
2023-01-06 21:43:41 +01:00
}
],
"source": [
"# Training curves for the batch-norm model: both accuracy and loss are drawn,\n",
"# so the title and y-axis label must not claim the figure is accuracy-only.\n",
"plt.plot(alex8.history[\"accuracy\"])\n",
"plt.plot(alex8.history['val_accuracy'])\n",
"plt.plot(alex8.history['loss'])\n",
"plt.plot(alex8.history['val_loss'])\n",
"plt.title(\"Model metrics\")\n",
"plt.ylabel(\"Value\")\n",
"plt.xlabel(\"Epoch\")\n",
"plt.legend([\"Accuracy\", \"Validation Accuracy\", \"Loss\", \"Validation Loss\"])\n",
"plt.show()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 53,
2023-01-06 21:43:41 +01:00
"metadata": {},
"outputs": [
2022-12-10 11:12:06 +01:00
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 4s 557ms/step - loss: 0.8515 - accuracy: 0.7383\n"
2023-01-06 21:43:41 +01:00
]
2023-01-07 00:39:16 +01:00
},
{
"data": {
"text/plain": [
"[0.8515095114707947, 0.73828125]"
]
},
"execution_count": 53,
"metadata": {},
"output_type": "execute_result"
2022-12-10 11:12:06 +01:00
}
],
2022-12-10 00:55:36 +01:00
"source": [
2023-01-07 00:39:16 +01:00
"model_batch.evaluate(test_ds)"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Z dropoutem"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 54,
2022-12-10 00:55:36 +01:00
"metadata": {},
2023-01-07 00:39:16 +01:00
"outputs": [],
2022-12-10 00:55:36 +01:00
"source": [
"# The same batch-normalised AlexNet as above, extended with Dropout(0.5)\n",
"# after each 4096-unit dense layer, built incrementally layer by layer.\n",
"model_batch_drop = keras.models.Sequential()\n",
"model_batch_drop.add(keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)))\n",
"model_batch_drop.add(keras.layers.BatchNormalization())\n",
"model_batch_drop.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_batch_drop.add(keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_batch_drop.add(keras.layers.BatchNormalization())\n",
"model_batch_drop.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_batch_drop.add(keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_batch_drop.add(keras.layers.BatchNormalization())\n",
"model_batch_drop.add(keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_batch_drop.add(keras.layers.BatchNormalization())\n",
"model_batch_drop.add(keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"))\n",
"model_batch_drop.add(keras.layers.BatchNormalization())\n",
"model_batch_drop.add(keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)))\n",
"model_batch_drop.add(keras.layers.Flatten())\n",
"model_batch_drop.add(keras.layers.Dense(4096, activation='relu'))\n",
"model_batch_drop.add(keras.layers.Dropout(.5))\n",
"model_batch_drop.add(keras.layers.Dense(4096, activation='relu'))\n",
"model_batch_drop.add(keras.layers.Dropout(.5))\n",
"model_batch_drop.add(keras.layers.Dense(10, activation='softmax'))"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 55,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_8\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d_40 (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" batch_normalization_5 (Batc (None, 55, 55, 96) 384 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_24 (MaxPoolin (None, 27, 27, 96) 0 \n",
" g2D) \n",
" \n",
" conv2d_41 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" batch_normalization_6 (Batc (None, 27, 27, 256) 1024 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_25 (MaxPoolin (None, 13, 13, 256) 0 \n",
" g2D) \n",
" \n",
" conv2d_42 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" batch_normalization_7 (Batc (None, 13, 13, 384) 1536 \n",
" hNormalization) \n",
" \n",
" conv2d_43 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" batch_normalization_8 (Batc (None, 13, 13, 384) 1536 \n",
" hNormalization) \n",
" \n",
" conv2d_44 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" batch_normalization_9 (Batc (None, 13, 13, 256) 1024 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_26 (MaxPoolin (None, 6, 6, 256) 0 \n",
" g2D) \n",
" \n",
" flatten_8 (Flatten) (None, 9216) 0 \n",
" \n",
" dense_24 (Dense) (None, 4096) 37752832 \n",
" \n",
" dropout_40 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_25 (Dense) (None, 4096) 16781312 \n",
" \n",
" dropout_41 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_26 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,327,818\n",
"Trainable params: 58,325,066\n",
"Non-trainable params: 2,752\n",
"_________________________________________________________________\n"
2022-12-10 11:12:06 +01:00
]
}
],
2022-12-10 00:55:36 +01:00
"source": [
"# The `lr` kwarg is deprecated (see the absl warning above); Keras optimizers\n",
"# take `learning_rate` instead.\n",
"model_batch_drop.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=.001), metrics=['accuracy'])\n",
"model_batch_drop.summary()"
]
},
{
"cell_type": "code",
2023-01-07 00:39:16 +01:00
"execution_count": 56,
2022-12-10 00:55:36 +01:00
"metadata": {},
2022-12-10 11:12:06 +01:00
"outputs": [
{
2023-01-07 00:39:16 +01:00
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_13671/3373435413.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex9 = model_batch_drop.fit_generator(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"25/25 [==============================] - ETA: 0s - loss: 5.1567 - accuracy: 0.3462\n",
"Epoch 1: val_accuracy improved from -inf to 0.39583, saving model to alex_9.h5\n",
"25/25 [==============================] - 53s 2s/step - loss: 5.1567 - accuracy: 0.3462 - val_loss: 1.8424 - val_accuracy: 0.3958\n",
"Epoch 2/25\n",
"25/25 [==============================] - ETA: 0s - loss: 1.5037 - accuracy: 0.5688\n",
"Epoch 2: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 48s 2s/step - loss: 1.5037 - accuracy: 0.5688 - val_loss: 2.2144 - val_accuracy: 0.2396\n",
"Epoch 3/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.9447 - accuracy: 0.6812\n",
"Epoch 3: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 45s 2s/step - loss: 0.9447 - accuracy: 0.6812 - val_loss: 3.3665 - val_accuracy: 0.1823\n",
"Epoch 4/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.7950 - accuracy: 0.7287\n",
"Epoch 4: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 45s 2s/step - loss: 0.7950 - accuracy: 0.7287 - val_loss: 4.1486 - val_accuracy: 0.3125\n",
"Epoch 5/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.7825 - accuracy: 0.7600\n",
"Epoch 5: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 44s 2s/step - loss: 0.7825 - accuracy: 0.7600 - val_loss: 5.0991 - val_accuracy: 0.2448\n",
"Epoch 6/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.4594 - accuracy: 0.8425\n",
"Epoch 6: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 45s 2s/step - loss: 0.4594 - accuracy: 0.8425 - val_loss: 5.7482 - val_accuracy: 0.1771\n",
"Epoch 7/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.4009 - accuracy: 0.8600\n",
"Epoch 7: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 48s 2s/step - loss: 0.4009 - accuracy: 0.8600 - val_loss: 7.0191 - val_accuracy: 0.2135\n",
"Epoch 8/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.2893 - accuracy: 0.9075\n",
"Epoch 8: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 49s 2s/step - loss: 0.2893 - accuracy: 0.9075 - val_loss: 7.8847 - val_accuracy: 0.1979\n",
"Epoch 9/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.2533 - accuracy: 0.8950\n",
"Epoch 9: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 47s 2s/step - loss: 0.2533 - accuracy: 0.8950 - val_loss: 8.0985 - val_accuracy: 0.2500\n",
"Epoch 10/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.2697 - accuracy: 0.9013\n",
"Epoch 10: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 50s 2s/step - loss: 0.2697 - accuracy: 0.9013 - val_loss: 8.7342 - val_accuracy: 0.2865\n",
"Epoch 11/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.2353 - accuracy: 0.9212\n",
"Epoch 11: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 48s 2s/step - loss: 0.2353 - accuracy: 0.9212 - val_loss: 8.8148 - val_accuracy: 0.3021\n",
"Epoch 12/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.1378 - accuracy: 0.9525\n",
"Epoch 12: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 47s 2s/step - loss: 0.1378 - accuracy: 0.9525 - val_loss: 7.8579 - val_accuracy: 0.3177\n",
"Epoch 13/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.1722 - accuracy: 0.9450\n",
"Epoch 13: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 47s 2s/step - loss: 0.1722 - accuracy: 0.9450 - val_loss: 7.5631 - val_accuracy: 0.3125\n",
"Epoch 14/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.1326 - accuracy: 0.9500\n",
"Epoch 14: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 48s 2s/step - loss: 0.1326 - accuracy: 0.9500 - val_loss: 7.8681 - val_accuracy: 0.2760\n",
"Epoch 15/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.1235 - accuracy: 0.9538\n",
"Epoch 15: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 46s 2s/step - loss: 0.1235 - accuracy: 0.9538 - val_loss: 8.4553 - val_accuracy: 0.3021\n",
"Epoch 16/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0752 - accuracy: 0.9737\n",
"Epoch 16: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 44s 2s/step - loss: 0.0752 - accuracy: 0.9737 - val_loss: 6.6568 - val_accuracy: 0.3229\n",
"Epoch 17/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0540 - accuracy: 0.9862\n",
"Epoch 17: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 46s 2s/step - loss: 0.0540 - accuracy: 0.9862 - val_loss: 6.9686 - val_accuracy: 0.3229\n",
"Epoch 18/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0681 - accuracy: 0.9750\n",
"Epoch 18: val_accuracy did not improve from 0.39583\n",
"25/25 [==============================] - 45s 2s/step - loss: 0.0681 - accuracy: 0.9750 - val_loss: 5.2376 - val_accuracy: 0.3281\n",
"Epoch 19/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0530 - accuracy: 0.9800\n",
"Epoch 19: val_accuracy improved from 0.39583 to 0.42708, saving model to alex_9.h5\n",
"25/25 [==============================] - 53s 2s/step - loss: 0.0530 - accuracy: 0.9800 - val_loss: 3.4478 - val_accuracy: 0.4271\n",
"Epoch 20/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0605 - accuracy: 0.9850\n",
"Epoch 20: val_accuracy improved from 0.42708 to 0.44792, saving model to alex_9.h5\n",
"25/25 [==============================] - 50s 2s/step - loss: 0.0605 - accuracy: 0.9850 - val_loss: 2.8303 - val_accuracy: 0.4479\n",
"Epoch 21/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0447 - accuracy: 0.9862\n",
"Epoch 21: val_accuracy improved from 0.44792 to 0.47396, saving model to alex_9.h5\n",
"25/25 [==============================] - 51s 2s/step - loss: 0.0447 - accuracy: 0.9862 - val_loss: 3.0949 - val_accuracy: 0.4740\n",
"Epoch 22/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0601 - accuracy: 0.9825\n",
"Epoch 22: val_accuracy improved from 0.47396 to 0.70312, saving model to alex_9.h5\n",
"25/25 [==============================] - 78s 3s/step - loss: 0.0601 - accuracy: 0.9825 - val_loss: 1.2678 - val_accuracy: 0.7031\n",
"Epoch 23/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0483 - accuracy: 0.9850\n",
"Epoch 23: val_accuracy improved from 0.70312 to 0.76562, saving model to alex_9.h5\n",
"25/25 [==============================] - 55s 2s/step - loss: 0.0483 - accuracy: 0.9850 - val_loss: 1.0314 - val_accuracy: 0.7656\n",
"Epoch 24/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0412 - accuracy: 0.9862\n",
"Epoch 24: val_accuracy did not improve from 0.76562\n",
"25/25 [==============================] - 60s 2s/step - loss: 0.0412 - accuracy: 0.9862 - val_loss: 1.1687 - val_accuracy: 0.7083\n",
"Epoch 25/25\n",
"25/25 [==============================] - ETA: 0s - loss: 0.0650 - accuracy: 0.9725\n",
"Epoch 25: val_accuracy did not improve from 0.76562\n",
"25/25 [==============================] - 48s 2s/step - loss: 0.0650 - accuracy: 0.9725 - val_loss: 1.4878 - val_accuracy: 0.6719\n"
]
}
],
"source": [
2023-01-06 21:43:41 +01:00
"checkpoint = ModelCheckpoint(\"alex_9.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)\n",
"early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"\n",
"alex9 = model_batch_drop.fit_generator(\n",
" steps_per_epoch=len(train_ds), \n",
" generator=train_ds, \n",
" validation_data= validation_ds, \n",
" validation_steps=len(validation_ds), \n",
" epochs=25, \n",
" callbacks=[checkpoint,early])"
2022-12-10 00:55:36 +01:00
]
},
{
"cell_type": "code",
"execution_count": 57,
2022-12-10 00:55:36 +01:00
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAi8AAAHHCAYAAAB3K7g2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACPTElEQVR4nOzdd3gUVdvH8e9sTbLpDRIIvQuCUgUVFBRREAQRUQQEsVEErLwWQB9BxS5WHgV5FAREVGyICEiXIghKr4EEkpDets77xyZLQgIkIclkk/tzXXvtZnZ25t41sr+cc+YcRVVVFSGEEEIIL6HTugAhhBBCiNKQ8CKEEEIIryLhRQghhBBeRcKLEEIIIbyKhBchhBBCeBUJL0IIIYTwKhJehBBCCOFVJLwIIYQQwqtIeBFCCCGEV5HwIoQolqIoTJs2rdSvO3bsGIqiMG/evHKvSQghQMKLEFXavHnzUBQFRVFYv359kedVVSUmJgZFUejbt68GFQohROWT8CKEF/Dx8WHBggVFtq9du5aTJ09iNps1qEoIIbQh4UUIL3DrrbeyZMkSHA5Hoe0LFiygffv21K5dW6PKao6srCytSxBC5JHwIoQXGDp0KGfPnmXlypWebTabja+//pp77rmn2NdkZWXx+OOPExMTg9lspnnz5rz++uucv5C81Wpl0qRJREREEBAQwO23387JkyeLPeapU6cYNWoUtWrVwmw2c8UVV/DZZ5+V6T0lJyfzxBNP0KZNG/z9/QkMDKRPnz7s2rWryL65ublMmzaNZs2a4ePjQ1RUFAMHDuTw4cOefVwuF++88w5t2rTBx8eHiIgIbrnlFrZt2wZcfCzO+eN7pk2bhqIo/Pvvv9xzzz2EhIRw7bXXAvD3338zcuRIGjVqhI+PD7Vr12bUqFGcPXu22M9r9OjRREdHYzabadiwIY888gg2m40jR46gKApvvfVWkddt3LgRRVFYuHBhaT9WIWoEg9YFCCEurUGDBlxzzTUsXLiQPn36APDzzz+TlpbG3Xffzbvvvltof1VVuf3221m9ejWjR4+mXbt2rFixgieffJJTp04V+sJ84IEH+OKLL7jnnnvo2rUrv//+O7fddluRGs6cOUOXLl1QFIVx48YRERHBzz//zOjRo0lPT2fixImlek9Hjhzh22+/ZfDgwTRs2JAzZ87w8ccf0717d/7991+io6MBcDqd9O3bl1WrVnH33Xfz2GOPkZGRwcqVK9mzZw+NGzcGYPTo0cybN48+ffrwwAMP4HA4WLduHZs3b6ZDhw6lqi3f4MGDadq0KTNmzPCEvpUrV3LkyBHuv/9+ateuzT///MMnn3zCP//8w+bNm1EUBYC4uDg6depEamoqDz74IC1atODUqVN8/fXXZGdn06hRI7p168aXX37JpEmTCp33yy+/JCAggP79+5epbiGqPVUIUWXNnTtXBdStW7eqs2fPVgMCAtTs7GxVVVV18ODB6g033KCqqqrWr19fve222zyv+/bbb1VA/c9//lPoeHfeeaeqKIp66NAhVVVVdefOnSqgPvroo4X2u+eee1RAnTp1qmfb6NGj1aioKDUpKanQvnfffbcaFBTkqevo0aMqoM6dO/ei7y03N1d1Op2Fth09elQ1m83qiy++6Nn22WefqYD65ptvFjmGy+VSVVVVf//9dxVQJ0yYcMF9LlbX+e916tSpKqAOHTq0yL7577OghQsXqoD6xx9/eLYNHz5c1el06tatWy9Y08cff6wC6t69ez3P2Ww2NTw8XB0xYkSR1wkh3KTbSAgvcdddd5GTk8MPP/xARkYGP/zwwwW7jH766Sf0ej0TJkwotP3xxx9HVVV+/vlnz35Akf3Ob0VRVZWlS5fSr18/VFUlKSnJc+vduzdpaWns2LGjVO/HbDaj07n/CXI6nZw9exZ/f3+aN29e6FhLly4lPDyc8ePHFzlGfivH0qVLURSFqVOnXnCfsnj44YeLbPP19f
U8zs3NJSkpiS5dugB46na5XHz77bf069ev2Faf/JruuusufHx8+PLLLz3PrVixgqSkJIYNG1bmuoWo7iS8COElIiIi6NWrFwsWLOCbb77B6XRy5513Frvv8ePHiY6OJiAgoND2li1bep7Pv9fpdJ6ul3zNmzcv9HNiYiKpqal88sknREREFLrdf//9ACQkJJTq/bhcLt566y2aNm2K2WwmPDyciIgI/v77b9LS0jz7HT58mObNm2MwXLiX+/Dhw0RHRxMaGlqqGi6lYcOGRbYlJyfz2GOPUatWLXx9fYmIiPDsl193YmIi6enptG7d+qLHDw4Opl+/foWuJPvyyy+pU6cON954Yzm+EyGqFxnzIoQXueeeexgzZgynT5+mT58+BAcHV8p5XS4XAMOGDWPEiBHF7nPllVeW6pgzZszg+eefZ9SoUbz00kuEhoai0+mYOHGi53zl6UItME6n84KvKdjKku+uu+5i48aNPPnkk7Rr1w5/f39cLhe33HJLmeoePnw4S5YsYePGjbRp04bvv/+eRx991NMqJYQoSsKLEF7kjjvu4KGHHmLz5s0sWrTogvvVr1+f3377jYyMjEKtL/v27fM8n3/vcrk8rRv59u/fX+h4+VciOZ1OevXqVS7v5euvv+aGG27g008/LbQ9NTWV8PBwz8+NGzdmy5Yt2O12jEZjscdq3LgxK1asIDk5+YKtLyEhIZ7jF5TfClUSKSkprFq1iunTp/PCCy94th88eLDQfhEREQQGBrJnz55LHvOWW24hIiKCL7/8ks6dO5Odnc19991X4pqEqIkk2gvhRfz9/fnwww+ZNm0a/fr1u+B+t956K06nk9mzZxfa/tZbb6EoiueKpfz7869Wevvttwv9rNfrGTRoEEuXLi32CzkxMbHU70Wv1xe5bHvJkiWcOnWq0LZBgwaRlJRU5L0AntcPGjQIVVWZPn36BfcJDAwkPDycP/74o9DzH3zwQalqLnjMfOd/XjqdjgEDBrB8+XLPpdrF1QRgMBgYOnQoixcvZt68ebRp06bUrVhC1DTS8iKEl7lQt01B/fr144YbbuDZZ5/l2LFjtG3bll9//ZXvvvuOiRMnesa4tGvXjqFDh/LBBx+QlpZG165dWbVqFYcOHSpyzFdeeYXVq1fTuXNnxowZQ6tWrUhOTmbHjh389ttvJCcnl+p99O3blxdffJH777+frl27snv3br788ksaNWpUaL/hw4czf/58Jk+ezJ9//sl1111HVlYWv/32G48++ij9+/fnhhtu4L777uPdd9/l4MGDni6cdevWccMNNzBu3DjAfVn4K6+8wgMPPECHDh34448/OHDgQIlrDgwM5Prrr+e1117DbrdTp04dfv31V44ePVpk3xkzZvDrr7/SvXt3HnzwQVq2bEl8fDxLlixh/fr1hbr8hg8fzrvvvsvq1at59dVXS/U5ClEjaXadkxDikgpeKn0x518qraqqmpGRoU6aNEmNjo5WjUaj2rRpU3XWrFmey3Tz5eTkqBMmTFDDwsJUi8Wi9uvXT42NjS1y+bCqquqZM2fUsWPHqjExMarRaFRr166t9uzZU/3kk088+5TmUunHH39cjYqKUn19fdVu3bqpmzZtUrt3765279690L7Z2dnqs88+qzZs2NBz3jvvvFM9fPiwZx+Hw6HOmjVLbdGihWoymdSIiAi1T58+6vbt2wsdZ/To0WpQUJAaEBCg3nXXXWpCQsIFL5VOTEwsUvfJkyfVO+64Qw0ODlaDgoLUwYMHq3FxccV+XsePH1eHDx+uRkREqGazWW3UqJE6duxY1Wq1FjnuFVdcoep0OvXkyZMX/dyEEKqqqOp57Z9CCCEq3VVXXUVoaCirVq3SuhQhqjwZ8yKEEBrbtm0bO3fuZPjw4VqXIoRXkJYXIYTQyJ49e9i+fTtvvPEGSUlJHDlyBB8fH63LEqLKk5YXIYTQyNdff83999+P3W5n4cKFElyEKCFpeRFCCCGEV5GWFy
GEEEJ4FQkvQgghhPAqXj1JncvlIi4ujoCAgMtaOVYIIYQQlUdVVTIyMoiOji7TOl5eHV7i4uKIiYnRugwhhBBClEF
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"plt.plot(alex9.history[\"accuracy\"])\n",
"plt.plot(alex9.history['val_accuracy'])\n",
"plt.plot(alex9.history['loss'])\n",
"plt.plot(alex9.history['val_loss'])\n",
"plt.title(\"Model accuracy\")\n",
"plt.ylabel(\"Value\")\n",
"plt.xlabel(\"Epoch\")\n",
"plt.legend([\"Accuracy\",\"Validation Accuracy\",\"Loss\",\"Validation Loss\"])\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 58,
2023-01-06 21:43:41 +01:00
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 4s 493ms/step - loss: 1.3864 - accuracy: 0.6953\n"
]
},
{
"data": {
"text/plain": [
"[1.386448621749878, 0.6953125]"
]
},
"execution_count": 58,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model_batch_drop.evaluate(test_ds)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}