Symulowanie-wizualne/sw_lab9-10_4.ipynb

1088 lines
208 KiB
Plaintext
Raw Normal View History

2023-01-11 16:25:40 +01:00
{
"cells": [
{
"cell_type": "markdown",
"id": "dd9a88f0",
"metadata": {},
"source": [
"#### Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop"
]
},
{
"cell_type": "markdown",
"id": "acda0087",
"metadata": {},
"source": [
"### Generowanie dodatkowych zdjęć w oparciu o filtry krawędziowe"
]
},
{
"cell_type": "code",
2023-01-11 20:37:08 +01:00
"execution_count": 11,
2023-01-11 16:25:40 +01:00
"id": "f790226b",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import cv2 as cv\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import json\n",
"from tensorflow import keras\n",
"%matplotlib inline"
]
},
{
"cell_type": "code",
2023-01-11 20:37:08 +01:00
"execution_count": 12,
2023-01-11 16:25:40 +01:00
"id": "44319623",
"metadata": {},
"outputs": [],
"source": [
"def alex(filter_name, train_ds, test_ds, validation_ds):\n",
" from keras.callbacks import ModelCheckpoint, EarlyStopping\n",
" import matplotlib.pyplot as plt\n",
" import tensorflow as tf\n",
"\n",
" alexnet = keras.models.Sequential([\n",
" keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Dense(10, activation='softmax')\n",
" ])\n",
"\n",
" alexnet.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(lr=.001), metrics=['accuracy'])\n",
" alexnet.summary()\n",
"\n",
" checkpoint = ModelCheckpoint(\"alex_2.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)\n",
" early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
" \n",
" alex = alexnet.fit_generator(\n",
" steps_per_epoch=len(train_ds), \n",
" generator=train_ds, \n",
" validation_data= validation_ds, \n",
" validation_steps=len(validation_ds), \n",
" epochs=25, \n",
" callbacks=[checkpoint,early])\n",
"\n",
" plt.plot(alex.history[\"accuracy\"])\n",
" plt.plot(alex.history['val_accuracy'])\n",
" plt.plot(alex.history['loss'])\n",
" plt.plot(alex.history['val_loss'])\n",
" plt.title(f\"Model accuracy - {filter_name}\")\n",
" plt.ylabel(\"Value\")\n",
" plt.xlabel(\"Epoch\")\n",
" plt.legend([\"Accuracy\",\"Validation Accuracy\",\"Loss\",\"Validation Loss\"])\n",
" plt.show()\n",
"\n",
" alexnet.evaluate(test_ds)\n"
]
},
{
"cell_type": "code",
2023-01-11 20:37:08 +01:00
"execution_count": 13,
2023-01-11 16:25:40 +01:00
"id": "4e3ebfd0",
"metadata": {},
"outputs": [],
"source": [
"def fix_float_img(img):\n",
" img_normed = 255 * (img - img.min()) / (img.max() - img.min())\n",
" img_normed = np.array(img_normed, np.int)\n",
" return img_normed"
]
},
{
"cell_type": "code",
2023-01-11 20:37:08 +01:00
"execution_count": 14,
2023-01-11 16:25:40 +01:00
"id": "ffeda62d",
"metadata": {},
"outputs": [],
"source": [
"# directory = r\"train_test_sw/train_sw_kontrast\"\n",
"# subdirs = [r\"/Tomato\", r\"/Lemon\", r\"/Beech\", r\"/Mean\", r\"/Gardenia\"]\n",
"\n",
"# json_entries = []\n",
"\n",
"# for sub in subdirs:\n",
"# path = directory + sub\n",
" \n",
"# for filename in os.listdir(path):\n",
"# f = os.path.join(path, filename)\n",
" \n",
"# if os.path.isfile(f):\n",
"# img = cv.imread(f)\n",
"\n",
"\n",
"# lab= cv.cvtColor(img, cv.COLOR_BGR2LAB)\n",
"# l_channel, a, b = cv.split(lab)\n",
"# # Applying CLAHE to L-channel\n",
"# # feel free to try different values for the limit and grid size:\n",
"# clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n",
"# cl = clahe.apply(l_channel)\n",
"# # merge the CLAHE enhanced L-channel with the a and b channel\n",
"# limg = cv.merge((cl,a,b))\n",
"# # Converting image from LAB Color model to BGR color spcae\n",
"# enhanced_img = cv.cvtColor(limg, cv.COLOR_LAB2BGR)\n",
"# filename_edge = f[:-4] + '_kontrast.png'\n",
"# #final_edge = fix_float_img(adjusted)\n",
"# cv.imwrite(filename_edge, enhanced_img)\n",
" \n",
" \n"
]
},
{
"cell_type": "code",
2023-01-11 20:37:08 +01:00
"execution_count": 15,
2023-01-11 16:25:40 +01:00
"id": "72c68d57",
"metadata": {},
"outputs": [],
"source": [
"# directory = r\"train_test_sw/train_sw_saturacja\"\n",
"# subdirs = [r\"/Tomato\", r\"/Lemon\", r\"/Beech\", r\"/Mean\", r\"/Gardenia\"]\n",
"\n",
"# json_entries = []\n",
"\n",
"# for sub in subdirs:\n",
"# path = directory + sub\n",
" \n",
"# for filename in os.listdir(path):\n",
"# f = os.path.join(path, filename)\n",
" \n",
"# if os.path.isfile(f):\n",
"# img = cv.imread(f)\n",
"# hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n",
"# greenMask = cv.inRange(hsv, (26, 10, 30), (97, 100, 255))\n",
"# hsv[:,:,1] = greenMask\n",
"# back = cv.cvtColor(hsv, cv.COLOR_HSV2RGB)\n",
"# filename_edge = f[:-4] + '_saturacja.png'\n",
"# #final_edge = fix_float_img(adjusted)\n",
"# cv.imwrite(filename_edge, back)"
]
},
{
"cell_type": "code",
2023-01-11 20:37:08 +01:00
"execution_count": 16,
2023-01-11 16:25:40 +01:00
"id": "6a3f8c81",
"metadata": {},
"outputs": [],
"source": [
"# directory = r\"train_test_sw/train_sw_cartoon\"\n",
"# subdirs = [r\"/Tomato\", r\"/Lemon\", r\"/Beech\", r\"/Mean\", r\"/Gardenia\"]\n",
"\n",
"# json_entries = []\n",
"\n",
"# for sub in subdirs:\n",
"# path = directory + sub\n",
" \n",
"# for filename in os.listdir(path):\n",
"# f = os.path.join(path, filename)\n",
" \n",
"# if os.path.isfile(f):\n",
"# img = cv.imread(f)\n",
"\n",
"# edges1 = cv.bitwise_not(cv.Canny(img, 100, 200)) # for thin edges and inverting the mask obatined\n",
"# gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n",
"# gray = cv.medianBlur(gray, 5) # applying median blur with kernel size of 5\n",
"# edges2 = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 7, 7) # thick edges\n",
"# dst = cv.edgePreservingFilter(img, flags=2, sigma_s=20, sigma_r=0.1) # you can also use bilateral filter but that is slow\n",
"# # flag = 1 for RECURS_FILTER (Recursive Filtering) and 2 for NORMCONV_FILTER (Normalized Convolution). NORMCONV_FILTER produces sharpening of the edges but is slower.\n",
"# # sigma_s controls the size of the neighborhood. Range 1 - 200\n",
"# # sigma_r controls the how dissimilar colors within the neighborhood will be averaged. A larger sigma_r results in large regions of constant color. Range 0 - 1\n",
"# cartoon = cv.bitwise_and(dst, dst, mask=edges1) # adding thin edges to smoothened imag\n",
"\n",
"\n",
"# filename_edge = f[:-4] + '_cartoon.png'\n",
"# #final_edge = fix_float_img(adjusted)\n",
"# cv.imwrite(filename_edge, cartoon)\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "0c0cd453",
"metadata": {},
"source": [
"## Data"
]
},
{
"cell_type": "code",
2023-01-11 20:37:08 +01:00
"execution_count": 17,
2023-01-11 16:25:40 +01:00
"id": "c4f0f653",
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"import subprocess\n",
"import pkg_resources\n",
"import numpy as np\n",
"peachy = []\n",
"\n",
"required = { 'scikit-image'}\n",
"installed = {pkg.key for pkg in pkg_resources.working_set}\n",
"missing = required - installed\n",
"\n",
"if missing: \n",
" python = sys.executable\n",
" subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL)\n",
"\n",
"def load_train_data(input_dir, newSize=(227,227)):\n",
" import numpy as np\n",
" import pandas as pd\n",
" import os\n",
" from skimage.io import imread\n",
" import cv2 as cv\n",
" from pathlib import Path\n",
" import random\n",
" from shutil import copyfile, rmtree\n",
" import json\n",
"\n",
" import seaborn as sns\n",
" import matplotlib.pyplot as plt\n",
"\n",
" import matplotlib\n",
" \n",
" image_dir = Path(input_dir)\n",
" categories_name = []\n",
" for file in os.listdir(image_dir):\n",
" d = os.path.join(image_dir, file)\n",
" if os.path.isdir(d):\n",
" categories_name.append(file)\n",
"\n",
" folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]\n",
"\n",
" train_img = []\n",
" categories_count=[]\n",
" labels=[]\n",
" for i, direc in enumerate(folders):\n",
" count = 0\n",
" \n",
" for obj in direc.iterdir():\n",
" if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':\n",
" labels.append(os.path.basename(os.path.normpath(direc)))\n",
" count += 1\n",
" img = imread(obj)#zwraca ndarry postaci xSize x ySize x colorDepth\n",
" if img.shape[-1] == 256:\n",
" img = np.repeat(img[..., np.newaxis], 3, axis=2)\n",
" elif img.shape[-1] == 4:\n",
" img = img[:, :, :3]\n",
" img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)# zwraca ndarray\n",
" img = img / 255#normalizacja\n",
" train_img.append(img)\n",
" categories_count.append(count)\n",
" X={}\n",
" X[\"values\"] = np.array(train_img)\n",
" X[\"categories_name\"] = categories_name\n",
" X[\"categories_count\"] = categories_count\n",
" X[\"labels\"]=labels\n",
" return X\n",
"\n",
"def load_test_data(input_dir, newSize=(227,227)):\n",
" import numpy as np\n",
" import pandas as pd\n",
" import os\n",
" from skimage.io import imread\n",
" import cv2 as cv\n",
" from pathlib import Path\n",
" import random\n",
" from shutil import copyfile, rmtree\n",
" import json\n",
"\n",
" import seaborn as sns\n",
" import matplotlib.pyplot as plt\n",
"\n",
" import matplotlib\n",
"\n",
" image_path = Path(input_dir)\n",
"\n",
" labels_path = image_path.parents[0] / 'test_labels.json'\n",
"\n",
" jsonString = labels_path.read_text()\n",
" objects = json.loads(jsonString)\n",
"\n",
" categories_name = []\n",
" categories_count=[]\n",
" count = 0\n",
" c = objects[0]['value']\n",
" for e in objects:\n",
" if e['value'] != c:\n",
" categories_count.append(count)\n",
" c = e['value']\n",
" count = 1\n",
" else:\n",
" count += 1\n",
" if not e['value'] in categories_name:\n",
" categories_name.append(e['value'])\n",
"\n",
" categories_count.append(count)\n",
" \n",
" test_img = []\n",
"\n",
" labels=[]\n",
" for e in objects:\n",
" p = image_path / e['filename']\n",
" img = imread(p)#zwraca ndarry postaci xSize x ySize x colorDepth\n",
2023-01-11 20:37:08 +01:00
" if img.shape[-1] == 4:\n",
" img = img[:, :, :3]\n",
2023-01-11 16:25:40 +01:00
" img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)# zwraca ndarray\n",
" img = img / 255#normalizacja\n",
" test_img.append(img)\n",
" labels.append(e['value'])\n",
"\n",
" X={}\n",
" X[\"values\"] = np.array(test_img)\n",
" X[\"categories_name\"] = categories_name\n",
" X[\"categories_count\"] = categories_count\n",
" X[\"labels\"]=labels\n",
" return X\n",
"\n"
]
},
{
"cell_type": "code",
2023-01-11 20:37:08 +01:00
"execution_count": 18,
2023-01-11 16:25:40 +01:00
"id": "b0dceacc",
"metadata": {},
"outputs": [],
"source": [
"def data_prep_alex(filter_name):\n",
" from sklearn.model_selection import train_test_split\n",
" from sklearn.preprocessing import LabelEncoder\n",
" import tensorflow as tf\n",
"\n",
" data_train = load_train_data(f\"./train_test_sw/train_sw_{filter_name}\")\n",
" values_train = data_train['values']\n",
" labels_train = data_train['labels']\n",
" data_test = load_test_data(\"./train_test_sw/test_sw\")\n",
" X_test = data_test['values']\n",
" y_test = data_test['labels']\n",
"\n",
" X_train, X_validate, y_train, y_validate = train_test_split(values_train, labels_train, test_size=0.2, random_state=42)\n",
"\n",
" class_le = LabelEncoder()\n",
" y_train_enc = class_le.fit_transform(y_train)\n",
" y_validate_enc = class_le.fit_transform(y_validate)\n",
" y_test_enc = class_le.fit_transform(y_test)\n",
"\n",
" train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))\n",
" validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))\n",
" test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))\n",
"\n",
" train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()\n",
" test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()\n",
" validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()\n",
"\n",
" train_ds = (train_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
" test_ds = (test_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
" validation_ds = (validation_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
"\n",
" return train_ds, test_ds, validation_ds\n",
" "
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "47d3b363",
"metadata": {},
"source": [
"# ALEXNET"
]
},
{
"cell_type": "code",
2023-01-11 20:37:08 +01:00
"execution_count": 19,
2023-01-11 16:25:40 +01:00
"id": "108a46e4",
"metadata": {},
"outputs": [],
"source": [
2023-01-11 20:37:08 +01:00
"filters = ['kontrast', 'cartoon', 'saturacja']"
2023-01-11 16:25:40 +01:00
]
},
{
"cell_type": "code",
2023-01-11 20:37:08 +01:00
"execution_count": 20,
2023-01-11 16:25:40 +01:00
"id": "12a16bca",
"metadata": {},
"outputs": [
2023-01-11 20:37:08 +01:00
{
"name": "stdout",
"output_type": "stream",
"text": [
"kontrast ---------------------------------------\n"
]
},
2023-01-11 16:25:40 +01:00
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
2023-01-11 20:37:08 +01:00
"Model: \"sequential_1\"\n",
2023-01-11 16:25:40 +01:00
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
2023-01-11 20:37:08 +01:00
" conv2d_5 (Conv2D) (None, 55, 55, 96) 34944 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" batch_normalization_5 (Batc (None, 55, 55, 96) 384 \n",
" hNormalization) \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" max_pooling2d_3 (MaxPooling (None, 27, 27, 96) 0 \n",
" 2D) \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" conv2d_6 (Conv2D) (None, 27, 27, 256) 614656 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" batch_normalization_6 (Batc (None, 27, 27, 256) 1024 \n",
2023-01-11 16:25:40 +01:00
" hNormalization) \n",
" \n",
2023-01-11 20:37:08 +01:00
" max_pooling2d_4 (MaxPooling (None, 13, 13, 256) 0 \n",
2023-01-11 16:25:40 +01:00
" 2D) \n",
" \n",
2023-01-11 20:37:08 +01:00
" conv2d_7 (Conv2D) (None, 13, 13, 384) 885120 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" batch_normalization_7 (Batc (None, 13, 13, 384) 1536 \n",
2023-01-11 16:25:40 +01:00
" hNormalization) \n",
" \n",
2023-01-11 20:37:08 +01:00
" conv2d_8 (Conv2D) (None, 13, 13, 384) 1327488 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" batch_normalization_8 (Batc (None, 13, 13, 384) 1536 \n",
2023-01-11 16:25:40 +01:00
" hNormalization) \n",
" \n",
2023-01-11 20:37:08 +01:00
" conv2d_9 (Conv2D) (None, 13, 13, 256) 884992 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" batch_normalization_9 (Batc (None, 13, 13, 256) 1024 \n",
2023-01-11 16:25:40 +01:00
" hNormalization) \n",
" \n",
2023-01-11 20:37:08 +01:00
" max_pooling2d_5 (MaxPooling (None, 6, 6, 256) 0 \n",
2023-01-11 16:25:40 +01:00
" 2D) \n",
" \n",
2023-01-11 20:37:08 +01:00
" flatten_1 (Flatten) (None, 9216) 0 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" dense_3 (Dense) (None, 4096) 37752832 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" dropout_2 (Dropout) (None, 4096) 0 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" dense_4 (Dense) (None, 4096) 16781312 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" dropout_3 (Dropout) (None, 4096) 0 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" dense_5 (Dense) (None, 10) 40970 \n",
2023-01-11 16:25:40 +01:00
" \n",
"=================================================================\n",
"Total params: 58,327,818\n",
"Trainable params: 58,325,066\n",
"Non-trainable params: 2,752\n",
"_________________________________________________________________\n",
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
2023-01-11 20:37:08 +01:00
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
2023-01-11 16:25:40 +01:00
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
2023-01-11 20:37:08 +01:00
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_35974/3983922004.py:34: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex = alexnet.fit_generator(\n"
2023-01-11 16:25:40 +01:00
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 3.8345 - accuracy: 0.3658\n",
"Epoch 1: val_accuracy improved from -inf to 0.22656, saving model to alex_2.h5\n",
"51/51 [==============================] - 46s 891ms/step - loss: 3.8345 - accuracy: 0.3658 - val_loss: 2.1574 - val_accuracy: 0.2266\n",
2023-01-11 16:25:40 +01:00
"Epoch 2/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 1.3397 - accuracy: 0.5362\n",
"Epoch 2: val_accuracy improved from 0.22656 to 0.23177, saving model to alex_2.h5\n",
"51/51 [==============================] - 48s 945ms/step - loss: 1.3397 - accuracy: 0.5362 - val_loss: 2.7271 - val_accuracy: 0.2318\n",
2023-01-11 16:25:40 +01:00
"Epoch 3/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.9793 - accuracy: 0.6428\n",
"Epoch 3: val_accuracy improved from 0.23177 to 0.34635, saving model to alex_2.h5\n",
"51/51 [==============================] - 49s 954ms/step - loss: 0.9793 - accuracy: 0.6428 - val_loss: 3.4108 - val_accuracy: 0.3464\n",
2023-01-11 16:25:40 +01:00
"Epoch 4/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.6715 - accuracy: 0.7273\n",
"Epoch 4: val_accuracy did not improve from 0.34635\n",
"51/51 [==============================] - 51s 1s/step - loss: 0.6715 - accuracy: 0.7273 - val_loss: 4.2069 - val_accuracy: 0.3411\n",
2023-01-11 16:25:40 +01:00
"Epoch 5/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.5853 - accuracy: 0.7917\n",
"Epoch 5: val_accuracy did not improve from 0.34635\n",
"51/51 [==============================] - 49s 962ms/step - loss: 0.5853 - accuracy: 0.7917 - val_loss: 4.3773 - val_accuracy: 0.2839\n",
2023-01-11 16:25:40 +01:00
"Epoch 6/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.4176 - accuracy: 0.8413\n",
"Epoch 6: val_accuracy did not improve from 0.34635\n",
"51/51 [==============================] - 49s 961ms/step - loss: 0.4176 - accuracy: 0.8413 - val_loss: 5.1601 - val_accuracy: 0.3281\n",
2023-01-11 16:25:40 +01:00
"Epoch 7/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.3224 - accuracy: 0.8756\n",
2023-01-11 16:25:40 +01:00
"Epoch 7: val_accuracy did not improve from 0.34635\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - 49s 957ms/step - loss: 0.3224 - accuracy: 0.8756 - val_loss: 5.2943 - val_accuracy: 0.3307\n",
2023-01-11 16:25:40 +01:00
"Epoch 8/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.2591 - accuracy: 0.9026\n",
"Epoch 8: val_accuracy improved from 0.34635 to 0.41406, saving model to alex_2.h5\n",
"51/51 [==============================] - 50s 985ms/step - loss: 0.2591 - accuracy: 0.9026 - val_loss: 3.7030 - val_accuracy: 0.4141\n",
2023-01-11 16:25:40 +01:00
"Epoch 9/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.2748 - accuracy: 0.8964\n",
"Epoch 9: val_accuracy improved from 0.41406 to 0.50000, saving model to alex_2.h5\n",
"51/51 [==============================] - 53s 1s/step - loss: 0.2748 - accuracy: 0.8964 - val_loss: 2.1064 - val_accuracy: 0.5000\n",
2023-01-11 16:25:40 +01:00
"Epoch 10/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.2015 - accuracy: 0.9240\n",
"Epoch 10: val_accuracy improved from 0.50000 to 0.62500, saving model to alex_2.h5\n",
"51/51 [==============================] - 52s 1s/step - loss: 0.2015 - accuracy: 0.9240 - val_loss: 1.3254 - val_accuracy: 0.6250\n",
2023-01-11 16:25:40 +01:00
"Epoch 11/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.1754 - accuracy: 0.9350\n",
"Epoch 11: val_accuracy improved from 0.62500 to 0.74740, saving model to alex_2.h5\n",
"51/51 [==============================] - 54s 1s/step - loss: 0.1754 - accuracy: 0.9350 - val_loss: 0.7914 - val_accuracy: 0.7474\n",
2023-01-11 16:25:40 +01:00
"Epoch 12/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.1711 - accuracy: 0.9412 \n",
"Epoch 12: val_accuracy did not improve from 0.74740\n",
"51/51 [==============================] - 738s 15s/step - loss: 0.1711 - accuracy: 0.9412 - val_loss: 1.0148 - val_accuracy: 0.7031\n",
2023-01-11 16:25:40 +01:00
"Epoch 13/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.1424 - accuracy: 0.9498\n",
"Epoch 13: val_accuracy improved from 0.74740 to 0.82031, saving model to alex_2.h5\n",
"51/51 [==============================] - 49s 951ms/step - loss: 0.1424 - accuracy: 0.9498 - val_loss: 0.5437 - val_accuracy: 0.8203\n",
2023-01-11 16:25:40 +01:00
"Epoch 14/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.1434 - accuracy: 0.9418\n",
"Epoch 14: val_accuracy improved from 0.82031 to 0.83594, saving model to alex_2.h5\n",
"51/51 [==============================] - 53s 1s/step - loss: 0.1434 - accuracy: 0.9418 - val_loss: 0.4773 - val_accuracy: 0.8359\n",
2023-01-11 16:25:40 +01:00
"Epoch 15/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.0943 - accuracy: 0.9681\n",
"Epoch 15: val_accuracy did not improve from 0.83594\n",
"51/51 [==============================] - 50s 974ms/step - loss: 0.0943 - accuracy: 0.9681 - val_loss: 0.6302 - val_accuracy: 0.8125\n",
2023-01-11 16:25:40 +01:00
"Epoch 16/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.0859 - accuracy: 0.9669\n",
"Epoch 16: val_accuracy improved from 0.83594 to 0.93229, saving model to alex_2.h5\n",
"51/51 [==============================] - 48s 948ms/step - loss: 0.0859 - accuracy: 0.9669 - val_loss: 0.2049 - val_accuracy: 0.9323\n",
2023-01-11 16:25:40 +01:00
"Epoch 17/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.0849 - accuracy: 0.9688\n",
"Epoch 17: val_accuracy did not improve from 0.93229\n",
"51/51 [==============================] - 55s 1s/step - loss: 0.0849 - accuracy: 0.9688 - val_loss: 0.3428 - val_accuracy: 0.8932\n",
2023-01-11 16:25:40 +01:00
"Epoch 18/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.0876 - accuracy: 0.9712\n",
"Epoch 18: val_accuracy did not improve from 0.93229\n",
"51/51 [==============================] - 78s 2s/step - loss: 0.0876 - accuracy: 0.9712 - val_loss: 0.7060 - val_accuracy: 0.8151\n",
2023-01-11 16:25:40 +01:00
"Epoch 19/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.0708 - accuracy: 0.9737\n",
"Epoch 19: val_accuracy improved from 0.93229 to 0.94271, saving model to alex_2.h5\n",
"51/51 [==============================] - 53s 1s/step - loss: 0.0708 - accuracy: 0.9737 - val_loss: 0.1935 - val_accuracy: 0.9427\n",
2023-01-11 16:25:40 +01:00
"Epoch 20/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.0829 - accuracy: 0.9657\n",
"Epoch 20: val_accuracy did not improve from 0.94271\n",
"51/51 [==============================] - 67s 1s/step - loss: 0.0829 - accuracy: 0.9657 - val_loss: 0.1955 - val_accuracy: 0.9375\n",
2023-01-11 16:25:40 +01:00
"Epoch 21/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.0404 - accuracy: 0.9865\n",
"Epoch 21: val_accuracy improved from 0.94271 to 0.95312, saving model to alex_2.h5\n",
"51/51 [==============================] - 140s 3s/step - loss: 0.0404 - accuracy: 0.9865 - val_loss: 0.1493 - val_accuracy: 0.9531\n",
2023-01-11 16:25:40 +01:00
"Epoch 22/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.0370 - accuracy: 0.9877\n",
"Epoch 22: val_accuracy did not improve from 0.95312\n",
"51/51 [==============================] - 64s 1s/step - loss: 0.0370 - accuracy: 0.9877 - val_loss: 0.1635 - val_accuracy: 0.9505\n",
2023-01-11 16:25:40 +01:00
"Epoch 23/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.0353 - accuracy: 0.9865\n",
"Epoch 23: val_accuracy did not improve from 0.95312\n",
"51/51 [==============================] - 89s 2s/step - loss: 0.0353 - accuracy: 0.9865 - val_loss: 0.4217 - val_accuracy: 0.8932\n",
2023-01-11 16:25:40 +01:00
"Epoch 24/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.0308 - accuracy: 0.9920\n",
"Epoch 24: val_accuracy did not improve from 0.95312\n",
"51/51 [==============================] - 133s 3s/step - loss: 0.0308 - accuracy: 0.9920 - val_loss: 0.2005 - val_accuracy: 0.9349\n",
2023-01-11 16:25:40 +01:00
"Epoch 25/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.0203 - accuracy: 0.9957\n",
"Epoch 25: val_accuracy improved from 0.95312 to 0.95573, saving model to alex_2.h5\n",
"51/51 [==============================] - 54s 1s/step - loss: 0.0203 - accuracy: 0.9957 - val_loss: 0.1394 - val_accuracy: 0.9557\n"
2023-01-11 16:25:40 +01:00
]
},
{
"data": {
2023-01-11 20:37:08 +01:00
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAi8AAAHHCAYAAAB3K7g2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACY+UlEQVR4nOzdd3xT9f7H8ddJ0ibdmw52WWWUITiQK1uRJUtQBNk4ca+LE9x7XZSfchHkKoKoIKKCgCxFtiDIhgIFCt0jHWnG+f2RJrS2QAtNT1s+z/vIo83JyTmfll777ncqqqqqCCGEEELUEDqtCxBCCCGEqAgJL0IIIYSoUSS8CCGEEKJGkfAihBBCiBpFwosQQgghahQJL0IIIYSoUSS8CCGEEKJGkfAihBBCiBpFwosQQgghahQJL0JUMUVRmDZtWoXfd+zYMRRFYe7cuZVek7g0rn+Tt99+W+tShLiiSHgRV6S5c+eiKAqKovDbb7+Vel1VVerXr4+iKAwYMECDCoU4v7179zJt2jSOHTumyf3z8vKYNm0aa9eu1eT+Qkh4EVc0k8nE/PnzSx1ft24dJ0+exGg0alCVEBe2d+9epk+frml4mT59uoQXoRkJL+KK1q9fPxYtWoTNZitxfP78+XTs2JGoqCiNKrty5Obmal1CraaqKvn5+VqXIUSlkvAirmgjR44kLS2NlStXuo8VFhbyzTffcMcdd5T5ntzcXB577DHq16+P0WikRYsWvP322/xzg3aLxcIjjzxCREQEAQEB3HLLLZw8ebLMa546dYoJEyYQGRmJ0WikdevWfPbZZ5f0NaWnp/P4448THx+Pv78/gYGB9O3bl127dpU6t6CggGnTptG8eXNMJhPR0dEMHTqUI0eOuM9xOBx88MEHxMfHYzKZiIiI4Oabb2bbtm3Ahcfi/HN8z7Rp01AUhb1793LHHXcQEhLCv/71LwD++usvxo0bR2xsLCaTiaioKCZMmEBaWlqZ36+JEycSExOD0WikcePG3HvvvRQWFnL06FEUReG9994r9b6NGzeiKApfffVVRb+t5aaqKnfddRfe3t589913ANhsNl566SWaNGmC0WikUaNGPP3001gslhLvbdSoEQMGDOC3337jmmuuwWQyERsby7x589znzJ07l+HDhwPQo0cPd/enqxXEdY0VK1bQqVMnfHx8+OSTTwCYM2cOPXv2pE6dOhiNRlq1asXMmTNLfQ3btm2jT58+hIeH4+PjQ+PGjZkwYQLg/PeOiIgAYPr06e77X8o4LiEulUHrAoTQUqNGjejcuTNfffUVffv2BeDnn38mKyuL22+/nQ8//LDE+aqqcsstt7BmzRomTpxI+/btWbFiBU888QSnTp0q8Qtz0qRJfPHFF9xxxx1cf/31/Prrr/Tv379UDWfPnuW6665DURSmTJlCREQEP//8MxMnTiQ7O5uHH364Ql/T0aNHWbJkCcOHD6dx48acPXuWTz75hG7durF3715iYmIAsNvtDBgwgNWrV3P77bfz0EMPkZOTw8qVK9mzZw9NmjQBYOLEicydO5e+ffsyadIkbDYbGzZsYNOmTXTq1KlCtbkMHz6cZs2a8eqrr7pD38qVKzl69Cjjx48nKiqKv//+m08//ZS///6bTZs2oSgKAKdPn+aaa64hMzOTu+66i7i4OE6dOsU333xDXl4esbGxdOnShS+//JJHHnmkxH2//PJLAgICGDRo0CXVfTF2u50JEyawcOFCFi9e7P73njRpEp9//jm33norjz32GJs3b+a1115j3759LF68uMQ1Dh8+zK233srEiRMZO3Ysn332GePGjaNjx460bt2arl278uCDD/Lhhx/y9NNP07JlSwD3R4ADBw4wcuRI7r77biZPnkyLFi0AmDlzJq1bt+aWW27BYDDwww8/cN999+FwOLj//vsBSE5O5qabbiIiIoJ///vfBAcHc+zYMXcQi4iIYObMmdx7770MGTKEoU
OHAtC2bVuPfE+FKJMqxBVozpw5KqBu3bpVnTFjhhoQEKDm5eWpqqqqw4cPV3v06KGqqqo2bNhQ7d+/v/t9S5YsUQH15ZdfLnG9W2+9VVUURT18+LCqqqq6c+dOFVDvu+++EufdcccdKqC+8MIL7mMTJ05Uo6Oj1dTU1BLn3n777WpQUJC7roSEBBVQ58yZc8GvraCgQLXb7SWOJSQkqEajUX3xxRfdxz777DMVUN99991S13A4HKqqquqvv/6qAuqDDz543nMuVNc/v9YXXnhBBdSRI0eWOtf1dRb31VdfqYC6fv1697ExY8aoOp1O3bp163lr+uSTT1RA3bdvn/u1wsJCNTw8XB07dmyp910q19f+1ltvqVarVb3ttttUHx8fdcWKFe5zXD8LkyZNKvHexx9/XAXUX3/91X2sYcOGpb7e5ORk1Wg0qo899pj72KJFi1RAXbNmTamaXNdYvnx5qdfK+h736dNHjY2NdT9fvHix+/8b55OSklLq31aIqiTdRuKKN2LECPLz81m2bBk5OTksW7bsvF1GP/30E3q9ngcffLDE8cceewxVVfn555/d5wGlzvtnK4qqqnz77bcMHDgQVVVJTU11P/r06UNWVhY7duyo0NdjNBrR6Zz/17bb7aSlpeHv70+LFi1KXOvbb78lPDycBx54oNQ1XK0c3377LYqi8MILL5z3nEtxzz33lDrm4+Pj/rygoIDU1FSuu+46AHfdDoeDJUuWMHDgwDJbfVw1jRgxApPJxJdfful+bcWKFaSmpjJ69OhLrvt8CgsLGT58OMuWLeOnn37ipptucr/m+ll49NFHS7znscceA+DHH38scbxVq1bccMMN7ucRERG0aNGCo0ePlruexo0b06dPn1LHi3+Ps7KySE1NpVu3bhw9epSsrCwAgoODAVi2bBlWq7Xc9xSiKkl4EVe8iIgIevfuzfz58/nuu++w2+3ceuutZZ57/PhxYmJiCAgIKHHc1WR//Phx90edTufuenFxNd+7pKSkkJmZyaeffkpERESJx/jx4wFnM35FOBwO3nvvPZo1a4bRaCQ8PJyIiAj++usv9y8ogCNHjtCiRQsMhvP3Hh85coSYmBhCQ0MrVMPFNG7cuNSx9PR0HnroISIjI/Hx8SEiIsJ9nqvulJQUsrOzadOmzQWvHxwczMCBA0vMJPvyyy+pW7cuPXv2vOB7z5w5U+JRnsGur732GkuWLOGbb76he/fuJV5z/Sw0bdq0xPGoqCiCg4PdPzMuDRo0KHX9kJAQMjIyLlqHS1nfX4Dff/+d3r174+fnR3BwMBERETz99NPAue9xt27dGDZsGNOnTyc8PJxBgwYxZ86cUuNzhNCSjHkRArjjjjuYPHkyZ86coW/fvu6/Pj3N4XAAMHr0aMaOHVvmORUdS/Dqq6/y3HPPMWHCBF566SVCQ0PR6XQ8/PDD7vtVpvO1wNjt9vO+p3gLgMuIESPYuHEjTzzxBO3bt8ff3x+Hw8HNN998SXWPGTOGRYsWsXHjRuLj41m6dCn33Xefu1XqfKKjo0s8nzNnDuPGjbvge/r06cPy5ct588036d69OyaTqdQ55W2p0uv1ZR5X/zEg/ELK+v4eOXKEXr16ERcXx7vvvkv9+vXx9vbmp59+4r333nN/jxVF4ZtvvmHTpk388MMPrFixggkTJvDOO++wadMm/P39y12HEJ4i4UUIYMiQIdx9991s2rSJhQsXnve8hg0bsmrVKnJyckq0vuzfv9/9uuujw+Fwt264HDhwoMT1XDOR7HY7vXv3rpSv5ZtvvqFHjx7Mnj27xPHMzEzCw8Pdz5s0acLmzZuxWq14eXmVea0mTZqwYsUK0tPTz9v6EhIS4r5+cf9sUbiQjIwMVq9ezfTp03n++efdxw8dOlTivIiICAIDA9mzZ89Fr3nzzTcTERHBl19+ybXXXkteXh533nnnRd9XfOYZQOvWrS/6nuuuu4577rmHAQMGMHz4cBYvXuxu0XL9LB
w6dKjEoNqzZ8+SmZnp/pmpiEvpsvvhhx+wWCwsXbq0ROvOmjVryjz/uuuu47rrruOVV15h/vz5jBo1igULFjBp0qT
2023-01-11 16:25:40 +01:00
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
2023-01-11 20:37:08 +01:00
{
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 2s 256ms/step - loss: 0.2136 - accuracy: 0.9375\n",
"cartoon ---------------------------------------\n"
]
},
2023-01-11 16:25:40 +01:00
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
2023-01-11 20:37:08 +01:00
"Model: \"sequential_2\"\n",
2023-01-11 16:25:40 +01:00
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
2023-01-11 20:37:08 +01:00
" conv2d_10 (Conv2D) (None, 55, 55, 96) 34944 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" batch_normalization_10 (Bat (None, 55, 55, 96) 384 \n",
" chNormalization) \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" max_pooling2d_6 (MaxPooling (None, 27, 27, 96) 0 \n",
2023-01-11 16:25:40 +01:00
" 2D) \n",
" \n",
2023-01-11 20:37:08 +01:00
" conv2d_11 (Conv2D) (None, 27, 27, 256) 614656 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" batch_normalization_11 (Bat (None, 27, 27, 256) 1024 \n",
" chNormalization) \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" max_pooling2d_7 (MaxPooling (None, 13, 13, 256) 0 \n",
2023-01-11 16:25:40 +01:00
" 2D) \n",
" \n",
2023-01-11 20:37:08 +01:00
" conv2d_12 (Conv2D) (None, 13, 13, 384) 885120 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" batch_normalization_12 (Bat (None, 13, 13, 384) 1536 \n",
" chNormalization) \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" conv2d_13 (Conv2D) (None, 13, 13, 384) 1327488 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" batch_normalization_13 (Bat (None, 13, 13, 384) 1536 \n",
" chNormalization) \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" conv2d_14 (Conv2D) (None, 13, 13, 256) 884992 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" batch_normalization_14 (Bat (None, 13, 13, 256) 1024 \n",
" chNormalization) \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" max_pooling2d_8 (MaxPooling (None, 6, 6, 256) 0 \n",
2023-01-11 16:25:40 +01:00
" 2D) \n",
" \n",
2023-01-11 20:37:08 +01:00
" flatten_2 (Flatten) (None, 9216) 0 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" dense_6 (Dense) (None, 4096) 37752832 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" dropout_4 (Dropout) (None, 4096) 0 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" dense_7 (Dense) (None, 4096) 16781312 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" dropout_5 (Dropout) (None, 4096) 0 \n",
2023-01-11 16:25:40 +01:00
" \n",
2023-01-11 20:37:08 +01:00
" dense_8 (Dense) (None, 10) 40970 \n",
2023-01-11 16:25:40 +01:00
" \n",
"=================================================================\n",
"Total params: 58,327,818\n",
"Trainable params: 58,325,066\n",
"Non-trainable params: 2,752\n",
"_________________________________________________________________\n",
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 3.3183 - accuracy: 0.4295\n",
"Epoch 1: val_accuracy improved from -inf to 0.23177, saving model to alex_2.h5\n",
"51/51 [==============================] - 49s 942ms/step - loss: 3.3183 - accuracy: 0.4295 - val_loss: 2.0209 - val_accuracy: 0.2318\n",
2023-01-11 16:25:40 +01:00
"Epoch 2/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 1.0712 - accuracy: 0.6654\n",
"Epoch 2: val_accuracy did not improve from 0.23177\n",
"51/51 [==============================] - 52s 1s/step - loss: 1.0712 - accuracy: 0.6654 - val_loss: 2.9587 - val_accuracy: 0.2188\n",
2023-01-11 16:25:40 +01:00
"Epoch 3/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.6603 - accuracy: 0.7739\n",
"Epoch 3: val_accuracy improved from 0.23177 to 0.31250, saving model to alex_2.h5\n",
"51/51 [==============================] - 55s 1s/step - loss: 0.6603 - accuracy: 0.7739 - val_loss: 3.3996 - val_accuracy: 0.3125\n",
2023-01-11 16:25:40 +01:00
"Epoch 4/25\n",
2023-01-11 20:37:08 +01:00
"51/51 [==============================] - ETA: 0s - loss: 0.5013 - accuracy: 0.8070\n",
"Epoch 4: val_accuracy improved from 0.31250 to 0.32031, saving model to alex_2.h5\n",
"51/51 [==============================] - 54s 1s/step - loss: 0.5013 - accuracy: 0.8070 - val_loss: 4.6634 - val_accuracy: 0.3203\n",
"Epoch 5/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.3286 - accuracy: 0.8762\n",
"Epoch 5: val_accuracy did not improve from 0.32031\n",
"51/51 [==============================] - 53s 1s/step - loss: 0.3286 - accuracy: 0.8762 - val_loss: 5.9495 - val_accuracy: 0.2109\n",
"Epoch 6/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.2392 - accuracy: 0.9124\n",
"Epoch 6: val_accuracy did not improve from 0.32031\n",
"51/51 [==============================] - 59s 1s/step - loss: 0.2392 - accuracy: 0.9124 - val_loss: 6.1043 - val_accuracy: 0.2760\n",
"Epoch 7/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.2096 - accuracy: 0.9216\n",
"Epoch 7: val_accuracy did not improve from 0.32031\n",
"51/51 [==============================] - 51s 995ms/step - loss: 0.2096 - accuracy: 0.9216 - val_loss: 6.5559 - val_accuracy: 0.2422\n",
"Epoch 8/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1786 - accuracy: 0.9387\n",
"Epoch 8: val_accuracy improved from 0.32031 to 0.34115, saving model to alex_2.h5\n",
"51/51 [==============================] - 47s 913ms/step - loss: 0.1786 - accuracy: 0.9387 - val_loss: 5.2047 - val_accuracy: 0.3411\n",
"Epoch 9/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1700 - accuracy: 0.9387\n",
"Epoch 9: val_accuracy improved from 0.34115 to 0.41667, saving model to alex_2.h5\n",
"51/51 [==============================] - 47s 914ms/step - loss: 0.1700 - accuracy: 0.9387 - val_loss: 3.7162 - val_accuracy: 0.4167\n",
"Epoch 10/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1615 - accuracy: 0.9430\n",
"Epoch 10: val_accuracy improved from 0.41667 to 0.59375, saving model to alex_2.h5\n",
"51/51 [==============================] - 47s 915ms/step - loss: 0.1615 - accuracy: 0.9430 - val_loss: 1.8405 - val_accuracy: 0.5938\n",
"Epoch 11/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1049 - accuracy: 0.9602\n",
"Epoch 11: val_accuracy improved from 0.59375 to 0.66406, saving model to alex_2.h5\n",
"51/51 [==============================] - 51s 1s/step - loss: 0.1049 - accuracy: 0.9602 - val_loss: 1.1911 - val_accuracy: 0.6641\n",
"Epoch 12/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0944 - accuracy: 0.9657\n",
"Epoch 12: val_accuracy improved from 0.66406 to 0.76823, saving model to alex_2.h5\n",
"51/51 [==============================] - 54s 1s/step - loss: 0.0944 - accuracy: 0.9657 - val_loss: 0.8048 - val_accuracy: 0.7682\n",
"Epoch 13/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0714 - accuracy: 0.9761\n",
"Epoch 13: val_accuracy improved from 0.76823 to 0.96615, saving model to alex_2.h5\n",
"51/51 [==============================] - 112s 2s/step - loss: 0.0714 - accuracy: 0.9761 - val_loss: 0.0924 - val_accuracy: 0.9661\n",
"Epoch 14/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0788 - accuracy: 0.9694\n",
"Epoch 14: val_accuracy did not improve from 0.96615\n",
"51/51 [==============================] - 109s 2s/step - loss: 0.0788 - accuracy: 0.9694 - val_loss: 0.1619 - val_accuracy: 0.9323\n",
"Epoch 15/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0630 - accuracy: 0.9847\n",
"Epoch 15: val_accuracy did not improve from 0.96615\n",
"51/51 [==============================] - 59s 1s/step - loss: 0.0630 - accuracy: 0.9847 - val_loss: 0.3735 - val_accuracy: 0.8750\n",
"Epoch 16/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0662 - accuracy: 0.9779\n",
"Epoch 16: val_accuracy did not improve from 0.96615\n",
"51/51 [==============================] - 49s 967ms/step - loss: 0.0662 - accuracy: 0.9779 - val_loss: 0.1856 - val_accuracy: 0.9193\n",
"Epoch 17/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0492 - accuracy: 0.9816\n",
"Epoch 17: val_accuracy did not improve from 0.96615\n",
"51/51 [==============================] - 48s 945ms/step - loss: 0.0492 - accuracy: 0.9816 - val_loss: 0.2103 - val_accuracy: 0.9271\n",
"Epoch 18/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0420 - accuracy: 0.9871\n",
"Epoch 18: val_accuracy did not improve from 0.96615\n",
"51/51 [==============================] - 48s 946ms/step - loss: 0.0420 - accuracy: 0.9871 - val_loss: 0.7410 - val_accuracy: 0.8411\n",
"Epoch 19/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0580 - accuracy: 0.9792\n",
"Epoch 19: val_accuracy improved from 0.96615 to 0.98958, saving model to alex_2.h5\n",
"51/51 [==============================] - 51s 993ms/step - loss: 0.0580 - accuracy: 0.9792 - val_loss: 0.0379 - val_accuracy: 0.9896\n",
"Epoch 20/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0471 - accuracy: 0.9853\n",
"Epoch 20: val_accuracy did not improve from 0.98958\n",
"51/51 [==============================] - 49s 961ms/step - loss: 0.0471 - accuracy: 0.9853 - val_loss: 1.3082 - val_accuracy: 0.7526\n",
"Epoch 21/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0391 - accuracy: 0.9890\n",
"Epoch 21: val_accuracy did not improve from 0.98958\n",
"51/51 [==============================] - 55s 1s/step - loss: 0.0391 - accuracy: 0.9890 - val_loss: 0.1507 - val_accuracy: 0.9323\n",
"Epoch 22/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0351 - accuracy: 0.9896\n",
"Epoch 22: val_accuracy did not improve from 0.98958\n",
"51/51 [==============================] - 55s 1s/step - loss: 0.0351 - accuracy: 0.9896 - val_loss: 0.1305 - val_accuracy: 0.9479\n",
"Epoch 23/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0231 - accuracy: 0.9933\n",
"Epoch 23: val_accuracy did not improve from 0.98958\n",
"51/51 [==============================] - 53s 1s/step - loss: 0.0231 - accuracy: 0.9933 - val_loss: 0.0865 - val_accuracy: 0.9635\n",
"Epoch 24/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0201 - accuracy: 0.9933\n",
"Epoch 24: val_accuracy did not improve from 0.98958\n",
"51/51 [==============================] - 56s 1s/step - loss: 0.0201 - accuracy: 0.9933 - val_loss: 0.5474 - val_accuracy: 0.8281\n",
"Epoch 25/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0346 - accuracy: 0.9896\n",
"Epoch 25: val_accuracy did not improve from 0.98958\n",
"51/51 [==============================] - 56s 1s/step - loss: 0.0346 - accuracy: 0.9896 - val_loss: 0.0609 - val_accuracy: 0.9844\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAi8AAAHHCAYAAAB3K7g2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACcdUlEQVR4nOzdd3hTZfvA8e9JmqRNd0spLRRa9h4yZAjIUERBQYYiU4aLIbhx4utA3APlp74I8iqigCg4mLJk77337IDukWac3x9pQ0sLtJD2dNyf68qV5pyTc+6E0tx5nvt5HkVVVRUhhBBCiFJCp3UAQgghhBCFIcmLEEIIIUoVSV6EEEIIUapI8iKEEEKIUkWSFyGEEEKUKpK8CCGEEKJUkeRFCCGEEKWKJC9CCCGEKFUkeRFCCCFEqSLJixAaURSFSZMmFfp5J0+eRFEUZs6c6faYhBCiNJDkRZRrM2fORFEUFEXh33//zbNfVVUiIiJQFIUePXpoEKEQub377rv89ttvWochhKYkeREC8PT0ZPbs2Xm2r169mrNnz2IymTSISoi8JHkRQpIXIQC49957mTt3LjabLdf22bNn07x5cypVqqRRZOVHamqq1iGUWKqqkp6ernUYQpQYkrwIAQwYMIBLly6xbNky17bMzEzmzZvHI488ku9zUlNTefbZZ4mIiMBkMlGnTh0+/PBDrl6o3WKxMGHCBEJCQvD19eX+++/n7Nmz+Z7z3LlzDB8+nNDQUEwmEw0aNOC77767qdd0+fJlnnvuORo1aoSPjw9+fn50796dXbt25Tk2IyODSZMmUbt2bTw9PQkLC+PBBx/k2LFjrmMcDgefffYZjRo1wtPTk5CQEO655x62bt0KXL8W5+r6nkmTJqEoCvv37+eRRx4hMDCQO+64A4Ddu3czbNgwqlevjqenJ5UqVWL48OFcunQp3/drxIgRhIeHYzKZiIqK4sknnyQzM5Pjx4+jKAqffPJJnuetX78eRVH46aefCvu2XtON3h+AGTNm0LlzZypWrIjJZKJ+/fpMmzYtz7kiIyPp0aMHS5YsoUWLFnh5efH111+jKAqpqal8//33ru7OYcOGuZ63Y8cOunfvjp+fHz4+PnTp0oWNGzfmOf/x48fp168fQUFBmM1mWrduzZ9//pnrmFWrVqEoCr/88gvvvPMOVapUwdPTky5dunD06FG3vW9C3AwPrQMQoiSIjIykTZs2/PTTT3Tv3h2Av//+m8TERB5++GE+//zzXMerqsr999/PypUrGTFiBE2bNmXJkiU8//zznDt3LtcH5siRI/nhhx945JFHaNu2Lf/88w/33Xdfnhiio6Np3bo1iqIwZswYQkJC+PvvvxkxYgRJSUmMHz++UK/p+PHj/Pbbb/Tr14+oqCiio6P5+uuv6dixI/v37yc8PBwAu91Ojx49WLFiBQ8//DBPP/00ycnJLFu2jL1791KjRg0ARowYwcyZM+nevTsjR47EZrOxdu1aNm7cSIsWLQoVW7Z+/fpRq1Yt3n33XVfSt2zZMo4fP86jjz5KpUqV2LdvH9988w379u1j48aNKIoCwPnz52nVqhUJCQk89thj1K1bl3PnzjFv3jzS0tKoXr067dq148cff2TChAm5rvvjjz/i6+vLAw88cFNx56cg78+0adNo0KAB999/Px4eHixatIinnnoKh8PB6NGjc53v0KFDDBgwgMcff5xRo0ZRp04d/ve//zFy5EhatWrFY489BuD699m3bx/t27fHz8+PF154AYPBwNdff82dd97J6tWruf322wHn71nbtm1JS0tj3LhxBAcH8/3333P//fczb948evfunSuO9957D51Ox3PPPUdiYiLvv/8+AwcOZNOmTW5774QoNFWIcmzGjBkqoG7ZskWdOnWq6uvrq6alpamqqqr9+vVTO3XqpKqqqlarVk297777XM/77bffVEB9++23c52vb9++qqIo6tGjR1VVVdWdO3
eqgPrUU0/lOu6RRx5RAfWNN95wbRsxYoQaFhamxsXF5Tr24YcfVv39/V1xnThxQgXUGTNmXPe1ZWRkqHa7Pde2EydOqCaTSf3Pf/7j2vbdd9+pgPrxxx/nOYfD4VBVVVX/+ecfFVDHjRt3zWOuF9fVr/WNN95QAXXAgAF5js1+nTn99NNPKqCuWbPGtW3IkCGqTqdTt2zZcs2Yvv76axVQDxw44NqXmZmpVqhQQR06dGie592sgrw/qpr/a+vWrZtavXr1XNuqVaumAurixYvzHO/t7Z1v7L169VKNRqN67Ngx17bz58+rvr6+aocOHVzbxo8frwLq2rVrXduSk5PVqKgoNTIy0vU7s3LlShVQ69Wrp1osFtexn332mQqoe/bsye+tEKJYSLeREFn69+9Peno6f/zxB8nJyfzxxx/X7DL666+/0Ov1jBs3Ltf2Z599FlVV+fvvv13HAXmOu7oVRVVV5s+fT8+ePVFVlbi4ONetW7duJCYmsn379kK9HpPJhE7n/C9ut9u5dOkSPj4+1KlTJ9e55s+fT4UKFRg7dmyec2S3csyfPx9FUXjjjTeueczNeOKJJ/Js8/Lycv2ckZFBXFwcrVu3BnDF7XA4+O233+jZs2e+rT7ZMfXv3x9PT09+/PFH174lS5YQFxfHoEGDbjruqxX0/cn52hITE4mLi6Njx44cP36cxMTEXM+LioqiW7duBbq+3W5n6dKl9OrVi+rVq7u2h4WF8cgjj/Dvv/+SlJQEOH8nW7Vq5eqmA/Dx8eGxxx7j5MmT7N+/P9e5H330UYxGo+tx+/btAWfLnhBakeRFiCwhISF07dqV2bNn8+uvv2K32+nbt2++x546dYrw8HB8fX1zba9Xr55rf/a9TqdzNe1nq1OnTq7HsbGxJCQk8M033xASEpLr9uijjwIQExNTqNfjcDj45JNPqFWrFiaTiQoVKhASEsLu3btzfVAeO3aMOnXq4OFx7V7kY8eOER4eTlBQUKFiuJGoqKg82y5fvszTTz9NaGgoXl5ehISEuI7Ljjs2NpakpCQaNmx43fMHBATQs2fPXCPJfvzxRypXrkznzp2v+9yLFy/mul2vYLag78+6devo2rUr3t7eBAQEEBISwssvv5zrtWXL7725ltjYWNLS0vL8XoHzd9LhcHDmzBnA+Tt5reOy9+dUtWrVXI8DAwMBiI+PL3B8Qrib1LwIkcMjjzzCqFGjuHjxIt27dycgIKBYrutwOAAYNGgQQ4cOzfeYxo0bF+qc7777Lq+99hrDhw/nrbfeIigoCJ1Ox/jx413Xc6drtcDY7fZrPidnS0S2/v37s379ep5//nmaNm2Kj48PDoeDe+6556biHjJkCHPnzmX9+vU0atSIhQsX8tRTT7lapa4lLCws1+MZM2bkKo4trGPHjtGlSxfq1q3Lxx9/TEREBEajkb/++otPPvkkz2vL773Rgl6vz3e7elVhuhDFSZIXIXLo3bs3jz/+OBs3buTnn3++5nHVqlVj+fLlJCcn52p9OXjwoGt/9r3D4XC1bmQ7dOhQrvNlj0Sy2+107drVLa9l3rx5dOrUienTp+fanpCQQIUKFVyPa9SowaZNm7BarRgMhnzPVaNGDZYsWcLly5ev2bqQ/Y08ISEh1/arv8lfT3x8PCtWrODNN9/k9ddfd20/cuRIruNCQkLw8/Nj7969NzznPffcQ0hICD/++CO33347aWlpDB48+IbPyznyDKBBgwbXPLYg78+iRYuwWCwsXLgwV2vGypUrbxhLTvkliSEhIZjN5jy/V+D8ndTpdERERADO38lrHZe9X4iSTrqNhMjBx8eHadOmMWnSJHr27HnN4+69917sdjtTp07Ntf2TTz5BURTXiKXs+6tHK3366ae5Huv1evr06cP8+fPz/UCOjY0t9GvR6/V5vh3PnTuXc+fO5drWp08f4uLi8rwWuPLtuk+fPqiqyptvvnnNY/z8/KhQoQJr1qzJtf+rr74qVMw5z5
nt6vdLp9PRq1cvFi1alGso8tUxAXh4eDBgwAB++eUXZs6cSaNGjQrUitW1a9dct6tbYnIqyPuT32tLTExkxowZN4w
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 3s 318ms/step - loss: 0.2321 - accuracy: 0.9297\n",
"saturacja ---------------------------------------\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_3\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d_15 (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" batch_normalization_15 (Bat (None, 55, 55, 96) 384 \n",
" chNormalization) \n",
" \n",
" max_pooling2d_9 (MaxPooling (None, 27, 27, 96) 0 \n",
" 2D) \n",
" \n",
" conv2d_16 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" batch_normalization_16 (Bat (None, 27, 27, 256) 1024 \n",
" chNormalization) \n",
" \n",
" max_pooling2d_10 (MaxPoolin (None, 13, 13, 256) 0 \n",
" g2D) \n",
" \n",
" conv2d_17 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" batch_normalization_17 (Bat (None, 13, 13, 384) 1536 \n",
" chNormalization) \n",
" \n",
" conv2d_18 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" batch_normalization_18 (Bat (None, 13, 13, 384) 1536 \n",
" chNormalization) \n",
" \n",
" conv2d_19 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" batch_normalization_19 (Bat (None, 13, 13, 256) 1024 \n",
" chNormalization) \n",
" \n",
" max_pooling2d_11 (MaxPoolin (None, 6, 6, 256) 0 \n",
" g2D) \n",
" \n",
" flatten_3 (Flatten) (None, 9216) 0 \n",
" \n",
" dense_9 (Dense) (None, 4096) 37752832 \n",
" \n",
" dropout_6 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_10 (Dense) (None, 4096) 16781312 \n",
" \n",
" dropout_7 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_11 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,327,818\n",
"Trainable params: 58,325,066\n",
"Non-trainable params: 2,752\n",
"_________________________________________________________________\n",
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n",
"51/51 [==============================] - ETA: 0s - loss: 3.6670 - accuracy: 0.3793\n",
"Epoch 1: val_accuracy improved from -inf to 0.38542, saving model to alex_2.h5\n",
"51/51 [==============================] - 49s 953ms/step - loss: 3.6670 - accuracy: 0.3793 - val_loss: 1.8499 - val_accuracy: 0.3854\n",
"Epoch 2/25\n",
"51/51 [==============================] - ETA: 0s - loss: 1.3486 - accuracy: 0.5748\n",
"Epoch 2: val_accuracy did not improve from 0.38542\n",
"51/51 [==============================] - 52s 1s/step - loss: 1.3486 - accuracy: 0.5748 - val_loss: 3.4816 - val_accuracy: 0.2578\n",
"Epoch 3/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.9585 - accuracy: 0.6458\n",
"Epoch 3: val_accuracy did not improve from 0.38542\n",
"51/51 [==============================] - 51s 1s/step - loss: 0.9585 - accuracy: 0.6458 - val_loss: 4.6736 - val_accuracy: 0.2578\n",
"Epoch 4/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.7698 - accuracy: 0.7126\n",
"Epoch 4: val_accuracy did not improve from 0.38542\n",
"51/51 [==============================] - 55s 1s/step - loss: 0.7698 - accuracy: 0.7126 - val_loss: 5.1900 - val_accuracy: 0.2500\n",
"Epoch 5/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.6196 - accuracy: 0.7770\n",
"Epoch 5: val_accuracy did not improve from 0.38542\n",
"51/51 [==============================] - 52s 1s/step - loss: 0.6196 - accuracy: 0.7770 - val_loss: 6.2598 - val_accuracy: 0.3359\n",
"Epoch 6/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.5028 - accuracy: 0.8235\n",
"Epoch 6: val_accuracy did not improve from 0.38542\n",
"51/51 [==============================] - 54s 1s/step - loss: 0.5028 - accuracy: 0.8235 - val_loss: 6.7278 - val_accuracy: 0.2708\n",
"Epoch 7/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.4281 - accuracy: 0.8425\n",
"Epoch 7: val_accuracy did not improve from 0.38542\n",
"51/51 [==============================] - 54s 1s/step - loss: 0.4281 - accuracy: 0.8425 - val_loss: 4.9290 - val_accuracy: 0.3542\n",
"Epoch 8/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.3045 - accuracy: 0.8977\n",
"Epoch 8: val_accuracy improved from 0.38542 to 0.45573, saving model to alex_2.h5\n",
"51/51 [==============================] - 53s 1s/step - loss: 0.3045 - accuracy: 0.8977 - val_loss: 2.6881 - val_accuracy: 0.4557\n",
"Epoch 9/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.2855 - accuracy: 0.8915\n",
"Epoch 9: val_accuracy improved from 0.45573 to 0.48177, saving model to alex_2.h5\n",
"51/51 [==============================] - 53s 1s/step - loss: 0.2855 - accuracy: 0.8915 - val_loss: 2.4350 - val_accuracy: 0.4818\n",
"Epoch 10/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.2387 - accuracy: 0.9148\n",
"Epoch 10: val_accuracy improved from 0.48177 to 0.59115, saving model to alex_2.h5\n",
"51/51 [==============================] - 53s 1s/step - loss: 0.2387 - accuracy: 0.9148 - val_loss: 1.2724 - val_accuracy: 0.5911\n",
"Epoch 11/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.2451 - accuracy: 0.9118\n",
"Epoch 11: val_accuracy improved from 0.59115 to 0.74479, saving model to alex_2.h5\n",
"51/51 [==============================] - 53s 1s/step - loss: 0.2451 - accuracy: 0.9118 - val_loss: 0.7184 - val_accuracy: 0.7448\n",
"Epoch 12/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.2065 - accuracy: 0.9271\n",
"Epoch 12: val_accuracy improved from 0.74479 to 0.75521, saving model to alex_2.h5\n",
"51/51 [==============================] - 53s 1s/step - loss: 0.2065 - accuracy: 0.9271 - val_loss: 0.6324 - val_accuracy: 0.7552\n",
"Epoch 13/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1495 - accuracy: 0.9442\n",
"Epoch 13: val_accuracy improved from 0.75521 to 0.88542, saving model to alex_2.h5\n",
"51/51 [==============================] - 55s 1s/step - loss: 0.1495 - accuracy: 0.9442 - val_loss: 0.3196 - val_accuracy: 0.8854\n",
"Epoch 14/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1121 - accuracy: 0.9620\n",
"Epoch 14: val_accuracy improved from 0.88542 to 0.93750, saving model to alex_2.h5\n",
"51/51 [==============================] - 52s 1s/step - loss: 0.1121 - accuracy: 0.9620 - val_loss: 0.1828 - val_accuracy: 0.9375\n",
"Epoch 15/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1123 - accuracy: 0.9626\n",
"Epoch 15: val_accuracy did not improve from 0.93750\n",
"51/51 [==============================] - 55s 1s/step - loss: 0.1123 - accuracy: 0.9626 - val_loss: 0.2040 - val_accuracy: 0.9271\n",
"Epoch 16/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1076 - accuracy: 0.9614\n",
"Epoch 16: val_accuracy improved from 0.93750 to 0.94271, saving model to alex_2.h5\n",
"51/51 [==============================] - 53s 1s/step - loss: 0.1076 - accuracy: 0.9614 - val_loss: 0.1781 - val_accuracy: 0.9427\n",
"Epoch 17/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1243 - accuracy: 0.9571\n",
"Epoch 17: val_accuracy did not improve from 0.94271\n",
"51/51 [==============================] - 50s 988ms/step - loss: 0.1243 - accuracy: 0.9571 - val_loss: 0.2918 - val_accuracy: 0.8984\n",
"Epoch 18/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0914 - accuracy: 0.9706 \n",
"Epoch 18: val_accuracy did not improve from 0.94271\n",
"51/51 [==============================] - 952s 19s/step - loss: 0.0914 - accuracy: 0.9706 - val_loss: 0.2769 - val_accuracy: 0.9036\n",
"Epoch 19/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0683 - accuracy: 0.9761\n",
"Epoch 19: val_accuracy did not improve from 0.94271\n",
"51/51 [==============================] - 121s 2s/step - loss: 0.0683 - accuracy: 0.9761 - val_loss: 0.2512 - val_accuracy: 0.9036\n",
"Epoch 20/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0546 - accuracy: 0.9841\n",
"Epoch 20: val_accuracy improved from 0.94271 to 0.96354, saving model to alex_2.h5\n",
"51/51 [==============================] - 167s 3s/step - loss: 0.0546 - accuracy: 0.9841 - val_loss: 0.1222 - val_accuracy: 0.9635\n",
"Epoch 21/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0561 - accuracy: 0.9786\n",
"Epoch 21: val_accuracy did not improve from 0.96354\n",
"51/51 [==============================] - 212s 4s/step - loss: 0.0561 - accuracy: 0.9786 - val_loss: 0.1749 - val_accuracy: 0.9349\n",
"Epoch 22/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0399 - accuracy: 0.9902\n",
"Epoch 22: val_accuracy did not improve from 0.96354\n",
"51/51 [==============================] - 379s 7s/step - loss: 0.0399 - accuracy: 0.9902 - val_loss: 0.3205 - val_accuracy: 0.8958\n",
"Epoch 23/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0587 - accuracy: 0.9804\n",
"Epoch 23: val_accuracy did not improve from 0.96354\n",
"51/51 [==============================] - 332s 7s/step - loss: 0.0587 - accuracy: 0.9804 - val_loss: 0.2606 - val_accuracy: 0.9036\n",
"Epoch 24/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0629 - accuracy: 0.9804\n",
"Epoch 24: val_accuracy did not improve from 0.96354\n",
"51/51 [==============================] - 279s 6s/step - loss: 0.0629 - accuracy: 0.9804 - val_loss: 0.1527 - val_accuracy: 0.9531\n",
"Epoch 25/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0471 - accuracy: 0.9853\n",
"Epoch 25: val_accuracy did not improve from 0.96354\n",
"51/51 [==============================] - 330s 7s/step - loss: 0.0471 - accuracy: 0.9853 - val_loss: 0.2199 - val_accuracy: 0.9297\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAi8AAAHHCAYAAAB3K7g2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACOVUlEQVR4nOzdd3xT9f7H8dfJTkc6aKEtFErL3hsBZSug4kJU1AsIqPcKzuu4Xhde7xXnvW6vehWuvysOVNzKXrJkiIDsUmihZdOdZp7fH2lCSwu00PYk7ed5bx5JTk5yPk0jefd8l6KqqooQQgghRIjQaV2AEEIIIUR1SHgRQgghREiR8CKEEEKIkCLhRQghhBAhRcKLEEIIIUKKhBchhBBChBQJL0IIIYQIKRJehBBCCBFSJLwIIYQQIqRIeBGilimKwvTp06v9vH379qEoCrNmzarxmoSorvP9HAtRGyS8iAZh1qxZKIqCoij8/PPPFR5XVZXk5GQUReHKK6/UoELR0GRnZzN9+nQ2bdqkdSlChByD1gUIUZcsFguzZ8/m4osvLrd92bJlHDhwALPZrFFloqHJzs7m6aefJiUlhW7dumldzjnZ7XYMBvnKEMFBzryIBuXyyy9nzpw5uN3ucttnz55Nz549SUhI0KiyhqOoqEjrEuq1kpISvF5vjb+uxWKR8CKChoQX0aCMGzeO48ePs2DBgsA2p9PJ559/zs0331zpc4qKivjzn/9McnIyZrOZtm3b8tJLL3H6guwOh4P777+f+Ph4IiMjueqqqzhw4EClr3nw4EEmTZpEkyZNMJvNdOzYkQ8++OC8fqYTJ07w4IMP0rlzZyIiIrDZbIwaNYrffvutwr4lJSVMnz6dNm3aYLFYSExM5LrrriM9PT2wj9fr5dVXX6Vz585YLBbi4+MZOXIk69evB87eF+f0fhHTp09HURS2bdvGzTffTExMTOCs1+bNm5k4cSKpqalYLBYSEhKYNGkSx48fr/T9mjx5MklJSZjNZlq2bMmf/vQnnE4ne/fuRVEU/vWvf1V43qpVq1AUhY8//ri6b+sZffLJJ/Ts2ZPIyEhsNhudO3fm1VdfDTxeld/H0qVL6d27NwC33XZboEnT/56mpKQwceLECscePHgwgwcPLvc6iqLwySef8Pjjj9O0aVPCwsLIz8+v8c/F6b/b/fv3c9ddd9G2bVusViuNGjVi7Nix7Nu37/zeWCGqQWK0aFBSUlLo168fH3/8MaNGjQLgxx9/JC8vj5tuuonXXnut3P6qqnLVVVexZMkSJk+eTLdu3Zg3bx4PPfQQBw8eLPeFOWXKFP73v/9x8803079/fxYvXswVV1xRoYbDhw9z0UUXoSgK06ZNIz4+nh9//JHJkyeTn5/PfffdV62fae/evXz11VeMHTuWli1bcvjwYd555x0GDRrEtm3bSEpKAsDj8XDllVeyaNEibrrpJu69914KCgpYsGABW7duJS0tDYDJkycza9YsRo0axZQpU3C73axYsYI1a9bQq1evatXmN3bsWFq3bs2zzz4bCH0LFixg79693HbbbSQkJPD777/z7rvv8vvvv7NmzRoURQF8zSt9+vQhNzeXO+64g3bt2nHw4EE+//xziouLSU1NZcCAAXz00Ufcf//95Y770UcfERkZydVXX31edZ9uwYIFjBs3jmHDhvH8888DsH37dlauXMm9994LVO330b59e/72t7/x5JNPcscdd3DJJZcA0L9///Oq65lnnsFkMvHggw/icDgwmUxs27atRj8Xp1u3bh2rVq3ipptuolmzZuzbt4+3336bwYMHs23bNsLCws7rZxGiSlQhGoCZM2eqgLpu3Tr1jTfeUCMjI9Xi4mJVVVV17Nix6pAhQ1RVVdUWLVqoV1xxReB5X331lQqof//738u93vXXX68qiqLu2bNHVVVV3bRpkwqod911V7n9br75ZhVQn3rqqcC2yZMnq4mJieqxY8fK7X
vTTTepUVFRgboyMjJUQJ05c+ZZf7aSkhLV4/GU25aRkaGazWb1b3/7W2DbBx98oALqP//5zwqv4fV6VVVV1cWLF6uAes8995xxn7PVdfrP+tRTT6mAOm7cuAr7+n/Osj7++GMVUJcvXx7YNn78eFWn06nr1q07Y03vvPOOCqjbt28PPOZ0OtW4uDh1woQJFZ53vu69917VZrOpbrf7jPtU9fexbt26M76PLVq0qLTuQYMGqYMGDQrcX7JkiQqoqampFd7PmvxcqGrF321lv7/Vq1ergPrhhx9WeEyImiTNRqLBueGGG7Db7Xz33XcUFBTw3XffnbHJ6IcffkCv13PPPfeU2/7nP/8ZVVX58ccfA/sBFfY7/SyKqqp88cUXjB49GlVVOXbsWOAyYsQI8vLy2LhxY7V+HrPZjE7n+0/Z4/Fw/PhxIiIiaNu2bbnX+uKLL4iLi+Puu++u8Br+sxxffPEFiqLw1FNPnXGf8/HHP/6xwjar1Rq4XVJSwrFjx7jooosAAnV7vV6++uorRo8eXelZH39NN9xwAxaLhY8++ijw2Lx58zh27Bi33nrredd9uujoaIqKiso1O56uqr+PmjRhwoRy72d16qjK56IyZY/ncrk4fvw4rVq1Ijo6utZ+TiH8JLyIBic+Pp7hw4cze/ZsvvzySzweD9dff32l++7fv5+kpCQiIyPLbW/fvn3gcf+1TqercIq9bdu25e4fPXqU3Nxc3n33XeLj48tdbrvtNgCOHDlSrZ/H6/Xyr3/9i9atW2M2m4mLiyM+Pp7NmzeTl5cX2C89PZ22bduetdNleno6SUlJxMbGVquGc2nZsmWFbSdOnODee++lSZMmWK1W4uPjA/v56z569Cj5+fl06tTprK8fHR3N6NGjmT17dmDbRx99RNOmTRk6dOhZn3vo0KFyF7vdfsZ977rrLtq0acOoUaNo1qwZkyZN4qeffiq3T1V/HzWpsve3Jj8XlbHb7Tz55JOBvmD+18/Nza21n1MIP+nzIhqkm2++mdtvv51Dhw4xatQooqOj6+S4/lEgt956KxMmTKh0ny5dulTrNZ999lmeeOIJJk2axDPPPENsbCw6nY777ruvVkadnOmvcY/Hc8bnnH5WAHxnS1atWsVDDz1Et27diIiIwOv1MnLkyPOqe/z48cyZM4dVq1bRuXNnvvnmG+66667A2YczSUxMLHd/5syZlXaWBWjcuDGbNm1i3rx5/Pjjj/z444/MnDmT8ePH89///heomd/H2d5jvV5fYXtl729tfy7uvvtuZs6cyX333Ue/fv2IiopCURRuuummWvncCVGWhBfRIF177bXceeedrFmzhk8//fSM+7Vo0YKFCxdSUFBQ7uzLjh07Ao/7r71eb+CvWL+dO3eWez3/SCSPx8Pw4cNr5Gf5/PPPGTJkCO+//3657bm5ucTFxQXup6WlsXbtWlwuF0ajsdLXSktLY968eZw4ceKMZ19iYmICr1+W/yxUVZw8eZJFixbx9NNP8+STTwa27969u9x+8fHx2Gw2tm7des7XHDlyJPHx8Xz00Uf07duX4uJi/vCHP5zzeac3AXXs2PGs+5tMJkaPHs3o0aPxer3cddddvPPOOzzxxBO0atWqyr+PszXJxMTEVHh/wfcep6amnvNngpr9XJzp9SdMmMDLL78c2FZSUlJp3ULUNGk2Eg1SREQEb7/9NtOnT2f06NFn3O/yyy/H4/HwxhtvlNv+r3/9C0VRAiOW/Nenj1Z65ZVXyt3X6/WMGTOGL774otIv5KNHj1b7Z9Hr9RWGbc+ZM4eDBw+W2zZmzBiOHTtW4WcBAs8fM2YMqqry9NNPn3Efm81GXFwcy5cvL/f4W2+9Va2ay76m3+nvl06n45prruHbb78NDNWurCYAg8HAuHHj+Oyzz5g1axadO3eu0lms4cOHl7ucfiamrNOHcet0usAxHA5H4Geryu8jPDwcqBgCwRco1qxZg9PpDGz77rvvyMrKOufP41
eTn4uqvv7rr79+1jNwQtQUOfMiGqwzNduUNXr0aIYMGcJjjz3Gvn376Nq1K/Pnz+frr7/mvvvuC/Rx6datG+PGjeO
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 50s 7s/step - loss: 0.3729 - accuracy: 0.8828\n"
2023-01-11 16:25:40 +01:00
]
}
],
"source": [
"data_test = load_test_data(\"./train_test_sw/test_sw\")\n",
"X_test = data_test['values']\n",
"y_test = data_test['labels']\n",
"# Use 'filter_name' (not 'filter') to avoid shadowing the Python builtin,\n",
"# and to match the parameter name of alex(filter_name, ...) defined above.\n",
"for filter_name in filters:\n",
"    print(f\"{filter_name} ---------------------------------------\")\n",
"    train_ds, test_ds, validation_ds = data_prep_alex(filter_name)\n",
"    alex(filter_name, train_ds, test_ds, validation_ds)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
},
"vscode": {
"interpreter": {
"hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}