Symulowanie-wizualne/sw-lab9-10_4.ipynb

{
"cells": [
{
"cell_type": "markdown",
"id": "dd9a88f0",
"metadata": {},
"source": [
"#### Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop"
]
},
{
"cell_type": "markdown",
"id": "acda0087",
"metadata": {},
"source": [
"### Generowanie dodatkowych zdjęć w oparciu o filtry krawędziowe"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "f790226b",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import cv2 as cv\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import json\n",
"from tensorflow import keras\n",
"%matplotlib inline"
]
},
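{
"cell_type": "markdown",
"id": "3f1e2a10",
"metadata": {},
"source": [
"A minimal sketch of the idea used in the cells below: apply one edge filter (here a Laplacian) to a single image and save the result next to the original. The path `train_test_sw/train_sw/Tomato/example.png` is only an assumed placeholder; the batch versions for each filter follow in the (commented-out) cells."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a2b3c11",
"metadata": {},
"outputs": [],
"source": [
"# Hedged single-image sketch of the Laplacian-based augmentation (the path below is a placeholder)\n",
"import cv2 as cv\n",
"\n",
"sample_path = 'train_test_sw/train_sw/Tomato/example.png'\n",
"sample = cv.imread(sample_path)\n",
"if sample is not None:\n",
"    gray = cv.cvtColor(sample, cv.COLOR_BGR2GRAY)\n",
"    edges = cv.Laplacian(gray, cv.CV_16S, ksize=3) # signed 16-bit keeps negative filter responses\n",
"    edges = cv.convertScaleAbs(edges) # back to uint8 so the image can be written to disk\n",
"    cv.imwrite(sample_path[:-4] + '_laplacian.png', edges)"
]
},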
{
"cell_type": "code",
"execution_count": 2,
"id": "44319623",
"metadata": {},
"outputs": [],
"source": [
"def alex(filter_name, train_ds, test_ds, validation_ds):\n",
" from keras.callbacks import ModelCheckpoint, EarlyStopping\n",
" import matplotlib.pyplot as plt\n",
" import tensorflow as tf\n",
"\n",
" alexnet = keras.models.Sequential([\n",
" keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dropout(.5),\n",
" keras.layers.Dense(10, activation='softmax')\n",
" ])\n",
"\n",
" alexnet.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(lr=.001), metrics=['accuracy'])\n",
" alexnet.summary()\n",
"\n",
" checkpoint = ModelCheckpoint(\"alex_2.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)\n",
" early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
" \n",
" alex = alexnet.fit_generator(\n",
" steps_per_epoch=len(train_ds), \n",
" generator=train_ds, \n",
" validation_data= validation_ds, \n",
" validation_steps=len(validation_ds), \n",
" epochs=25, \n",
" callbacks=[checkpoint,early])\n",
"\n",
" plt.plot(alex.history[\"accuracy\"])\n",
" plt.plot(alex.history['val_accuracy'])\n",
" plt.plot(alex.history['loss'])\n",
" plt.plot(alex.history['val_loss'])\n",
" plt.title(f\"Model accuracy - {filter_name}\")\n",
" plt.ylabel(\"Value\")\n",
" plt.xlabel(\"Epoch\")\n",
" plt.legend([\"Accuracy\",\"Validation Accuracy\",\"Loss\",\"Validation Loss\"])\n",
" plt.show()\n",
"\n",
" alexnet.evaluate(test_ds)\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "4e3ebfd0",
"metadata": {},
"outputs": [],
"source": [
"def fix_float_img(img):\n",
" img_normed = 255 * (img - img.min()) / (img.max() - img.min())\n",
" img_normed = np.array(img_normed, np.int)\n",
" return img_normed"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "ffeda62d",
"metadata": {},
"outputs": [],
"source": [
"# directory = r\"train_test_sw/train_sw_kontrast\"\n",
"# subdirs = [r\"/Tomato\", r\"/Lemon\", r\"/Beech\", r\"/Mean\", r\"/Gardenia\"]\n",
"\n",
"# json_entries = []\n",
"\n",
"# for sub in subdirs:\n",
"# path = directory + sub\n",
" \n",
"# for filename in os.listdir(path):\n",
"# f = os.path.join(path, filename)\n",
" \n",
"# if os.path.isfile(f):\n",
"# img = cv.imread(f)\n",
"\n",
"\n",
"# lab= cv.cvtColor(img, cv.COLOR_BGR2LAB)\n",
"# l_channel, a, b = cv.split(lab)\n",
"# # Applying CLAHE to L-channel\n",
"# # feel free to try different values for the limit and grid size:\n",
"# clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n",
"# cl = clahe.apply(l_channel)\n",
"# # merge the CLAHE enhanced L-channel with the a and b channel\n",
"# limg = cv.merge((cl,a,b))\n",
"# # Converting image from LAB Color model to BGR color spcae\n",
"# enhanced_img = cv.cvtColor(limg, cv.COLOR_LAB2BGR)\n",
"# filename_edge = f[:-4] + '_kontrast.png'\n",
"# #final_edge = fix_float_img(adjusted)\n",
"# cv.imwrite(filename_edge, enhanced_img)\n",
" \n",
" \n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "72c68d57",
"metadata": {},
"outputs": [],
"source": [
"# directory = r\"train_test_sw/train_sw_saturacja\"\n",
"# subdirs = [r\"/Tomato\", r\"/Lemon\", r\"/Beech\", r\"/Mean\", r\"/Gardenia\"]\n",
"\n",
"# json_entries = []\n",
"\n",
"# for sub in subdirs:\n",
"# path = directory + sub\n",
" \n",
"# for filename in os.listdir(path):\n",
"# f = os.path.join(path, filename)\n",
" \n",
"# if os.path.isfile(f):\n",
"# img = cv.imread(f)\n",
"# hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n",
"# greenMask = cv.inRange(hsv, (26, 10, 30), (97, 100, 255))\n",
"# hsv[:,:,1] = greenMask\n",
"# back = cv.cvtColor(hsv, cv.COLOR_HSV2RGB)\n",
"# filename_edge = f[:-4] + '_saturacja.png'\n",
"# #final_edge = fix_float_img(adjusted)\n",
"# cv.imwrite(filename_edge, back)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "4859d197",
"metadata": {},
"outputs": [],
"source": [
"# directory = r\"train_test_sw/train_sw_jezu\"\n",
"# subdirs = [r\"/Tomato\", r\"/Lemon\", r\"/Beech\", r\"/Mean\", r\"/Gardenia\"]\n",
"\n",
"# json_entries = []\n",
"\n",
"# for sub in subdirs:\n",
"# path = directory + sub\n",
" \n",
"# for filename in os.listdir(path):\n",
"# f = os.path.join(path, filename)\n",
" \n",
"# if os.path.isfile(f):\n",
"# img = cv.imread(f)\n",
"\n",
"# img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n",
"# ddepth = cv.CV_16S\n",
"# kernel_size = 3\n",
"# laplacian_operator = cv.Laplacian(img_gray, ddepth, ksize=kernel_size)\n",
"# filename_edge = f[:-4] + '_laplacian.png'\n",
"# #final_edge = fix_float_img(adjusted)\n",
"# cv.imwrite(filename_edge, laplacian_operator)\n"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "aedb7b9f",
"metadata": {},
"outputs": [],
"source": [
"# directory = r\"train_test_sw/train_sw\"\n",
"# subdirs = [r\"/Tomato\", r\"/Lemon\", r\"/Beech\", r\"/Mean\", r\"/Gardenia\"]\n",
"\n",
"# json_entries = []\n",
"\n",
"# for sub in subdirs:\n",
"# path = directory + sub\n",
" \n",
"# for filename in os.listdir(path):\n",
"# f = os.path.join(path, filename)\n",
" \n",
"# if os.path.isfile(f):\n",
"# img = cv.imread(f)\n",
"\n",
"# lab_image = cv.cvtColor(img, cv.COLOR_BGR2LAB)\n",
"# filename_edge = f[:-4] + '_lab.png'\n",
"# #final_edge = fix_float_img(adjusted)\n",
"# cv.imwrite(filename_edge, lab_image)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "dc650af1",
"metadata": {},
"outputs": [],
"source": [
"# directory = r\"train_test_sw/train_sw_emboss\"\n",
"# subdirs = [r\"/Tomato\", r\"/Lemon\", r\"/Beech\", r\"/Mean\", r\"/Gardenia\"]\n",
"\n",
"# json_entries = []\n",
"\n",
"# for sub in subdirs:\n",
"# path = directory + sub\n",
" \n",
"# for filename in os.listdir(path):\n",
"# f = os.path.join(path, filename)\n",
" \n",
"# if os.path.isfile(f):\n",
"# img = cv.imread(f)\n",
"\n",
"# height, width = img.shape[:2]\n",
"# y = np.ones((height, width), np.uint8) * 128\n",
"# output = np.zeros((height, width), np.uint8)\n",
"# # generating the kernels\n",
"# kernel1 = np.array([[0, -1, -1], # kernel for embossing bottom left side\n",
"# [1, 0, -1],\n",
"# [1, 1, 0]])\n",
"# kernel2 = np.array([[-1, -1, 0], # kernel for embossing bottom right side\n",
"# [-1, 0, 1],\n",
"# [0, 1, 1]])\n",
"# # you can generate kernels for embossing top as well\n",
"# gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n",
"# output1 = cv.add(cv.filter2D(gray, -1, kernel1), y) # emboss on bottom left side\n",
"# output2 = cv.add(cv.filter2D(gray, -1, kernel2), y) # emboss on bottom right side\n",
"# for i in range(height):\n",
"# for j in range(width):\n",
"# output[i, j] = max(output1[i, j], output2[i, j]) # combining both embosses to produce stronger emboss\n",
"\n",
"# filename_edge = f[:-4] + '_emboss.png'\n",
"# #final_edge = fix_float_img(adjusted)\n",
"# cv.imwrite(filename_edge, output)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "6a3f8c81",
"metadata": {},
"outputs": [],
"source": [
"# directory = r\"train_test_sw/train_sw_cartoon\"\n",
"# subdirs = [r\"/Tomato\", r\"/Lemon\", r\"/Beech\", r\"/Mean\", r\"/Gardenia\"]\n",
"\n",
"# json_entries = []\n",
"\n",
"# for sub in subdirs:\n",
"# path = directory + sub\n",
" \n",
"# for filename in os.listdir(path):\n",
"# f = os.path.join(path, filename)\n",
" \n",
"# if os.path.isfile(f):\n",
"# img = cv.imread(f)\n",
"\n",
"# edges1 = cv.bitwise_not(cv.Canny(img, 100, 200)) # for thin edges and inverting the mask obatined\n",
"# gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n",
"# gray = cv.medianBlur(gray, 5) # applying median blur with kernel size of 5\n",
"# edges2 = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 7, 7) # thick edges\n",
"# dst = cv.edgePreservingFilter(img, flags=2, sigma_s=20, sigma_r=0.1) # you can also use bilateral filter but that is slow\n",
"# # flag = 1 for RECURS_FILTER (Recursive Filtering) and 2 for NORMCONV_FILTER (Normalized Convolution). NORMCONV_FILTER produces sharpening of the edges but is slower.\n",
"# # sigma_s controls the size of the neighborhood. Range 1 - 200\n",
"# # sigma_r controls the how dissimilar colors within the neighborhood will be averaged. A larger sigma_r results in large regions of constant color. Range 0 - 1\n",
"# cartoon = cv.bitwise_and(dst, dst, mask=edges1) # adding thin edges to smoothened imag\n",
"\n",
"\n",
"# filename_edge = f[:-4] + '_cartoon.png'\n",
"# #final_edge = fix_float_img(adjusted)\n",
"# cv.imwrite(filename_edge, cartoon)\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "0c0cd453",
"metadata": {},
"source": [
"## Data"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "c4f0f653",
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"import subprocess\n",
"import pkg_resources\n",
"import numpy as np\n",
"peachy = []\n",
"\n",
"required = { 'scikit-image'}\n",
"installed = {pkg.key for pkg in pkg_resources.working_set}\n",
"missing = required - installed\n",
"\n",
"if missing: \n",
" python = sys.executable\n",
" subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL)\n",
"\n",
"def load_train_data(input_dir, newSize=(227,227)):\n",
" import numpy as np\n",
" import pandas as pd\n",
" import os\n",
" from skimage.io import imread\n",
" import cv2 as cv\n",
" from pathlib import Path\n",
" import random\n",
" from shutil import copyfile, rmtree\n",
" import json\n",
"\n",
" import seaborn as sns\n",
" import matplotlib.pyplot as plt\n",
"\n",
" import matplotlib\n",
" \n",
" image_dir = Path(input_dir)\n",
" categories_name = []\n",
" for file in os.listdir(image_dir):\n",
" d = os.path.join(image_dir, file)\n",
" if os.path.isdir(d):\n",
" categories_name.append(file)\n",
"\n",
" folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]\n",
"\n",
" train_img = []\n",
" categories_count=[]\n",
" labels=[]\n",
" for i, direc in enumerate(folders):\n",
" count = 0\n",
" \n",
" for obj in direc.iterdir():\n",
" if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':\n",
" labels.append(os.path.basename(os.path.normpath(direc)))\n",
" count += 1\n",
" img = imread(obj)#zwraca ndarry postaci xSize x ySize x colorDepth\n",
" if img.shape[-1] == 256:\n",
" img = np.repeat(img[..., np.newaxis], 3, axis=2)\n",
" elif img.shape[-1] == 4:\n",
" img = img[:, :, :3]\n",
" img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)# zwraca ndarray\n",
" img = img / 255#normalizacja\n",
" train_img.append(img)\n",
" categories_count.append(count)\n",
" X={}\n",
" X[\"values\"] = np.array(train_img)\n",
" X[\"categories_name\"] = categories_name\n",
" X[\"categories_count\"] = categories_count\n",
" X[\"labels\"]=labels\n",
" return X\n",
"\n",
"def load_test_data(input_dir, newSize=(227,227)):\n",
" import numpy as np\n",
" import pandas as pd\n",
" import os\n",
" from skimage.io import imread\n",
" import cv2 as cv\n",
" from pathlib import Path\n",
" import random\n",
" from shutil import copyfile, rmtree\n",
" import json\n",
"\n",
" import seaborn as sns\n",
" import matplotlib.pyplot as plt\n",
"\n",
" import matplotlib\n",
"\n",
" image_path = Path(input_dir)\n",
"\n",
" labels_path = image_path.parents[0] / 'test_labels.json'\n",
"\n",
" jsonString = labels_path.read_text()\n",
" objects = json.loads(jsonString)\n",
"\n",
" categories_name = []\n",
" categories_count=[]\n",
" count = 0\n",
" c = objects[0]['value']\n",
" for e in objects:\n",
" if e['value'] != c:\n",
" categories_count.append(count)\n",
" c = e['value']\n",
" count = 1\n",
" else:\n",
" count += 1\n",
" if not e['value'] in categories_name:\n",
" categories_name.append(e['value'])\n",
"\n",
" categories_count.append(count)\n",
" \n",
" test_img = []\n",
"\n",
" labels=[]\n",
" for e in objects:\n",
" p = image_path / e['filename']\n",
" img = imread(p)#zwraca ndarry postaci xSize x ySize x colorDepth\n",
" img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)# zwraca ndarray\n",
" img = img / 255#normalizacja\n",
" test_img.append(img)\n",
" labels.append(e['value'])\n",
"\n",
" X={}\n",
" X[\"values\"] = np.array(test_img)\n",
" X[\"categories_name\"] = categories_name\n",
" X[\"categories_count\"] = categories_count\n",
" X[\"labels\"]=labels\n",
" return X\n",
"\n"
]
},
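{
"cell_type": "markdown",
"id": "7c9d0e21",
"metadata": {},
"source": [
"`load_test_data` expects a `test_labels.json` file in the parent directory of the test images, with one entry per image. Below is a minimal sketch of the layout implied by the loader above; the filenames and class values are placeholders, not real data."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d0e1f32",
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch of the test_labels.json structure inferred from load_test_data\n",
"# (filenames and class names are placeholders; entries are grouped by class, as the loader assumes)\n",
"example_labels = [\n",
"    {\"filename\": \"Tomato_001.png\", \"value\": \"Tomato\"},\n",
"    {\"filename\": \"Tomato_002.png\", \"value\": \"Tomato\"},\n",
"    {\"filename\": \"Lemon_001.png\", \"value\": \"Lemon\"}\n",
"]\n",
"print(json.dumps(example_labels, indent=2))"
]
},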
{
"cell_type": "code",
"execution_count": 12,
"id": "b0dceacc",
"metadata": {},
"outputs": [],
"source": [
"def data_prep_alex(filter_name):\n",
" from sklearn.model_selection import train_test_split\n",
" from sklearn.preprocessing import LabelEncoder\n",
" import tensorflow as tf\n",
"\n",
" data_train = load_train_data(f\"./train_test_sw/train_sw_{filter_name}\")\n",
" values_train = data_train['values']\n",
" labels_train = data_train['labels']\n",
" data_test = load_test_data(\"./train_test_sw/test_sw\")\n",
" X_test = data_test['values']\n",
" y_test = data_test['labels']\n",
"\n",
" X_train, X_validate, y_train, y_validate = train_test_split(values_train, labels_train, test_size=0.2, random_state=42)\n",
"\n",
" class_le = LabelEncoder()\n",
" y_train_enc = class_le.fit_transform(y_train)\n",
" y_validate_enc = class_le.fit_transform(y_validate)\n",
" y_test_enc = class_le.fit_transform(y_test)\n",
"\n",
" train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))\n",
" validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))\n",
" test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))\n",
"\n",
" train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()\n",
" test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()\n",
" validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()\n",
"\n",
" train_ds = (train_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
" test_ds = (test_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
" validation_ds = (validation_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
"\n",
" return train_ds, test_ds, validation_ds\n",
" "
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "b1aa7ac3",
"metadata": {},
"source": [
"### Emboss"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "adf27f44",
"metadata": {},
"outputs": [],
"source": [
"# train_ds\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5333a4e6",
"metadata": {},
"outputs": [],
"source": [
"\n"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "b4ff7bba",
"metadata": {},
"outputs": [],
"source": [
"# train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()\n",
"# test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()\n",
"# validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()\n",
"# print(\"Training data size:\", train_ds_size)\n",
"# print(\"Test data size:\", test_ds_size)\n",
"# print(\"Validation data size:\", validation_ds_size)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "5dd609ea",
"metadata": {},
"outputs": [],
"source": [
"# alexnet.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(lr=.001), metrics=['accuracy'])\n",
"# alexnet.summary()"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "220824d7",
"metadata": {},
"outputs": [],
"source": [
"\n",
"# checkpoint = ModelCheckpoint(\"alex_2.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)\n",
"# early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"\n",
"# alex = alexnet.fit_generator(\n",
"# steps_per_epoch=len(train_ds), \n",
"# generator=train_ds, \n",
"# validation_data= validation_ds, \n",
"# validation_steps=len(validation_ds), \n",
"# epochs=25, \n",
"# callbacks=[checkpoint,early])"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "f87a88e9",
"metadata": {},
"outputs": [],
"source": [
"# model_flat_drop.fit(train_ds,\n",
"# epochs=100,\n",
"# validation_data=validation_ds,\n",
"# validation_freq=1,\n",
"# callbacks=[tensorboard_cb])"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "f7a72530",
"metadata": {},
"source": [
"### Saturacja"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "6148789c",
"metadata": {},
"outputs": [],
"source": [
"# from sklearn.preprocessing import LabelEncoder\n",
"\n",
"# # Data load\n",
"# data_train = load_train_data(\"train_test_sw_kontrast/train_sw\", newSize=(16,16))\n",
"# X_train = data_train['values']\n",
"# y_train = data_train['labels']\n",
"\n",
"# data_test = load_test_data(\"train_test_sw_kontrast/test_sw\", newSize=(16,16))\n",
"# X_test = data_test['values']\n",
"# y_test = data_test['labels']\n",
"\n",
"# class_le = LabelEncoder()\n",
"# y_train_enc = class_le.fit_transform(y_train)\n",
"# y_test_enc = class_le.fit_transform(y_test)\n",
"\n",
"# X_train = X_train.flatten().reshape(X_train.shape[0], int(np.prod(X_train.shape) / X_train.shape[0]))\n",
"# X_test = X_test.flatten().reshape(X_test.shape[0], int(np.prod(X_test.shape) / X_test.shape[0]))"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "47d3b363",
"metadata": {},
"source": [
"# ALEXNET"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "108a46e4",
"metadata": {},
"outputs": [],
"source": [
"filters = ['laplasian', 'kontrast', 'cartoon', 'saturacja', 'emboss']"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "12a16bca",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" batch_normalization (BatchN (None, 55, 55, 96) 384 \n",
" ormalization) \n",
" \n",
" max_pooling2d (MaxPooling2D (None, 27, 27, 96) 0 \n",
" ) \n",
" \n",
" conv2d_1 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" batch_normalization_1 (Batc (None, 27, 27, 256) 1024 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_1 (MaxPooling (None, 13, 13, 256) 0 \n",
" 2D) \n",
" \n",
" conv2d_2 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" batch_normalization_2 (Batc (None, 13, 13, 384) 1536 \n",
" hNormalization) \n",
" \n",
" conv2d_3 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" batch_normalization_3 (Batc (None, 13, 13, 384) 1536 \n",
" hNormalization) \n",
" \n",
" conv2d_4 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" batch_normalization_4 (Batc (None, 13, 13, 256) 1024 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_2 (MaxPooling (None, 6, 6, 256) 0 \n",
" 2D) \n",
" \n",
" flatten (Flatten) (None, 9216) 0 \n",
" \n",
" dense (Dense) (None, 4096) 37752832 \n",
" \n",
" dropout (Dropout) (None, 4096) 0 \n",
" \n",
" dense_1 (Dense) (None, 4096) 16781312 \n",
" \n",
" dropout_1 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_2 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,327,818\n",
"Trainable params: 58,325,066\n",
"Non-trainable params: 2,752\n",
"_________________________________________________________________\n",
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n",
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_35367/157534861.py:34: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex = alexnet.fit_generator(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-01-11 15:57:37.093304: W tensorflow/tsl/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"51/51 [==============================] - ETA: 0s - loss: 3.5934 - accuracy: 0.3903\n",
"Epoch 1: val_accuracy improved from -inf to 0.24740, saving model to alex_2.h5\n",
"51/51 [==============================] - 46s 888ms/step - loss: 3.5934 - accuracy: 0.3903 - val_loss: 1.9219 - val_accuracy: 0.2474\n",
"Epoch 2/25\n",
"51/51 [==============================] - ETA: 0s - loss: 1.2680 - accuracy: 0.5699\n",
"Epoch 2: val_accuracy did not improve from 0.24740\n",
"51/51 [==============================] - 45s 890ms/step - loss: 1.2680 - accuracy: 0.5699 - val_loss: 2.9384 - val_accuracy: 0.2370\n",
"Epoch 3/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.8801 - accuracy: 0.6930\n",
"Epoch 3: val_accuracy did not improve from 0.24740\n",
"51/51 [==============================] - 51s 1s/step - loss: 0.8801 - accuracy: 0.6930 - val_loss: 4.2987 - val_accuracy: 0.2318\n",
"Epoch 4/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.8070 - accuracy: 0.7181\n",
"Epoch 4: val_accuracy improved from 0.24740 to 0.27604, saving model to alex_2.h5\n",
"51/51 [==============================] - 47s 925ms/step - loss: 0.8070 - accuracy: 0.7181 - val_loss: 5.2133 - val_accuracy: 0.2760\n",
"Epoch 5/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.6284 - accuracy: 0.7714\n",
"Epoch 5: val_accuracy improved from 0.27604 to 0.28906, saving model to alex_2.h5\n",
"51/51 [==============================] - 52s 1s/step - loss: 0.6284 - accuracy: 0.7714 - val_loss: 5.1982 - val_accuracy: 0.2891\n",
"Epoch 6/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.5519 - accuracy: 0.7996\n",
"Epoch 6: val_accuracy improved from 0.28906 to 0.34635, saving model to alex_2.h5\n",
"51/51 [==============================] - 47s 925ms/step - loss: 0.5519 - accuracy: 0.7996 - val_loss: 5.3340 - val_accuracy: 0.3464\n",
"Epoch 7/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.5127 - accuracy: 0.8205\n",
"Epoch 7: val_accuracy did not improve from 0.34635\n",
"51/51 [==============================] - 48s 934ms/step - loss: 0.5127 - accuracy: 0.8205 - val_loss: 4.6689 - val_accuracy: 0.3307\n",
"Epoch 8/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.4584 - accuracy: 0.8364\n",
"Epoch 8: val_accuracy improved from 0.34635 to 0.34896, saving model to alex_2.h5\n",
"51/51 [==============================] - 48s 939ms/step - loss: 0.4584 - accuracy: 0.8364 - val_loss: 4.0851 - val_accuracy: 0.3490\n",
"Epoch 9/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.3952 - accuracy: 0.8585\n",
"Epoch 9: val_accuracy improved from 0.34896 to 0.39844, saving model to alex_2.h5\n",
"51/51 [==============================] - 49s 955ms/step - loss: 0.3952 - accuracy: 0.8585 - val_loss: 2.6378 - val_accuracy: 0.3984\n",
"Epoch 10/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.3141 - accuracy: 0.8811\n",
"Epoch 10: val_accuracy improved from 0.39844 to 0.43750, saving model to alex_2.h5\n",
"51/51 [==============================] - 48s 940ms/step - loss: 0.3141 - accuracy: 0.8811 - val_loss: 2.3606 - val_accuracy: 0.4375\n",
"Epoch 11/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.2889 - accuracy: 0.8922\n",
"Epoch 11: val_accuracy improved from 0.43750 to 0.65625, saving model to alex_2.h5\n",
"51/51 [==============================] - 48s 949ms/step - loss: 0.2889 - accuracy: 0.8922 - val_loss: 1.1387 - val_accuracy: 0.6562\n",
"Epoch 12/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.2696 - accuracy: 0.8977\n",
"Epoch 12: val_accuracy did not improve from 0.65625\n",
"51/51 [==============================] - 48s 933ms/step - loss: 0.2696 - accuracy: 0.8977 - val_loss: 1.1794 - val_accuracy: 0.6328\n",
"Epoch 13/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.2124 - accuracy: 0.9271\n",
"Epoch 13: val_accuracy improved from 0.65625 to 0.83073, saving model to alex_2.h5\n",
"51/51 [==============================] - 50s 973ms/step - loss: 0.2124 - accuracy: 0.9271 - val_loss: 0.4526 - val_accuracy: 0.8307\n",
"Epoch 14/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1891 - accuracy: 0.9228\n",
"Epoch 14: val_accuracy did not improve from 0.83073\n",
"51/51 [==============================] - 50s 981ms/step - loss: 0.1891 - accuracy: 0.9228 - val_loss: 0.5985 - val_accuracy: 0.7943\n",
"Epoch 15/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1603 - accuracy: 0.9381\n",
"Epoch 15: val_accuracy improved from 0.83073 to 0.83333, saving model to alex_2.h5\n",
"51/51 [==============================] - 50s 983ms/step - loss: 0.1603 - accuracy: 0.9381 - val_loss: 0.4779 - val_accuracy: 0.8333\n",
"Epoch 16/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1852 - accuracy: 0.9314\n",
"Epoch 16: val_accuracy improved from 0.83333 to 0.86979, saving model to alex_2.h5\n",
"51/51 [==============================] - 49s 962ms/step - loss: 0.1852 - accuracy: 0.9314 - val_loss: 0.3588 - val_accuracy: 0.8698\n",
"Epoch 17/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1484 - accuracy: 0.9504\n",
"Epoch 17: val_accuracy improved from 0.86979 to 0.87500, saving model to alex_2.h5\n",
"51/51 [==============================] - 49s 963ms/step - loss: 0.1484 - accuracy: 0.9504 - val_loss: 0.3464 - val_accuracy: 0.8750\n",
"Epoch 18/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1367 - accuracy: 0.9534\n",
"Epoch 18: val_accuracy did not improve from 0.87500\n",
"51/51 [==============================] - 49s 962ms/step - loss: 0.1367 - accuracy: 0.9534 - val_loss: 0.4452 - val_accuracy: 0.8464\n",
"Epoch 19/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1089 - accuracy: 0.9638\n",
"Epoch 19: val_accuracy improved from 0.87500 to 0.89062, saving model to alex_2.h5\n",
"51/51 [==============================] - 49s 953ms/step - loss: 0.1089 - accuracy: 0.9638 - val_loss: 0.3376 - val_accuracy: 0.8906\n",
"Epoch 20/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.1115 - accuracy: 0.9596\n",
"Epoch 20: val_accuracy did not improve from 0.89062\n",
"51/51 [==============================] - 49s 954ms/step - loss: 0.1115 - accuracy: 0.9596 - val_loss: 0.3655 - val_accuracy: 0.8854\n",
"Epoch 21/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0793 - accuracy: 0.9681\n",
"Epoch 21: val_accuracy did not improve from 0.89062\n",
"51/51 [==============================] - 48s 949ms/step - loss: 0.0793 - accuracy: 0.9681 - val_loss: 0.4086 - val_accuracy: 0.8776\n",
"Epoch 22/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0725 - accuracy: 0.9767\n",
"Epoch 22: val_accuracy improved from 0.89062 to 0.90365, saving model to alex_2.h5\n",
"51/51 [==============================] - 49s 958ms/step - loss: 0.0725 - accuracy: 0.9767 - val_loss: 0.2975 - val_accuracy: 0.9036\n",
"Epoch 23/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0727 - accuracy: 0.9755\n",
"Epoch 23: val_accuracy did not improve from 0.90365\n",
"51/51 [==============================] - 49s 957ms/step - loss: 0.0727 - accuracy: 0.9755 - val_loss: 0.4552 - val_accuracy: 0.8698\n",
"Epoch 24/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0659 - accuracy: 0.9737\n",
"Epoch 24: val_accuracy did not improve from 0.90365\n",
"51/51 [==============================] - 49s 952ms/step - loss: 0.0659 - accuracy: 0.9737 - val_loss: 0.3930 - val_accuracy: 0.8854\n",
"Epoch 25/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.0693 - accuracy: 0.9816\n",
"Epoch 25: val_accuracy did not improve from 0.90365\n",
"51/51 [==============================] - 50s 980ms/step - loss: 0.0693 - accuracy: 0.9816 - val_loss: 0.6543 - val_accuracy: 0.8177\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAi8AAAHHCAYAAAB3K7g2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACS3ElEQVR4nOzdd3wUdf7H8dds3/ROEgi9dwVFQIqCAioKKk0REFBPseudP89+51nOLrbTQxAbioodERGQA6RKL9JLCJCebLJ95/fHZpeEBEggyWySz/Px2Mfuzs7OfHaJ5p1vG0VVVRUhhBBCiDpCp3UBQgghhBBVIeFFCCGEEHWKhBchhBBC1CkSXoQQQghRp0h4EUIIIUSdIuFFCCGEEHWKhBchhBBC1CkSXoQQQghRp0h4EUIIIUSdIuFFiFqgKApPPvlkld+3f/9+FEVh1qxZ1V6TOHtn++9ZFQMHDmTgwIE1cuwlS5agKApLliypkeMLUdMkvIgGY9asWSiKgqIo/O9//yv3uqqqpKWloSgKV111lQYVCiGEqAwJL6LBsVgsfPLJJ+W2L126lMOHD2M2mzWoSoja079/f+x2O/3799e6FCHOioQX0eBcccUVzJ07F4/HU2b7J598Qo8ePUhOTtaosoajqKhI6xIaNJ1Oh8ViQaeTXwGibpKfXNHgjBs3juzsbBYuXBjc5nK5+OKLL7jhhhsqfE9RUREPPPAAaWlpmM1m2rVrx4svvsjJF2V3Op3cd999JCYmEhkZydVXX83hw4crPGZ6ejqTJ0+mUaNGmM1mOnXqxPvvv39WnyknJ4cHH3yQLl26EBERQVRUFMOGDWPjxo3l9nU4HDz55JO0bdsWi8VCSkoK1157LXv27Anu4/P5eO211+jSpQsWi4XExESGDh3K2rVrgdOPxTl5PMiTTz6Joihs27aNG264gdjYWC6++GIANm3axKRJk2jZsiUWi4Xk5GQmT55MdnZ2hd/XlClTSE1NxWw206JFC26//XZcLhd79+5FURReeeWVcu9bsWIFiqLw6aefVvVrrbQDBw5wxx130K5dO6xWK/Hx8YwaNYr9+/eX2S/Qdfnbb79x2223ER8fT1RUFBMmTCA3N/e053C5XDz++OP06NGD6OhowsPD6devH4sXLy6375w5c+jRoweRkZFERUXRpUsXXnvtteDrFY15WbZsGaNGjaJp06aYzWbS0tK47777sNvtZY49adIkIiIiSE9PZ8SIEURERJCYmMiDDz6I1+ut+pcnxFkwaF2AELWtefPm9O7dm08//ZRhw4YBMH/+fPLz8xk7diyvv/56mf1VVeXqq69m8eLFTJkyhe7du7NgwQL++te/kp6eXuYX5tSpU/noo4+44YYb6NOnD7/++itXXnlluRqOHTvGRRddhKIo3HnnnSQmJjJ//nymTJlCQUEB9957b5U+0969e/n6668ZNWoULVq04NixY/znP/9hwIABbNu2jdTUVAC8Xi9XXXUVixYtYuzYsdxzzz0UFhaycOFCtmzZQqtWrQCYMmUKs2bNYtiwYUydOhWPx8OyZcv4/fff6dmzZ5VqCxg1ahRt2rThmWeeCYa+hQsXsnfvXm6++WaSk5PZunUr7777Llu3buX3339HURQAjhw5woUXXkheXh633nor7du3Jz09nS+++ILi4mJatmxJ3759+fjjj7nvvvvKnPfjjz8mMjKSa6655qzqrow1a9awYsUKxo4dS5MmTdi/fz9vv/02AwcOZNu2bYSFhZXZ/8477yQmJoYnn3ySnTt38vbbb3PgwIFgqKhIQUEB//3vfxk3bhy33HILhYWFzJgxgyFDhrB69Wq6d+8O+L/TcePGMWjQIJ5//nkAtm/fzvLly7nnnntO+Rnmzp1LcXExt99+O/Hx8axevZrp06dz+PBh5s6dW2Zfr9fLkCFD6NWrFy+++CK//PILL730Eq1ateL2228/h29SiEpShWggZs6cqQLqmjVr1DfeeEONjIxUi4uLVVVV1VGjRqmXXHKJqqqq2qxZM/XKK68Mvu/rr79WAfXpp58uc7zrr79eVRRF3b17t6qqqrphwwYVUO+4444y+91www0qoD7xxBPBbVOmTFFTUlLUrKysMvuOHTtWjY6ODta1b98+FVBnzpx52s/mcDhUr9dbZtu+fftUs9ms/uMf/whue//991VAffnll8sdw+fzqaqqqr/++qsKqHffffcp9zldXSd/1ieeeEIF1HHjxpXbN/A5S/v0009VQP3tt9+C2yZMmKDqdDp1zZo1p6zpP//5jwqo27dvD77mcrnUhIQEdeLEieXedy5O/owVfY6VK1eqgDp79uzgtsDPYI8ePVSXyxXc/u9//1sF1G+++Sa4bcCAAeqAAQOCzz0ej+p0OsucIzc3V23UqJE6efLk4LZ77rlHjYqKUj0ezynrX7x4sQqoixcvPu1nePbZZ1VFUdQDBw4Et02cOFEFyvxcqaqqnnfeeWqPHj1OeU4hqpN0G4kGafTo0djtdr7//nsKCwv5/vvvT9ll9OOPP6LX67n77rvLbH/ggQdQVZX58+cH9wPK7XdyK4qqqnz55ZcMHz4cVVXJysoK3oYMGUJ+fj7r16+v0ucxm83B8Qter5fs7GwiIiJo165dmWN9+eWXJCQkcNddd5U7RuAv/i+//BJFUXjiiSdOuc/Z+Mtf/lJum9VqDT52OBxkZWVx0UUXAQTr9vl8fP311wwfPrzCVp9ATaNHj8ZisfDxxx8HX1uwYAFZWVmMHz/+rOuujNKfw+12k52dTevWrYmJianw3/LWW2/FaDQGn99+++0YDIbgz1BF9Ho9JpMJ8H8nOTk5eDweevbsWeYcMTExFBUVlekWrepnKCoqIisriz59+qCqKn/88Ue5/U/+9+zXrx979+6t0jmFOFsSXkSDlJiYyODBg/nkk0/46quv8Hq9XH/99RXue+DAAVJTU4mMjCyzvUOHDsHXA/c6nS7Y9RLQrl27Ms8zMzPJy8vj3XffJTExsczt5ptvBuD48eNV+jw+n49XXnmFNm3aYDabSUhIIDExkU2bNpGfnx/cb8+ePbRr1w6D4dQ9xnv27CE1NZW4uLgq1XAmLVq0KLctJyeHe+65h0aNGmG1WklMTAzuF6g7MzOTgoICOnfufNrjx8TEMHz48DIzyT7++GMaN27MpZdeetr3Hj16tMzt5HEeZ2K323n88ceDY6IC339eXl6Z7z+gTZs2ZZ5HRESQkpJSbozMyT744AO6du2KxWIhPj6exMREfvjhhzLnuOOOO2jbti3Dhg2jSZMmTJ48mZ9++umMn+HgwYNMmjSJuLi44DiWAQMGAJT7DIFxUKXFxsaecdyOENVFxryIBuuGG27glltu4ejRowwbNoyYmJhaOa/P5wNg/PjxTJw4scJ9unbtWqVjPvPMMzz22GNMnjyZf/7zn8TFxaHT6bj33nuD56tOp2qBOd2AzdJ/2QeMHj2aFStW8Ne//pXu3bsTERGBz+dj6NChZ1X3hAkTmDt3LitWrKBLly58++233
HHHHWecVZOSklLm+cyZM5k0aVKlz3vXXXcxc+ZM7r33Xnr37k10dDSKojB27Nhq+/4/+ugjJk2axIgRI/jrX/9KUlISer2eZ599tsxg66SkJDZs2MCCBQuYP38+8+fPZ+bMmUyYMIEPPvigwmN7vV4uu+wycnJyeOihh2jfvj3h4eGkp6czadKkcp9Br9dXy2cS4mxJeBEN1siRI7ntttv4/fff+eyzz065X7Nmzfjll18oLCws0/qyY8eO4OuBe5/PF2zdCNi5c2eZ4wVmInm9XgYPHlwtn+WLL77gkksuYcaMGWW25+XlkZCQEHzeqlUrVq1ahdvtLtNtUVqrVq1YsGABOTk5p2x9iY2NDR6/tEArVGXk5uayaNEinnrqKR5//PHg9l27dpXZLzExkaioKLZs2XLGYw4dOpTExEQ+/vhjevXqRXFxMTfddNMZ33dyF0unTp0q+Sn8vvjiCyZOnMhLL70U3OZwOMp9PwG7du3ikksuCT632WxkZGRwxRVXnPYcLVu25KuvvioTHiv
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_1\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d_5 (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" batch_normalization_5 (Batc (None, 55, 55, 96) 384 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_3 (MaxPooling (None, 27, 27, 96) 0 \n",
" 2D) \n",
" \n",
" conv2d_6 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" batch_normalization_6 (Batc (None, 27, 27, 256) 1024 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_4 (MaxPooling (None, 13, 13, 256) 0 \n",
" 2D) \n",
" \n",
" conv2d_7 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" batch_normalization_7 (Batc (None, 13, 13, 384) 1536 \n",
" hNormalization) \n",
" \n",
" conv2d_8 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" batch_normalization_8 (Batc (None, 13, 13, 384) 1536 \n",
" hNormalization) \n",
" \n",
" conv2d_9 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" batch_normalization_9 (Batc (None, 13, 13, 256) 1024 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_5 (MaxPooling (None, 6, 6, 256) 0 \n",
" 2D) \n",
" \n",
" flatten_1 (Flatten) (None, 9216) 0 \n",
" \n",
" dense_3 (Dense) (None, 4096) 37752832 \n",
" \n",
" dropout_2 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_4 (Dense) (None, 4096) 16781312 \n",
" \n",
" dropout_3 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_5 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,327,818\n",
"Trainable params: 58,325,066\n",
"Non-trainable params: 2,752\n",
"_________________________________________________________________\n",
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n",
"51/51 [==============================] - ETA: 0s - loss: 3.6077 - accuracy: 0.3566\n",
"Epoch 1: val_accuracy improved from -inf to 0.26302, saving model to alex_2.h5\n",
"51/51 [==============================] - 51s 994ms/step - loss: 3.6077 - accuracy: 0.3566 - val_loss: 1.8337 - val_accuracy: 0.2630\n",
"Epoch 2/25\n",
"51/51 [==============================] - ETA: 0s - loss: 1.2408 - accuracy: 0.5803\n",
"Epoch 2: val_accuracy improved from 0.26302 to 0.34375, saving model to alex_2.h5\n",
"51/51 [==============================] - 53s 1s/step - loss: 1.2408 - accuracy: 0.5803 - val_loss: 2.8576 - val_accuracy: 0.3438\n",
"Epoch 3/25\n",
"51/51 [==============================] - ETA: 0s - loss: 0.9538 - accuracy: 0.6550\n",
"Epoch 3: val_accuracy improved from 0.34375 to 0.35677, saving model to alex_2.h5\n",
"51/51 [==============================] - 56s 1s/step - loss: 0.9538 - accuracy: 0.6550 - val_loss: 4.7057 - val_accuracy: 0.3568\n",
"Epoch 4/25\n",
"44/51 [========================>.....] - ETA: 7s - loss: 0.7304 - accuracy: 0.7273"
]
}
],
"source": [
"data_test = load_test_data(\"./train_test_sw/test_sw\")\n",
"X_test = data_test['values']\n",
"y_test = data_test['labels']\n",
"for filter in filters:\n",
" print(f\"{filter} ---------------------------------------\")\n",
" train_ds, test_ds, validation_ds = data_prep_alex(filter)\n",
" alex(filter, train_ds, test_ds, validation_ds)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
},
"vscode": {
"interpreter": {
"hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}