Symulowanie-wizualne/sw_lab7.ipynb

{
"cells": [
{
"cell_type": "markdown",
"id": "b3f048a4",
"metadata": {},
"source": [
"# Zadanie 7 - Alexnet\n",
"### Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop"
]
},
{
"cell_type": "markdown",
"id": "554d48da",
"metadata": {},
"source": [
"## Przygotowanie danych"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "9c1fa435",
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import Image, SVG, display"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "2fe63b50",
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"import subprocess\n",
"import pkg_resources\n",
"import numpy as np\n",
"\n",
"required = { 'scikit-image'}\n",
"installed = {pkg.key for pkg in pkg_resources.working_set}\n",
"missing = required - installed\n",
"# Alexnet requires images to be of dim = (227, 227, 3)\n",
"newSize = (227,227)\n",
"\n",
"if missing: \n",
" python = sys.executable\n",
" subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL)\n",
"\n",
"def load_train_data(input_dir):\n",
" import numpy as np\n",
" import pandas as pd\n",
" import os\n",
" from skimage.io import imread\n",
" import cv2 as cv\n",
" from pathlib import Path\n",
" import random\n",
" from shutil import copyfile, rmtree\n",
" import json\n",
"\n",
" import seaborn as sns\n",
" import matplotlib.pyplot as plt\n",
"\n",
" import matplotlib\n",
" \n",
" image_dir = Path(input_dir)\n",
" categories_name = []\n",
" for file in os.listdir(image_dir):\n",
" d = os.path.join(image_dir, file)\n",
" if os.path.isdir(d):\n",
" categories_name.append(file)\n",
"\n",
" folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]\n",
"\n",
" train_img = []\n",
" categories_count=[]\n",
" labels=[]\n",
" for i, direc in enumerate(folders):\n",
" count = 0\n",
" for obj in direc.iterdir():\n",
" if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':\n",
" labels.append(os.path.basename(os.path.normpath(direc)))\n",
" count += 1\n",
" img = imread(obj)#zwraca ndarry postaci xSize x ySize x colorDepth\n",
" img = img[:, :, :3]\n",
" img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)# zwraca ndarray\n",
" img = img / 255 #normalizacja\n",
" train_img.append(img)\n",
" categories_count.append(count)\n",
" X={}\n",
" X[\"values\"] = np.array(train_img)\n",
" X[\"categories_name\"] = categories_name\n",
" X[\"categories_count\"] = categories_count\n",
" X[\"labels\"]=labels\n",
" return X\n",
"\n",
"def load_test_data(input_dir):\n",
" import numpy as np\n",
" import pandas as pd\n",
" import os\n",
" from skimage.io import imread\n",
" import cv2 as cv\n",
" from pathlib import Path\n",
" import random\n",
" from shutil import copyfile, rmtree\n",
" import json\n",
"\n",
" import seaborn as sns\n",
" import matplotlib.pyplot as plt\n",
"\n",
" import matplotlib\n",
"\n",
" image_path = Path(input_dir)\n",
"\n",
" labels_path = image_path.parents[0] / 'test_labels.json'\n",
"\n",
" jsonString = labels_path.read_text()\n",
" objects = json.loads(jsonString)\n",
"\n",
" categories_name = []\n",
" categories_count=[]\n",
" count = 0\n",
" c = objects[0]['value']\n",
" for e in objects:\n",
" if e['value'] != c:\n",
" categories_count.append(count)\n",
" c = e['value']\n",
" count = 1\n",
" else:\n",
" count += 1\n",
" if not e['value'] in categories_name:\n",
" categories_name.append(e['value'])\n",
"\n",
" categories_count.append(count)\n",
" \n",
" test_img = []\n",
"\n",
" labels=[]\n",
" for e in objects:\n",
" p = image_path / e['filename']\n",
" img = imread(p)#zwraca ndarry postaci xSize x ySize x colorDepth\n",
" img = img[:, :, :3]\n",
" img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)# zwraca ndarray\n",
" img = img / 255#normalizacja\n",
" test_img.append(img)\n",
" labels.append(e['value'])\n",
"\n",
" X={}\n",
" X[\"values\"] = np.array(test_img)\n",
" X[\"categories_name\"] = categories_name\n",
" X[\"categories_count\"] = categories_count\n",
" X[\"labels\"]=labels\n",
" return X"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cc941c5a",
"metadata": {},
"outputs": [],
"source": [
"# Data load\n",
"data_train = load_train_data(\"./train_test_sw/train_sw\")\n",
"values_train = data_train['values']\n",
"labels_train = data_train['labels']\n",
"\n",
"data_test = load_test_data(\"./train_test_sw/test_sw\")\n",
"X_test = data_test['values']\n",
"y_test = data_test['labels']"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "25040ac9",
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "18d44949",
"metadata": {},
"outputs": [],
"source": [
"X_train, X_validate, y_train, y_validate = train_test_split(values_train, labels_train, test_size=0.2, random_state=42)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "a1fe47e6",
"metadata": {},
"outputs": [],
"source": [
"from sklearn.preprocessing import LabelEncoder"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "d90af799",
"metadata": {},
"outputs": [],
"source": [
"class_le = LabelEncoder()\n",
"y_train_enc = class_le.fit_transform(y_train)\n",
"y_validate_enc = class_le.fit_transform(y_validate)\n",
"y_test_enc = class_le.fit_transform(y_test)"
]
},
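{
"cell_type": "markdown",
"id": "label-enc-note",
"metadata": {},
"source": [
"Note that the encoder is fit once on the training labels and the same mapping is then reused for the validation and test labels. A minimal sanity-check sketch of this pattern (the class names below are hypothetical placeholders, not the actual dataset categories):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "label-enc-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch of consistent label encoding (hypothetical class names).\n",
"le = LabelEncoder()\n",
"demo_train = ['cat', 'dog', 'cat', 'bird']\n",
"demo_enc = le.fit_transform(demo_train)    # classes sorted alphabetically: bird=0, cat=1, dog=2\n",
"demo_dec = le.inverse_transform(demo_enc)  # back to the original strings\n",
"demo_new = le.transform(['dog', 'bird'])   # same mapping, no refitting\n",
"print(demo_enc, demo_dec, demo_new)"
]
},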
{
"cell_type": "code",
"execution_count": 8,
"id": "c2323985",
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "dfe674dc",
"metadata": {},
"outputs": [],
"source": [
"train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))\n",
"validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))\n",
"test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "076c8ac9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Training data size: 820\n",
"Test data size: 259\n",
"Validation data size: 206\n"
]
}
],
"source": [
"train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()\n",
"test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()\n",
"validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()\n",
"print(\"Training data size:\", train_ds_size)\n",
"print(\"Test data size:\", test_ds_size)\n",
"print(\"Validation data size:\", validation_ds_size)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "07ebcd4a",
"metadata": {},
"outputs": [],
"source": [
"train_ds = (train_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
"test_ds = (test_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
"validation_ds = (validation_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))"
]
},
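{
"cell_type": "markdown",
"id": "prefetch-note",
"metadata": {},
"source": [
"Side note: a `tf.data` pipeline can overlap input preparation with training by prefetching batches. This optional tweak is not applied above; a minimal sketch:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "prefetch-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Optional: prepare the next batch on the CPU while the current one is training.\n",
"# (Sketch only; the datasets defined above are left unchanged.)\n",
"prefetched_train_ds = train_ds.prefetch(tf.data.AUTOTUNE)"
]
},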
{
"cell_type": "markdown",
"id": "8ca54e00",
"metadata": {},
"source": [
"## Model 1 - batch size = 32"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "698cb6c3",
"metadata": {},
"outputs": [],
"source": [
"from tensorflow import keras\n",
"import os\n",
"import time"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "e200f588",
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
" keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dense(4096, activation='relu'),\n",
" keras.layers.Dense(10, activation='softmax')\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "51a068f2",
"metadata": {},
"outputs": [],
"source": [
"root_logdir = os.path.join(os.curdir, \"logs\\\\fit\\\\\")\n",
"def get_run_logdir():\n",
" run_id = time.strftime(\"run_%Y_%m_%d-%H_%M_%S\")\n",
" return os.path.join(root_logdir, run_id)\n",
"run_logdir = get_run_logdir()\n",
"tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)"
]
},
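{
"cell_type": "markdown",
"id": "tb-usage-note",
"metadata": {},
"source": [
"The logs written by the TensorBoard callback can be inspected from the notebook, e.g.:\n",
"\n",
"```\n",
"%load_ext tensorboard\n",
"%tensorboard --logdir logs/fit\n",
"```"
]
},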
{
"cell_type": "code",
"execution_count": 15,
"id": "0dc257b5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" max_pooling2d (MaxPooling2D (None, 27, 27, 96) 0 \n",
" ) \n",
" \n",
" conv2d_1 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" max_pooling2d_1 (MaxPooling (None, 13, 13, 256) 0 \n",
" 2D) \n",
" \n",
" conv2d_2 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" conv2d_3 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" conv2d_4 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" max_pooling2d_2 (MaxPooling (None, 6, 6, 256) 0 \n",
" 2D) \n",
" \n",
" flatten (Flatten) (None, 9216) 0 \n",
" \n",
" dense (Dense) (None, 4096) 37752832 \n",
" \n",
" dense_1 (Dense) (None, 4096) 16781312 \n",
" \n",
" dense_2 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,322,314\n",
"Trainable params: 58,322,314\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(lr=.001), metrics=['accuracy'])\n",
"model.summary()"
]
},
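{
"cell_type": "markdown",
"id": "param-count-note",
"metadata": {},
"source": [
"Sanity check on the summary: a `Conv2D` layer has $(k_h \\cdot k_w \\cdot c_{in} + 1) \\cdot c_{out}$ parameters (the $+1$ is the bias). For the first layer this gives $(11 \\cdot 11 \\cdot 3 + 1) \\cdot 96 = 34{,}944$, matching the value reported above. The fully connected layers dominate: the first `Dense` layer alone has $(9216 + 1) \\cdot 4096 = 37{,}752{,}832$ parameters, roughly 65% of the total."
]
},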
{
"cell_type": "code",
"execution_count": 16,
"id": "d47c38da",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/100\n",
"25/25 [==============================] - 53s 2s/step - loss: 2.2516 - accuracy: 0.2150 - val_loss: 2.1732 - val_accuracy: 0.2656\n",
"Epoch 2/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 1.9127 - accuracy: 0.2362 - val_loss: 1.6225 - val_accuracy: 0.2188\n",
"Epoch 3/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 1.6217 - accuracy: 0.2612 - val_loss: 1.5785 - val_accuracy: 0.3438\n",
"Epoch 4/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 1.5799 - accuracy: 0.3038 - val_loss: 1.5517 - val_accuracy: 0.2917\n",
"Epoch 5/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 1.5565 - accuracy: 0.2988 - val_loss: 1.5266 - val_accuracy: 0.3750\n",
"Epoch 6/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 1.4953 - accuracy: 0.3738 - val_loss: 1.6185 - val_accuracy: 0.3750\n",
"Epoch 7/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 1.4873 - accuracy: 0.3613 - val_loss: 1.4834 - val_accuracy: 0.4219\n",
"Epoch 8/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 1.4110 - accuracy: 0.4187 - val_loss: 1.3432 - val_accuracy: 0.4740\n",
"Epoch 9/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 1.3515 - accuracy: 0.4363 - val_loss: 1.3292 - val_accuracy: 0.4688\n",
"Epoch 10/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 1.3174 - accuracy: 0.4563 - val_loss: 1.3137 - val_accuracy: 0.4375\n",
"Epoch 11/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 1.2342 - accuracy: 0.4950 - val_loss: 1.3598 - val_accuracy: 0.5260\n",
"Epoch 12/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 1.2386 - accuracy: 0.4787 - val_loss: 1.1717 - val_accuracy: 0.5052\n",
"Epoch 13/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 1.1891 - accuracy: 0.5025 - val_loss: 1.1926 - val_accuracy: 0.4896\n",
"Epoch 14/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 1.1791 - accuracy: 0.4950 - val_loss: 1.1991 - val_accuracy: 0.4271\n",
"Epoch 15/100\n",
"25/25 [==============================] - 42s 2s/step - loss: 1.1458 - accuracy: 0.5275 - val_loss: 1.1069 - val_accuracy: 0.5104\n",
"Epoch 16/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 1.0808 - accuracy: 0.5450 - val_loss: 1.0976 - val_accuracy: 0.5521\n",
"Epoch 17/100\n",
"25/25 [==============================] - 42s 2s/step - loss: 1.0491 - accuracy: 0.5650 - val_loss: 1.2781 - val_accuracy: 0.4740\n",
"Epoch 18/100\n",
"25/25 [==============================] - 42s 2s/step - loss: 1.0335 - accuracy: 0.5575 - val_loss: 1.1958 - val_accuracy: 0.5417\n",
"Epoch 19/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 1.0198 - accuracy: 0.5675 - val_loss: 1.0505 - val_accuracy: 0.5729\n",
"Epoch 20/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.9765 - accuracy: 0.5900 - val_loss: 1.0172 - val_accuracy: 0.5938\n",
"Epoch 21/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.9793 - accuracy: 0.5913 - val_loss: 1.0369 - val_accuracy: 0.5365\n",
"Epoch 22/100\n",
"25/25 [==============================] - 42s 2s/step - loss: 0.9380 - accuracy: 0.6162 - val_loss: 1.0670 - val_accuracy: 0.5104\n",
"Epoch 23/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.9409 - accuracy: 0.5813 - val_loss: 0.9529 - val_accuracy: 0.6094\n",
"Epoch 24/100\n",
"25/25 [==============================] - 44s 2s/step - loss: 0.8277 - accuracy: 0.6375 - val_loss: 1.2964 - val_accuracy: 0.4635\n",
"Epoch 25/100\n",
"25/25 [==============================] - 52s 2s/step - loss: 0.9220 - accuracy: 0.5938 - val_loss: 0.9610 - val_accuracy: 0.6042\n",
"Epoch 26/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.8962 - accuracy: 0.6137 - val_loss: 1.0367 - val_accuracy: 0.5365\n",
"Epoch 27/100\n",
"25/25 [==============================] - 42s 2s/step - loss: 0.8650 - accuracy: 0.6237 - val_loss: 1.0654 - val_accuracy: 0.5156\n",
"Epoch 28/100\n",
"25/25 [==============================] - 45s 2s/step - loss: 0.8186 - accuracy: 0.6413 - val_loss: 0.9914 - val_accuracy: 0.6094\n",
"Epoch 29/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.8347 - accuracy: 0.6313 - val_loss: 0.9955 - val_accuracy: 0.5990\n",
"Epoch 30/100\n",
"25/25 [==============================] - 42s 2s/step - loss: 0.7907 - accuracy: 0.6513 - val_loss: 0.9453 - val_accuracy: 0.6146\n",
"Epoch 31/100\n",
"25/25 [==============================] - 42s 2s/step - loss: 0.7743 - accuracy: 0.6675 - val_loss: 0.9493 - val_accuracy: 0.6042\n",
"Epoch 32/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.7444 - accuracy: 0.6938 - val_loss: 0.9506 - val_accuracy: 0.6146\n",
"Epoch 33/100\n",
"25/25 [==============================] - 42s 2s/step - loss: 0.7630 - accuracy: 0.6525 - val_loss: 0.8973 - val_accuracy: 0.6354\n",
"Epoch 34/100\n",
"25/25 [==============================] - 46s 2s/step - loss: 0.7529 - accuracy: 0.6850 - val_loss: 0.9552 - val_accuracy: 0.5833\n",
"Epoch 35/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.6825 - accuracy: 0.7063 - val_loss: 1.0233 - val_accuracy: 0.5729\n",
"Epoch 36/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.6654 - accuracy: 0.7287 - val_loss: 0.9992 - val_accuracy: 0.6250\n",
"Epoch 37/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.7306 - accuracy: 0.7075 - val_loss: 1.1470 - val_accuracy: 0.5833\n",
"Epoch 38/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.6823 - accuracy: 0.7237 - val_loss: 0.9150 - val_accuracy: 0.6406\n",
"Epoch 39/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.7293 - accuracy: 0.6900 - val_loss: 0.9105 - val_accuracy: 0.6719\n",
"Epoch 40/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.6359 - accuracy: 0.7225 - val_loss: 0.8538 - val_accuracy: 0.6823\n",
"Epoch 41/100\n",
"25/25 [==============================] - 38s 2s/step - loss: 0.6523 - accuracy: 0.7287 - val_loss: 1.5683 - val_accuracy: 0.5417\n",
"Epoch 42/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.6885 - accuracy: 0.7237 - val_loss: 0.9864 - val_accuracy: 0.6458\n",
"Epoch 43/100\n",
"25/25 [==============================] - 37s 1s/step - loss: 0.5583 - accuracy: 0.7775 - val_loss: 0.9455 - val_accuracy: 0.6198\n",
"Epoch 44/100\n",
"25/25 [==============================] - 38s 1s/step - loss: 0.5613 - accuracy: 0.7588 - val_loss: 0.8001 - val_accuracy: 0.6771\n",
"Epoch 45/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.5904 - accuracy: 0.7850 - val_loss: 0.8891 - val_accuracy: 0.6719\n",
"Epoch 46/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.5847 - accuracy: 0.7600 - val_loss: 0.7383 - val_accuracy: 0.7135\n",
"Epoch 47/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.5609 - accuracy: 0.7650 - val_loss: 0.9535 - val_accuracy: 0.6354\n",
"Epoch 48/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.5933 - accuracy: 0.7700 - val_loss: 0.8282 - val_accuracy: 0.6823\n",
"Epoch 49/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.5148 - accuracy: 0.7862 - val_loss: 1.0083 - val_accuracy: 0.6094\n",
"Epoch 50/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.5318 - accuracy: 0.7763 - val_loss: 0.7793 - val_accuracy: 0.6927\n",
"Epoch 51/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.4546 - accuracy: 0.8138 - val_loss: 0.7321 - val_accuracy: 0.7396\n",
"Epoch 52/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.4888 - accuracy: 0.7987 - val_loss: 0.7415 - val_accuracy: 0.7292\n",
"Epoch 53/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.4525 - accuracy: 0.8263 - val_loss: 1.1359 - val_accuracy: 0.5938\n",
"Epoch 54/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.4635 - accuracy: 0.8100 - val_loss: 0.8153 - val_accuracy: 0.7083\n",
"Epoch 55/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.3715 - accuracy: 0.8587 - val_loss: 0.8006 - val_accuracy: 0.7083\n",
"Epoch 56/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.7150 - accuracy: 0.7650 - val_loss: 0.6763 - val_accuracy: 0.7604\n",
"Epoch 57/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.4236 - accuracy: 0.8400 - val_loss: 1.2931 - val_accuracy: 0.5625\n",
"Epoch 58/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.4233 - accuracy: 0.8338 - val_loss: 0.7108 - val_accuracy: 0.7188\n",
"Epoch 59/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.4240 - accuracy: 0.8263 - val_loss: 0.8515 - val_accuracy: 0.7656\n",
"Epoch 60/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.2996 - accuracy: 0.8913 - val_loss: 1.1627 - val_accuracy: 0.6719\n",
"Epoch 61/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.4147 - accuracy: 0.8438 - val_loss: 0.8675 - val_accuracy: 0.7656\n",
"Epoch 62/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.3373 - accuracy: 0.8575 - val_loss: 1.8903 - val_accuracy: 0.5260\n",
"Epoch 63/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.3147 - accuracy: 0.8913 - val_loss: 0.9597 - val_accuracy: 0.6927\n",
"Epoch 64/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.4258 - accuracy: 0.8625 - val_loss: 0.7272 - val_accuracy: 0.7448\n",
"Epoch 65/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.2791 - accuracy: 0.8950 - val_loss: 0.7932 - val_accuracy: 0.7396\n",
"Epoch 66/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.2791 - accuracy: 0.8963 - val_loss: 1.1467 - val_accuracy: 0.6823\n",
"Epoch 67/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.2417 - accuracy: 0.9050 - val_loss: 0.8308 - val_accuracy: 0.7344\n",
"Epoch 68/100\n",
"25/25 [==============================] - 43s 2s/step - loss: 0.4000 - accuracy: 0.8725 - val_loss: 0.8193 - val_accuracy: 0.6875\n",
"Epoch 69/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.2515 - accuracy: 0.9162 - val_loss: 0.8325 - val_accuracy: 0.7396\n",
"Epoch 70/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.2121 - accuracy: 0.9187 - val_loss: 0.9849 - val_accuracy: 0.7240\n",
"Epoch 71/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.1987 - accuracy: 0.9262 - val_loss: 0.8387 - val_accuracy: 0.7760\n",
"Epoch 72/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.2786 - accuracy: 0.8975 - val_loss: 0.7462 - val_accuracy: 0.7917\n",
"Epoch 73/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.1309 - accuracy: 0.9625 - val_loss: 1.0813 - val_accuracy: 0.7448\n",
"Epoch 74/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.3271 - accuracy: 0.9013 - val_loss: 0.7063 - val_accuracy: 0.7604\n",
"Epoch 75/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.2142 - accuracy: 0.9187 - val_loss: 0.9269 - val_accuracy: 0.7708\n",
"Epoch 76/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.1603 - accuracy: 0.9438 - val_loss: 0.8590 - val_accuracy: 0.7448\n",
"Epoch 77/100\n",
"25/25 [==============================] - 42s 2s/step - loss: 0.1331 - accuracy: 0.9513 - val_loss: 1.0895 - val_accuracy: 0.7083\n",
"Epoch 78/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.1177 - accuracy: 0.9638 - val_loss: 1.0417 - val_accuracy: 0.7500\n",
"Epoch 79/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.4039 - accuracy: 0.8775 - val_loss: 0.8521 - val_accuracy: 0.7240\n",
"Epoch 80/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.1795 - accuracy: 0.9488 - val_loss: 0.9234 - val_accuracy: 0.7344\n",
"Epoch 81/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.0812 - accuracy: 0.9850 - val_loss: 0.9745 - val_accuracy: 0.7656\n",
"Epoch 82/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.0976 - accuracy: 0.9625 - val_loss: 1.0829 - val_accuracy: 0.7500\n",
"Epoch 83/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.1220 - accuracy: 0.9513 - val_loss: 1.8248 - val_accuracy: 0.5885\n",
"Epoch 84/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.7078 - accuracy: 0.8188 - val_loss: 0.7013 - val_accuracy: 0.7552\n",
"Epoch 85/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.1607 - accuracy: 0.9613 - val_loss: 0.8556 - val_accuracy: 0.7656\n",
"Epoch 86/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.0831 - accuracy: 0.9812 - val_loss: 1.0032 - val_accuracy: 0.7552\n",
"Epoch 87/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.1305 - accuracy: 0.9575 - val_loss: 0.8887 - val_accuracy: 0.7604\n",
"Epoch 88/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.0602 - accuracy: 0.9825 - val_loss: 1.1799 - val_accuracy: 0.7135\n",
"Epoch 89/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.0469 - accuracy: 0.9887 - val_loss: 1.1216 - val_accuracy: 0.7396\n",
"Epoch 90/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.1920 - accuracy: 0.9325 - val_loss: 1.2466 - val_accuracy: 0.6927\n",
"Epoch 91/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.2436 - accuracy: 0.9275 - val_loss: 0.8773 - val_accuracy: 0.7656\n",
"Epoch 92/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.0651 - accuracy: 0.9887 - val_loss: 1.0198 - val_accuracy: 0.7448\n",
"Epoch 93/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.0424 - accuracy: 0.9912 - val_loss: 1.1194 - val_accuracy: 0.7448\n",
"Epoch 94/100\n",
"25/25 [==============================] - 41s 2s/step - loss: 0.0291 - accuracy: 0.9975 - val_loss: 1.2345 - val_accuracy: 0.7292\n",
"Epoch 95/100\n",
"25/25 [==============================] - 43s 2s/step - loss: 0.0237 - accuracy: 1.0000 - val_loss: 1.2051 - val_accuracy: 0.7292\n",
"Epoch 96/100\n",
"25/25 [==============================] - 43s 2s/step - loss: 0.9514 - accuracy: 0.8100 - val_loss: 0.9362 - val_accuracy: 0.6562\n",
"Epoch 97/100\n",
"25/25 [==============================] - 40s 2s/step - loss: 0.3801 - accuracy: 0.8913 - val_loss: 0.8821 - val_accuracy: 0.7396\n",
"Epoch 98/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.1676 - accuracy: 0.9588 - val_loss: 0.8548 - val_accuracy: 0.7292\n",
"Epoch 99/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.0977 - accuracy: 0.9812 - val_loss: 1.1352 - val_accuracy: 0.7135\n",
"Epoch 100/100\n",
"25/25 [==============================] - 39s 2s/step - loss: 0.1027 - accuracy: 0.9663 - val_loss: 0.8968 - val_accuracy: 0.8021\n"
]
},
{
"data": {
"text/plain": [
"<keras.callbacks.History at 0x18ee5f3b490>"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model.fit(train_ds,\n",
" epochs=100,\n",
" validation_data=validation_ds,\n",
" validation_freq=1,\n",
" callbacks=[tensorboard_cb])"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "0f02f00e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 7s 327ms/step - loss: 1.0574 - accuracy: 0.7070\n"
]
},
{
"data": {
"text/plain": [
"[1.0574449300765991, 0.70703125]"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model.evaluate(test_ds)"
]
},
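{
"cell_type": "markdown",
"id": "eval-note",
"metadata": {},
"source": [
"The model reaches about 70.7% accuracy on the held-out test set, well below the near-perfect training accuracy of the final epochs, which points to overfitting (also visible in the growing gap between training and validation loss above)."
]
},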
{
"cell_type": "markdown",
"id": "ac11196e",
"metadata": {},
"source": [
"## Wizualizacja filtrów obrazowych na poszczególnych warstwach"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "2e0312d6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"conv2d (11, 11, 3, 96)\n",
"conv2d_1 (5, 5, 96, 256)\n",
"conv2d_2 (3, 3, 256, 384)\n",
"conv2d_3 (3, 3, 384, 384)\n",
"conv2d_4 (3, 3, 384, 256)\n"
]
}
],
"source": [
"layers_names = []\n",
"for layer in model.layers:\n",
" # check for convolutional layer\n",
" if 'conv' not in layer.name:\n",
" continue\n",
" layers_names.append(layer.name)\n",
" filters, biases = layer.get_weights()\n",
" print(layer.name, filters.shape)"
]
},
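{
"cell_type": "markdown",
"id": "filter-shape-note",
"metadata": {},
"source": [
"The printed weight shapes follow the Keras convention `(kernel_height, kernel_width, input_channels, output_channels)`; e.g. `(11, 11, 3, 96)` means 96 filters of size 11x11 spanning the 3 input channels."
]
},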
{
"cell_type": "code",
"execution_count": 19,
"id": "d30c6b5e",
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "41e41fca",
"metadata": {},
"outputs": [],
"source": [
"filters, biases = model.layers[0].get_weights()\n",
"fmin, fmax = filters.min(), filters.max()\n",
"filters = (filters - fmin) / (fmax - fmin)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "90c820db",
"metadata": {},
"outputs": [],
"source": [
"nb_filters, tmp = 3, 1"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "f0d16f58",
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAeMAAAGKCAYAAAA/jCmqAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/av/WaAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAlbElEQVR4nO3daXgU1L3H8ZOQnSQgNIKBIAqiDVrBhbBpXYCqFUEQLFZBFqFYlEUlgKDslAoCSuEBQRGQ4sKmEFYVlIIsFVBRHjajwbBvmUyWCZm5L+j03r6Z82uS3mM638+b++bLPzPJmfkzeHMaEQgEAgYAADgT6foBAAAQ7ljGAAA4xjIGAMAxljEAAI6xjAEAcIxlDACAYyxjAAAcYxkDAOBYVFn/oN/vN7m5uSYpKclERERU5GPCf0ggEDAej8ekpqaayMjw/XsYZ7fy4exextmtfNSzW+ZlnJuba9LS0sr6x+FQTk6OqVu3ruuH4Qxnt/Li7HJ2Kyvb2S3zMk5KSjLGGNOnTx8TExMTsl27dq00c8mSJVKXmJgodSkpKVL3zjvvWBuv1yvNatasmdRNmzZN6s6cOSN1DRo0sDYlJSVmxYoV//zZhavg8//iiy+sZ+m5556TZo4YMULqTpw4IXWvvPKK1LVp08ba1KtXT5r19ddfS92PP/4odV26dJG6gwcPWpvi4mIzdepUzu4/nn+3bt2s77t///vfpZl/+MMfpO7WW2+VOvW9rWfPntZm9+7d0qzFixdL3aBBg6RO/bo//PCDtbl06ZLZvHmz9eyWeRkH/4kkJibGxMbGhmzVf1ZSl6z6gkxOTpa6+Ph4a1NaWirNqlq1qtRFR0dLXZUqVaTO9sL8v8L9n7eCzz8xMdF6ltSfk3p2ExISpC4qSntpxsXFWRvlfBtjrK/jIPV7oj5X5TkEcXb/933X9ppX3zvU86GecfV8KO+V6tmo6Oeqvp+qr1Nj7Gc3fP/jCwAAPxMsYwAAHGMZAwDgGMsYAADHWMYAADjGMgYAwLEy/2pT0MaNG63/b+Xp6enSrE8//VTqVq1aJXU1atSQutGjR1ubuXPnVujXVP/f/zt06CB1x44dszY+n0+aFS6ioqKsv5qQkZEhzerWrZvUde7cWerq1KkjdUePHrU22dnZ0qxZs2ZJ3cqVK6Vu6tSpUqf8LndBQYE0K1xkZ2dbz26nTp2kWZ9//rnU7du3T+rU382fPn26tXn99delWYcOHZI69VeWhg4dKnV5eXnWJj8/32zatMna8ckYAADHWMYAADjGMgYAwDGWMQAAjrGMAQBwjGUMAIBjLGMAABxjGQMA4BjLGAAAx8p9A1fv3r2t/wPQAwcOlGadO3dO6l577TWpW7BggdTt3bvX2vziF7+QZqn/Y9PqbUcbN26UutatW1sbr9dr5s+fL80LByNHjrTehLZkyRJp1n333Sd1M2fOlLpevXpJ3VVXXWVt1P9B9a5du0pdmzZtpO7999+XusWLF1uboqIiaVa4GDJkiKlatWrIJjExUZr1xz/+Ueq+//57qfvkk0+k7tSpU9ambdu20qy77rpL6jIzM6VOfS14vV5ro958yCdjAAAcYxkDAOAYyxgAAMdYxgAAOMYyBgDAMZYxAACOsYwBAHCMZQwAgGMsYwAAHCv3DVytWrWy3vTy5ptvSrMWLVokdWlpaVL317/+Ver27NljbWy3jAWpN7csW7ZM6goLC6VuxYoV1qa4uFiaFS4SExNNTExMhcwaMWKE1F28eFHqzp49K3V33nmntVFvcbv55pulbt68eVI3ceJEqVNuG+Ps/qsdO3ZY35PuvfdeaVZycrLUqTev3XrrrVJ35MgRa9OwYUNplno+srKypG7Dhg1St379emtz6dIlaRafjAEAcIxlDACAYyxjAAAcYxkDAOAYyxgAAMdYxgAAOMYyBgDAMZYxAACOsYwBAHCs3Ddw+Xw+6+0nzZo1k2b99NNPUqfcmGWMfvNQly5drM1HH30kzVJv1urcubPUPfLII1J34cIFa+Pz+aRZ4eLhhx82VatWDdlMmzZNmqX+nO666y6pU28Ueuqpp6yN+hzOnDkjderZPXjwoNT17NnT2uTn58vPIxycPn3aentcrVq1pFmBQEDqBgwYIHVHjx6VurFjx1qbqVOnSrMWLFggdYcOHZK68+fPS12PHj2sTUFBgfnkk0+sHZ+MAQBwjGUMAIBjLGMAABxjGQMA4BjLGAAAx1jGAAA4xjIGAMAxljEAAI6xjAEAcKzcN3Dt2LHDxMXFhWzy8vKkWYmJiVLXpEkTqYuK0p7ezJkzrc1XX30lzVIf2/Dhw6Vu/fr1Ute1a1drU1BQYJYsWSLNCwfDhg0zVapUCdnMmTNHmvXFF19I3c6dO6VOubHHGGPatm1rbVq1aiXNateundTVqVNH6vr37y912dnZ1sbr9UqzwsXtt99u4uPjQzazZ8+WZo0aNUrqPv74Y6mzvaaCatasaW22bNkizRo8eLDUHT9+XOqaNm0qdd988421KSwslGbxyRgAAMdYxgAAOMYyBgDAMZYxAACOsYwBAHCMZQwAgGMsYwAAHGMZAwDgWLkv/fB6vaa0tDRkM3fuXGnW888/L3X33HOP1KmXGGzfvt3anD9/XpqVlpYmdadOnZK6xo0bS13fvn2tjd/vl2aFixUrVpikpKSQzbhx46RZv/71r6VO/Xk+8cQTUqe8FtTXQc+ePaVu0qRJUvf73/9e6h566CFrY3uPCTdr1qwx0dHRIZthw4ZJs5599lmp69ixo9QFAgGpa9CggbXp0KGDNOvll1+WujFjxkjdiRMnpE65WEU9u3wyBgDAMZYxAACOsYwBAHCMZQwAgGMsYwAAHGMZAwDgGMsYAADHWMYAADjGMgYAwLFy38A1f/58ExkZeqdPmzZNmrVy5UqpU2+SGj16tNTFxcVZm1GjRkmzNmzYIHUpKSlSp97YVK1aNWtTWFgo37YTDsaNG2diYmJCNj169JBmRURESJ1yU5ox+u1Jq1evtjYJCQnSLJ/PJ3XNmjWTuoEDB0qdcnuZz+cz3333nTQvHKxatcranD17Vpr17bffSl2tWrWkTn2PqV69urXZunWrNOudd96RultuuUXqFi1aJHVt27a1NsXFxdLZ5ZMxAACOsYwBAHCMZQwAgGMsYwAAHGMZAwDgGMsYAADHWMYAADjGMgYAwDGWMQAAjpX7Bq7Ro0eb+Pj4kE16ero0a926dVI3d+5cqUtNTZU6RdOmTaVOvSUoJydH6jIzM6Vu6NCh1sZ2U1q4mTx5sklOTg7ZqDf7nDx5UuqiorSX3NKlS6VuxowZ1qZXr17SrOzsbKmbM2eO1O3YsUPq8O976aWXrDcHXrx4UZql3sCl3l7Vu3dvqbvxxhutTcuWLaVZDz30kNTt3LlT6tSzq9w2lp+fb2bNmmXteHcGAMAxljEAAI6xjAEAcIxlDACAYyxjAAAcYxkDAOAYyxgAAMdYxgAAOMYyBgDAsXLfwJWdnW1iY2NDNgsWLJBm9e3bV+oef/xxqXv77
bel7sSJE9amXbt20qw33nhD6tq0aSN158+fl7pGjRpZG6/XK80KFxcuXDClpaUhG7/fL81au3at1H3wwQdS17p1a6lTboa76qqrpFnXX3+91M2ePVvqbrvtNqmbMmWKtQkEAtKscNGyZUtTtWrVkE1aWpo0y+fzSd3NN98sdYsXL5a6GjVqWJujR49Ksy5cuCB17777rtQ98sgjUtesWTNro55dPhkDAOAYyxgAAMdYxgAAOMYyBgDAMZYxAACOsYwBAHCMZQwAgGMsYwAAHGMZAwDgWESgjFfb5OXlmWrVqplmzZqZqKjQF3mNHj1ampmYmCh11157rdQVFxdLnfL4EhISpFm1a9eWuu7du0ud+hzWr19vbQoLC83QoUPNxYsXTXJysjT3v1Hw7DZv3tx6djt16iTNTElJkbp58+ZJXUZGhtQdP37c2vTu3VuatXPnTqnr0qWL1LVv377C5hUVFZlJkyZxdv9xdjMzM603Hx46dEiaOXbsWKkrKCiQOuW9yBhjXn75ZWuzadMmaZZ
"text/plain": [
"<Figure size 640x480 with 9 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"for i in range(nb_filters):\n",
" f = filters[:, :, :, i]\n",
" for j in range(3):\n",
" ax = plt.subplot(nb_filters, 3, tmp)\n",
" ax.set_xticks([])\n",
" ax.set_yticks([])\n",
" plt.imshow(f[:, :, j], cmap='gray')\n",
" tmp += 1\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "892aa9d8",
"metadata": {},
"outputs": [],
"source": [
"img_width, img_height = 227, 227"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "c73eb93e",
"metadata": {},
"outputs": [],
"source": [
"layer = model.get_layer(name=layers_names[0])\n",
"feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "713427ad",
"metadata": {},
"outputs": [],
"source": [
"def initialize_image():\n",
" # random noisy img\n",
" img = tf.random.uniform((1, img_width, img_height, 3))\n",
" return img"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "98d22c96",
"metadata": {},
"outputs": [],
"source": [
"def loss_calc(input_image, filter_index, fex):\n",
" activation = fex(input_image)\n",
" # getting rid of the border pixels so they don't inlfuence our results in any fun way\n",
" filter_activation = activation[:, 2:-2, 2:-2, filter_index]\n",
" return tf.reduce_mean(filter_activation)"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "97378fdd",
"metadata": {},
"outputs": [],
"source": [
"def gradient_ascent_step(img, filter_index, learning_rate, fex):\n",
" with tf.GradientTape() as tape:\n",
" tape.watch(img)\n",
" loss = loss_calc(img, filter_index, fex)\n",
" grads = tape.gradient(loss, img)\n",
" grads = tf.math.l2_normalize(grads)\n",
" img += learning_rate * grads\n",
" return loss, img"
]
},
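{
"cell_type": "markdown",
"id": "grad-ascent-note",
"metadata": {},
"source": [
"Each step performs gradient ascent on the input image to maximize the mean activation of the chosen filter:\n",
"\n",
"$$x \\leftarrow x + \\eta \\, \\frac{\\nabla_x L(x)}{\\lVert \\nabla_x L(x) \\rVert_2},$$\n",
"\n",
"where $L$ is the mean filter activation computed by `loss_calc` and $\\eta$ is the learning rate. L2-normalizing the gradient keeps the update size stable regardless of how strongly the filter responds."
]
},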
{
"cell_type": "code",
"execution_count": 28,
"id": "6903ea27",
"metadata": {},
"outputs": [],
"source": [
"def deprocess_image(img):\n",
" img -= img.mean()\n",
" img /= img.std() + 1e-5\n",
" img *= 0.15\n",
"\n",
" img = img[25:-25, 25:-25, :]\n",
"\n",
" img += 0.5\n",
" img = np.clip(img, 0, 1)\n",
"\n",
" img *= 255\n",
" img = np.clip(img, 0, 255).astype(\"uint8\")\n",
" return img"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "904dd5eb",
"metadata": {},
"outputs": [],
"source": [
"def visualize_filter(filter_index, fex):\n",
" # 20 steps grad desc\n",
" iterations = 30\n",
" learning_rate = 10.0\n",
" img = initialize_image()\n",
" for iteration in range(iterations):\n",
" loss, img = gradient_ascent_step(img, filter_index, learning_rate, fex)\n",
" img = deprocess_image(img[0].numpy())\n",
" return loss, img"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "01a803ac",
"metadata": {},
"outputs": [],
"source": [
"loss, img = visualize_filter(0, feature_extractor)\n",
"keras.preprocessing.image.save_img(\"lab7props/0.png\", img)"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "c568a497",
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAALEAAACxCAIAAAAES8uSAAEAAElEQVR4nAThZZArC4Ig6qUgBSmGFDOVpFKVClRMh+HCudg43T07OzML7+3a4fC+CDv849k/HHasY97GzszCQHdP4+3L98A9XOcUowpUpRIzSylKQQpS4O8jTPu9up2YgeJ+6pj1kcvzf0AlK+lY3JZd0V7/lxI6F9xjTnZJjFsJH03bylNn00Bo9BsUf0ApxkaDa3HzP7J1P/puo7sMi0iEv+UYlw9zyPzV+2XZ93nVTCUeuRWSYY7tpk53eQpZGxK98SzBVG68zf4cmq5e/9ZbliZ3xn6my9cABIPJrzHLtH/Ysz2uisyZC6I1qORMOTNrOsqF0HqOpq+9bkg+2fcS/7LUCkMv6/IfNS7xKVojPXzcX3CcvMZ+kDMQHYfn4vcvU97pXQv31sNqb75DYZlOyR2jB+SwtrHWJGpTcP/YIv3wOOwfBzQZ/mO2RfVmC3xAs3U04ba6lbjszjff8Y88IhIErZpp8oSbVG8lxAYqTl71IFciD/Xm3V3n7g/W18q/+J2/8gnxwCMgLk84/uFUe/vgSeVOXU1Z3PGid3zU6vvVFpuz6dEYCH6Tvt1iU5zxpcnqm7wVhxvjG4XESg8laTJYRpoBb9p2f02+LSDqRoJvWRqoEhoN3Wws/Z7asV+lcsZdZfPTFNRV7waEY2CmIn+rZfzk4CS5FgfCc/XSwnQ1kF+L8qhjV/Eqf7NMepcE9ZpOQDaX8zeWM4zGB5+f628EPhP8oCJirTm3tNb4UfJGngSwkMEE6/JKOw2S4sSX5dXb5H/yqysTdcKH1f8H5NYxkh6pfSTB9wLDcfLzAaBs0ZjsyBAQ+7xsmThnqzQLHMFZX8qB3GsYy8Um4ZQOShN95OomJePO/t6QyZV3SMw+f5tZv9MssAXDflIbVEZoZDHv0k3kzESr3v41gy1bH6p5j9M9EQlbixAoRvVl6WygUC8UkntCUytRFhDYFnIDpUFvIgGdjkhuELkMx07cP8UlTaLt4uzI5xf5GXmUOVAUyiA1X2FPUKUxYKAnBgZloGuyZN3nZhYe1+TZl9cOO5mPb0WeV3urZZuH1uyL+8hu4a54ycUPQF2QESaXJoj5NElqSXQSw6nw3KYopuqX+1IajOuqpyXedKXk4RTnTcWL+ANKqSgfFPKTmNRtZ12gCWGxOUkSyLjCF1l3WyS43ue0U+1oHe9q0qLSCD6OXzYCi3z+IE0bDEx+1lv6cP6DIpIgKXK1FKvH6RSK9TFxshmxj5r4e9EyxbBN914TA5ZQJrpgRLw4TuqOt1hFkeDtWXRsgQ4liU0J6bgmEKkKn5bKJ+Kx3ZPETUWR5KeRjPY3lZNZAdvoTgRn30l0zngxHaO23RmznUeHa2MYKUjoc4GLMI2oq/+YlE63F74bnoobNHOq0bfI020mzB5o++6mwrC+lZ3kE1sg0fIF+xToNSOOYQKe9kwXQ0UszlLEYQUqSpaLAFk6zMxxdijlYJaS5nDd3HuvTWWgy6/i0zXZ4AtB1N0/KOZ5bS5/wDiKui6J/eSxmOcR7a8Xzl73iRc4FDPTzoLnxdHRlLf6Rvl6Z8/dItWhutmpDx3lLta5UFOUfKt2t94cHLVkF8JysP92s+xJkqw1qg6gHob3nX1y+5mM8NTy9tlXZ7XKYNMgiNQfVa5Irl7joVK3Rcy9KLE8VyqsTnOaG4Q3cqeoRx9oztRQxXsMEoiNTI3I/ioyTFW0xHRf/Wzyje8UT4fGDqFh606qQkQOWcUKh5fk5MqAKow3z4XqQI3YTQ99IoVbGzt5JxD6otJs1T3U4dn0C8+JH4m1GjZyhbD3baGWA2brLHICebKeIZetObL8po8dcH7HwBHFKceR5bt63o0AuJJthg4al7u5wpEhvadMkFZDDXeTMrBdtApO00mIs99pM76n2H+pFr76ppiOCxEp56KTD78uZJboZ1RBXPXGlYMaQBhDup6Sf+voglqUOrX6vrydRC8AmurEAF+s+eoPS5mYxTWSCElJwZcxOTXpKciS0r3DrQ6TqB60uxctSsCPywUWQKpO3Qi1w/yMmvtKT9tr0DeeZaND1YGEiQ3IT+YNZg7a5VxczVq/8BRGWXOjyy/OQZaLwmeKKdAGyeNoDw7ApGZKcifSMkPVby4KhmVNOPjWUmUXdEUGKAFYl7JWnla1z/9Im3RdkfFXxTZXe59rOdkXg+54gTT507uh77+SLWsaRUkB5qrrtanhN193iXPaTvIYlzLSlD2FZHYsGP+9hm6NdflrXQdBtGdhNjYuBtcmtUJmiIxhZRr03jJw2nlNDdmw9GDSQF3nJH6cS7iUTVUcLtLzY8TC9gF9bZmS9Humlek42jSBpqTulZpTSvVIN3rQVr38M+hVvlShjgkOyefXKPXaFZVlF46/3Luvarg0NFMMrzB91wqpfXpgQepIu19K6CYxyFq4Y3h64P4BDOzmmjeXB8HUGeOwHuNNOZSNPfyffoDbClrgxqAVJ7uXuqRoNikysCPFTbwa6lwizcnp6+UTKc3p1JKvYeZXVf8nKOdRv6IfnxgAv32HLvzeU5uUKETQMzxIoJFo+h/c+2Lj5U+ZbQ9gZU3mJHnKLW3etSebWcS3z/bfX6ImwsSfTU5fXrxhJ6sROmWMOj3oPpd7RWcV8cw7/QD+5OcpbmqYlP1s9GEuos618TrFOqous9+KW4WWElGMMIjeKrft94Yo43p2CXPO0i9IqFF+h7EV23KQla/axKvSaPG/mduj/MjRJEWyzHYieZlmh23DTNbpf2YP12ik80rqnk4a4ImJ9dc7MxvmCft5DVwL1YfW9KRU6ZKnJZxQZaQ+sKH/yErHOqfA+wDb7tgB6IOOJWxoQ/9O/R25Xl/k5PtIh2RwEtg492jDfilWVPeoilFRIQyUEw7h/5S0DKZ8n/di9uPKn94pgrDrQh4Y+YF9Q1EpkgN521u1TfPfBDJ9Sdue8bA+ER6LI8tUwTNjF2Rpw+NZMbVyCXuYC70/WDrMRithOppYNu8omyUm1pqMSDnm/ykvW7qE42qLPZh5PV/gEzIuEWa+VT2vuMxA9akjSF7gfDOWHSGjpwznHdn1z0eSAqAPv+eGTMPXogxxmRG4719lGH7fT+np5OZIXnQHeKkv3vBIwtNHzR78xTUIL3X8hrPeLPantQRPjUGCsPTBsstCSMXS8XGcR5//H306t1QOTlRujIFP5nKivGqjlrUZpL+zRlp++vB+m/hDx38dgBOc3p4ho5xFv1OqbpHc+23vjQc1bwd5lwo8UTMkfy35P3SeprjOXkBoE9q3shh7KHDzUnemeK+EFXEptj6fgdWEX6YRVmpY/3RTekv1WlekdCJJKw4SZo8HKhX3FJwo3l8W/g9SsxY/AC246seFhxoIqgJv1e6bDwj/N/d/yqgc5u+b22OsUaJrKHGwXsS79v7FCNau0q5t0uMoxvgxTD4WtfsVTqZVvpdMDmRzO70e14gIkX4VIIficeP49NJ6dmd5pFOPwtzc6
LHmag3Nnkk4g5SxSwGtwx2/tL6UWXgYbprn9lvtRd6w6vZiWmufSiJz6ML8fgOzXpGLPBI+QpNiVd4A66Y1tTs+dmcs9yiMkyRiR6vRHxt1hs/mhhRBPxfrsy6BksNEUm2Q+1b7F8XsLNCHc+WqQINy6AVt6710zNtYRBpnHDFfRE2iRU6zXGTOgZoDpXOyUW5R4DaNUWcAED/v9StUNBnI9AiLUFAKjJwzUiCFz34ah0eYXmFqyF0eK7sJ2DJj9Ol5BL7WGDwW0tgAxkfNo37fBpXJGRPUcSH3otqfwavmYLU58/HLkn8aKiGeGswY9w6pZAJ2n3LZVM/tOOt6NZ8b2qRZ339RLhJsVMNRSTJL3bzCAQFj5CzfmDN6gp4lqGQlVQJjDy5duYEpYz3s91j2HeW+sSgeR6ttyB7OOlnL7PGorCRpPo6wOKyAmUsuA1o8ietmyfqwz98cPxYmZOW6MYQOVPLHMvk9Bk0Tr1aBXKQN0gcqrNplvMt2u+tGOkTF9gzKpaeRDJmE3UDJuOf
"text/plain": [
"<IPython.core.display.Image object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display(Image(\"lab7props/0.png\"))"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "189528ba",
"metadata": {},
"outputs": [],
"source": [
"def visualize_layer(layer_name, n):\n",
" from IPython.display import Image, display\n",
" \n",
" layer = model.get_layer(name=layer_name)\n",
" fex = keras.Model(inputs=model.inputs, outputs=layer.output)\n",
" \n",
" print(f\"Getting started with layer {layer_name}\")\n",
" all_imgs = []\n",
" for filter_index in range(n**2):\n",
" # print(\"Processing filter %d\" % (filter_index,))\n",
" loss, img = visualize_filter(filter_index, fex)\n",
" all_imgs.append(img)\n",
"\n",
" margin = 5\n",
" cropped_width = img_width - 25 * 2\n",
" cropped_height = img_height - 25 * 2\n",
" width = n * cropped_width + (n - 1) * margin\n",
" height = n * cropped_height + (n - 1) * margin\n",
" stitched_filters = np.zeros((width, height, 3))\n",
"\n",
" for i in range(n):\n",
" for j in range(n):\n",
" img = all_imgs[i * n + j]\n",
" stitched_filters[\n",
" (cropped_width + margin) * i : (cropped_width + margin) * i + cropped_width,\n",
" (cropped_height + margin) * j : (cropped_height + margin) * j\n",
" + cropped_height,\n",
" :,\n",
" ] = img\n",
"\n",
" filename = f\"lab7props/{layer_name}_stitched_filters.png\"\n",
" keras.preprocessing.image.save_img(filename, stitched_filters)\n",
" print(f\"{layer_name} done\")"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "cbd6719f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['conv2d', 'conv2d_1', 'conv2d_2', 'conv2d_3', 'conv2d_4']"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"layers_names"
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "82b3b56d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Getting started with layer conv2d\n",
"conv2d done\n",
"Getting started with layer conv2d_1\n",
"conv2d_1 done\n",
"Getting started with layer conv2d_2\n",
"conv2d_2 done\n",
"Getting started with layer conv2d_3\n",
"conv2d_3 done\n",
"Getting started with layer conv2d_4\n",
"conv2d_4 done\n"
]
}
],
"source": [
"for name in layers_names:\n",
" visualize_layer(name, 8)"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "9a517989",
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAABasAAAWrCAIAAAC15XirAAEAAElEQVR4nFzd5Zsj55U2cJVKVSqVmJlZajVz9/QwmzmxYztxYIMb2MAmC9kk72I2yW6yEAbHFOOMx8PczCy1mJmZSqr3qzf33/C77ud6zodzAMFrPze6qXKbc7HV39Vn+5bbXVG2hB9KVWqyjRBhJF4g6P3y2qgL1ysil3JTaWJnailG6W8FakIvU3K66CtQNyCaZbMpNi8BZp33mniwIApNr0E1eCnenihzEVXIB2rBSNZWoXS1kRh1PNqp9CU4gOpiUKbO32fSaUyhNhmjItAOE0znGOYWGW+HduU2BSMgXGsVGN0yyZiRNblzRbHWvpk/VdPt2fbrYF8ikj5OxRPE7YbluPt+St82qAau21kqINZSt5RlhQ9IiRaCyOlqOqPdLxi02LICBdsKxh5J17Nx2z/QmcpKL6N82Z2k5BkXwT+z3uHyDxYZKo6e0n6VoJreKRVObhjpxptLFBOxRhSUsZYpVOUJXCHeo0vMwunXy/Xh1WzrtIdZUS+i+uGFA1yeJpCn8tmoBSOXrSVKTjTfqQ92wj5djpNUN2JaRdeZVISIRXGgPCLqzlHpIEYkNhi2tt0vNOzKkaOXAuVhip8gFxG7mj0KRb5hR4ReRrtns6RhN+erQqqI3EqSJtSOS8nprokueHtHeS67GR5iW3PEa/iEcGsVHN5nkozBIl+/10yNt2Q5cYBPszqTaVkelpE2t4xHY9vVYx1SV3PfI5YXFsXjbXGY/Z6IOenYBgclYsrCwBThQ3npre9yQ/tMotSNuyhmWqHQ6uSkFV1SIgBa+0R2V+3BN4V6atjF1LO5S/V99ZgFSHn1VdHdEFHCqpeJFUZyPNrxmk6Uq/sSXpUUYOdN6mbZRS+lURppuzbAz+9J2EWE0AxLDNIGh9ApBJ0hmllU7nQbIhbTXawpBERHqiOsCXNo0WIs3q/KWFuNINF7ZNCSytVaOSQax44oMvNymLVtxtlJsSDh9xNoSjY7HSJ2x3xUVx+J7e5E2HoFwVVrRzQB8cZxkt4PHPBpYzmnX20Bb2/XtBIEipS6ZoMfiI86uZvtTs8wnK76CA55GD84NmDeiMXIOR1bkBZ08ukawG3KMnhdZKGlD0BKAcloZktac2cux2cKHYmq+Ug6UWQzkkCSkO/RYZVSm9yU1/GaQAHMp7tjOYarDGh1+bynK6bT14iJPi3sXmZa2NT9VnFYg8766gqqnpyKE5lVhA8FHa0mtWHkmQ4K3fF6eJPQsEB4MGcS6OOZBoPH8sZDiJ6DxvzRzule+no40+T6pW6D0hTfswubo8RKQHi07HbREYE8kqr1q/KZKAY0zEjpLofCCTAk4nzDVciIaLxcDdfg6jLJVZMW8GyPHCSvUta5dKI0IIKatX1qmiPV/vH4Dz8shPXV2BT4evWBpJgUcP7pb1QUR/UjAWfoeuSVn339xNVrA/VHbpbXS0N05q5AV7vrL+U3/nFI8H7imLe7VgPbp+VUt2/iXS+i1L/yy77x/7o60pRc2/VHHzqqdueGyruhXOTtf/2M4Q/zT+XUq/Htu09pNUsHx4K6TnL+xz95dvyX7ol6Mp3yz//7OdFN/1kf9X4o2fjOBP3n5fN5Tnxu4Ud3Tveuzk/Pc/fLd4N/93nyFc/hfVoyH7J/Q8Z9L/jRuGLz2uy/v/XsS3u3bVdHA66di5892V9eljmSgXpm5duPHn55g0dj+wuupd7B0dXKoXzMGfTfe/k53pUPPhc59Ob2kuuFccOKW+eh5xOptX820PaFh97fsOtrqRlzz8G23i1dD8PFR61Ce5xKSVHcoWv/o5P/anfCz1wJpL3fsKHX9r6eHFtzXl/+45foV1eOJFiuypX73/wo7Yexb/B0Sxe9l3/Yx/cFRrcCQEv69hmx1F8aT9Siuyr0KCUiwOkbWwWkc2AeUCwSTCb/fl4kBAgcSqqWEgsic9dmnjxEuMty0K5iSfFTisaf0s/2Db5xYTn38QGW7+7YOtNViC9/s+/Qm7tDpLF9773FTz1imbvZy5EG9m+vfuKQZiX/AIm2/uu95XefkRy4jl0qBvONtb+bQd6vvMht/vZgvvDspOS96DCQ9BxUVn40oXyze8rdTDWYjcN/eOGjHxbyVPDzJA8P8MUr4l6Ev8loktEGeYfRYLfDJMUgZ8OzYRVQwjgE19Vl0GNQyrzeNIdkWwQ9U6os5jdkMIOjvKNjY7KYNEogJgdviRkSxX1WyyxOErfVIKOyQiVbBXnaNQrXBl+mZSXEMDOjE5fh9RadeXixfUthrXOWDDXU6GzenGDosnGkwWo0VQ4yiWlOQRsRAUO3KVJK424WO6fYVQd76l1+i5uhtsPgroUBQTsD6Rp9l7kxqq5xqsxwaniHvXWaUCe2yOEcmuoGqVYM3hoA4Eoc9av5SC6iFYl57s5VPtVIiwvT4lQbSBFomGGHm6zSs5ycUZ4LOrU6omKZ6OSN5BnXugTx0SB2sVes90STIDrc4PiNfCCcLnGSgwn2SptBt9k5QbmMkL1FGaHUPSoFXlnL7lpkhuyuVCam3xWtGdlE8S4VAJR7nU3TCKG5TOLQp9YrtwAqnxqtt3TiUCLVr8egHVYH7tk2LhyiE1vzqiyXkRQtGQQoY5cWq/d4oFcMckRb1XXLlkvZxZleAsHLj1JNefKbCjquX1FVaL1X5NfG6JRqGalFxmqmC+MUQcWtivPVaWBLomwqZ2t12cxSc3EMAMMxEUwRbLfeNPOUjC4VjPVsSu/pYCUC/378nz8s5Okf/KExVFBFSrdkfPZ8SzvdbTWd67xTwDtO+UmQF3RvSPvkfqzNKvDSDk/fQ0RXdoQXa8PlBbZcN8tnsbdbSrQO6wM+4lRhiSMC9qesxHlCsRbogVj+Q3rw1SyZW0GQVnQSLL0qgE2+KUHcpT/sv07kieYGKeLLCgPrzbKW6SWBkZT+OfDODlfWQLju+6zTytVEhVWREMIbtvN9d0ukYac6FXxvdAS+kpUw8gyqLDymYs0mW7Q4A4xH9Q9RPtjWwzwisnbt/HD/BQEojkDp+P6jGt5VHhUoCTj+ZaVAch8jQL5hNdshVBEvoHxlGiTvrfYO05ZZQslmM04oKAztuxkCN2+VgQm6pTrrUQwlST7IcUxZXJNaGjmlbONtyyOa38RJUy1jfmnzqC3xhniAkQGgeKJf3lzJIkymJl91qvXoXbvsRJpcol/j9kjmPV0LLEhlYv1GxSWcOBEURlgXj5Hl7xHViko37yycOUL6BcQQrmUV3fzwRO2/HYOGAq9J3zUKAre6XFZWp6eETf2ktWyxlJqgHlwZflz9ciF/OH2sZd+UDTXfr/NleJ1V2lEIaDe6rF5MXQ0tDhwTXHFbIZCnXZ+3TPJukeq57IDO+/rwQ4M/s0dnvMNVzuVerelqlSULVDKE8HQ/+kb33vde+LCQnz5wZfiIfGXhPyb3Ni7y/k2jyrfA+4O5tE822VZMkz94jV5bqg0/gggGajd/w+Jk59lfmZFQF/b+8
ECpcQt+SNPbcQe2zzjm/MaHlaOW8PL7ur3S6rlne4m1ufhv+xzs1GR/FZlqVn9+7GrlqvqMbUSynH3juVTjf7ovWQ9lfTdWz0EL9yRfF6sYieTlj8z7/lP86TMzXH/mT4KDpBedpveiqeC1s4X6NvvjGCwiVn4o9JWK+mcFRDhA3z00v/yy4IWeo3zPpV9/tNB4q/9zwzqFz/vbng1fUHWWINNsEy4fst+aR1+aVk8vNf7m0YX8u/2fmKbIC4wfQwuMzZFhkdkGOm6eWtu5onpaNzJw8+53zrbbTvF5zCCB7t/VZBNp60M0CmsNfvP0XuUyckoh0Cew147tBtdOfFqQ7YGKX2cdsOJjZzioIOL5wFTdXRR+jG/klPzvntkMLOu/1T0Gtjcv2JKrztb/YwwJ95vf/+iF1NXTX+O0iQH/61MJeHbyKf6Amv7O33PixMDUQ6YB7P5b9heS86+ZPzV1WJ+c/XeRi3mZ+/ghs+p+7m9H7KXc0KfAPoB++U0p4HY3P6M/Ne7Nfke6BDgMX9L
"text/plain": [
"<IPython.core.display.Image object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display(Image(f\"lab7props/{layers_names[0]}_stitched_filters.png\"))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.4 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.4"
},
"vscode": {
"interpreter": {
"hash": "085c51388782ab7dcc7b32a500f9634129d1cddb82cd7a37058a5984251a0bc1"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}