Task 7 - AlexNet
Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop
Data preparation
from IPython.display import Image, SVG, display
import sys
import subprocess
import pkg_resources
import numpy as np

# AlexNet expects inputs of shape (227, 227, 3).
newSize = (227, 227)

# Install any missing dependencies into the current environment.
required = {'scikit-image'}
installed = {pkg.key for pkg in pkg_resources.working_set}
missing = required - installed
if missing:
    python = sys.executable
    subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL)
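Note that pkg_resources is deprecated in recent setuptools releases. If the environment warns about it, a minimal sketch of the same dependency check using the standard-library importlib.metadata (Python 3.8+) could look like this:

import sys
import subprocess
from importlib.metadata import distributions

# Same check as above, without setuptools' pkg_resources.
required = {'scikit-image'}
installed = {dist.metadata['Name'].lower() for dist in distributions()}
missing = required - installed
if missing:
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', *missing],
                          stdout=subprocess.DEVNULL)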
def load_train_data(input_dir):
    import os
    from pathlib import Path
    import numpy as np
    from skimage.io import imread
    import cv2 as cv

    image_dir = Path(input_dir)
    categories_name = []
    for file in os.listdir(image_dir):
        d = os.path.join(image_dir, file)
        if os.path.isdir(d):
            categories_name.append(file)

    folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]

    train_img = []
    categories_count = []
    labels = []
    for i, direc in enumerate(folders):
        count = 0
        for obj in direc.iterdir():
            if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':
                labels.append(os.path.basename(os.path.normpath(direc)))
                count += 1
                img = imread(obj)  # returns an ndarray of shape (height, width, colorDepth)
                img = img[:, :, :3]  # drop the alpha channel if present
                img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)
                img = img / 255  # normalize pixel values to [0, 1]
                train_img.append(img)
        categories_count.append(count)

    X = {}
    X["values"] = np.array(train_img)
    X["categories_name"] = categories_name
    X["categories_count"] = categories_count
    X["labels"] = labels
    return X
def load_test_data(input_dir):
    import os
    import json
    from pathlib import Path
    import numpy as np
    from skimage.io import imread
    import cv2 as cv

    image_path = Path(input_dir)
    labels_path = image_path.parents[0] / 'test_labels.json'
    jsonString = labels_path.read_text()
    objects = json.loads(jsonString)

    # Count consecutive entries per category; this assumes the JSON
    # lists files grouped by class.
    categories_name = []
    categories_count = []
    count = 0
    c = objects[0]['value']
    for e in objects:
        if e['value'] != c:
            categories_count.append(count)
            c = e['value']
            count = 1
        else:
            count += 1
        if not e['value'] in categories_name:
            categories_name.append(e['value'])
    categories_count.append(count)

    test_img = []
    labels = []
    for e in objects:
        p = image_path / e['filename']
        img = imread(p)  # returns an ndarray of shape (height, width, colorDepth)
        img = img[:, :, :3]  # drop the alpha channel if present
        img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)
        img = img / 255  # normalize pixel values to [0, 1]
        test_img.append(img)
        labels.append(e['value'])

    X = {}
    X["values"] = np.array(test_img)
    X["categories_name"] = categories_name
    X["categories_count"] = categories_count
    X["labels"] = labels
    return X
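The counting loop above only works when test_labels.json lists entries grouped by class. A minimal order-independent sketch using collections.Counter, assuming the same objects variable as above, would be:

from collections import Counter

# Count labels regardless of how the JSON entries are ordered;
# Counter preserves first-occurrence order, matching the original loop.
label_counts = Counter(e['value'] for e in objects)
categories_name = list(label_counts.keys())
categories_count = list(label_counts.values())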
# Data load
data_train = load_train_data("./train_test_sw/train_sw")
values_train = data_train['values']
labels_train = data_train['labels']
data_test = load_test_data("./train_test_sw/test_sw")
X_test = data_test['values']
y_test = data_test['labels']
from sklearn.model_selection import train_test_split
X_train, X_validate, y_train, y_validate = train_test_split(values_train, labels_train, test_size=0.2, random_state=42)
from sklearn.preprocessing import LabelEncoder

# Fit the encoder on the training labels only, then reuse the same
# mapping for the validation and test splits.
class_le = LabelEncoder()
y_train_enc = class_le.fit_transform(y_train)
y_validate_enc = class_le.transform(y_validate)
y_test_enc = class_le.transform(y_test)
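Calling fit_transform separately on each split would silently learn a different class-to-integer mapping whenever a split is missing a class. A quick sanity check of the mapping learned above:

# The class-to-integer mapping learned from the training split;
# transform() reuses exactly these classes and raises ValueError
# on labels unseen during fitting, instead of remapping silently.
print(dict(zip(class_le.classes_, range(len(class_le.classes_)))))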
import tensorflow as tf
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))
validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))
test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))
train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()
test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()
validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()
print("Training data size:", train_ds_size)
print("Test data size:", test_ds_size)
print("Validation data size:", validation_ds_size)
Training data size: 820
Test data size: 259
Validation data size: 206
train_ds = (train_ds
            .shuffle(buffer_size=train_ds_size)
            .batch(batch_size=32, drop_remainder=True))
test_ds = (test_ds
           .shuffle(buffer_size=test_ds_size)
           .batch(batch_size=32, drop_remainder=True))
validation_ds = (validation_ds
                 .shuffle(buffer_size=validation_ds_size)
                 .batch(batch_size=32, drop_remainder=True))
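For a dataset this small the gain is modest, but tf.data can overlap input preparation with training. A minimal sketch adding prefetching on top of the batched datasets above:

# Prefetch lets the CPU prepare the next batch while the
# accelerator is busy with the current training step.
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.prefetch(AUTOTUNE)
validation_ds = validation_ds.prefetch(AUTOTUNE)
test_ds = test_ds.prefetch(AUTOTUNE)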
Model 1 - batch size = 32
from tensorflow import keras
import os
import time
model = keras.models.Sequential([
    keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
    keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    keras.layers.Flatten(),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(4096, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
root_logdir = os.path.join(os.curdir, "logs", "fit")
def get_run_logdir():
    run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S")
    return os.path.join(root_logdir, run_id)
run_logdir = get_run_logdir()
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(learning_rate=0.001), metrics=['accuracy'])
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 55, 55, 96) 34944 max_pooling2d (MaxPooling2D (None, 27, 27, 96) 0 ) conv2d_1 (Conv2D) (None, 27, 27, 256) 614656 max_pooling2d_1 (MaxPooling (None, 13, 13, 256) 0 2D) conv2d_2 (Conv2D) (None, 13, 13, 384) 885120 conv2d_3 (Conv2D) (None, 13, 13, 384) 1327488 conv2d_4 (Conv2D) (None, 13, 13, 256) 884992 max_pooling2d_2 (MaxPooling (None, 6, 6, 256) 0 2D) flatten (Flatten) (None, 9216) 0 dense (Dense) (None, 4096) 37752832 dense_1 (Dense) (None, 4096) 16781312 dense_2 (Dense) (None, 10) 40970 ================================================================= Total params: 58,322,314 Trainable params: 58,322,314 Non-trainable params: 0 _________________________________________________________________
model.fit(train_ds,
          epochs=100,
          validation_data=validation_ds,
          validation_freq=1,
          callbacks=[tensorboard_cb])
Epoch 1/100
25/25 [==============================] - 53s 2s/step - loss: 2.2516 - accuracy: 0.2150 - val_loss: 2.1732 - val_accuracy: 0.2656
Epoch 2/100
25/25 [==============================] - 40s 2s/step - loss: 1.9127 - accuracy: 0.2362 - val_loss: 1.6225 - val_accuracy: 0.2188
Epoch 3/100
25/25 [==============================] - 40s 2s/step - loss: 1.6217 - accuracy: 0.2612 - val_loss: 1.5785 - val_accuracy: 0.3438
...
Epoch 98/100
25/25 [==============================] - 39s 2s/step - loss: 0.1676 - accuracy: 0.9588 - val_loss: 0.8548 - val_accuracy: 0.7292
Epoch 99/100
25/25 [==============================] - 39s 2s/step - loss: 0.0977 - accuracy: 0.9812 - val_loss: 1.1352 - val_accuracy: 0.7135
Epoch 100/100
25/25 [==============================] - 39s 2s/step - loss: 0.1027 - accuracy: 0.9663 - val_loss: 0.8968 - val_accuracy: 0.8021
<keras.callbacks.History at 0x18ee5f3b490>
model.evaluate(test_ds)
8/8 [==============================] - 7s 327ms/step - loss: 1.0574 - accuracy: 0.7070
[1.0574449300765991, 0.70703125]
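Beyond the aggregate 70.7% accuracy, a per-class breakdown shows which categories the network confuses. A minimal sketch using scikit-learn, assuming the model, test_ds, and class_le defined above (labels and predictions are collected in a single pass because test_ds was shuffled):

import numpy as np
from sklearn.metrics import confusion_matrix, classification_report

# One pass over the dataset keeps inputs and targets aligned
# even though the batch order is reshuffled on each iteration.
y_true, y_pred = [], []
for batch_x, batch_y in test_ds:
    probs = model(batch_x, training=False).numpy()
    y_pred.extend(probs.argmax(axis=1))
    y_true.extend(batch_y.numpy())

print(confusion_matrix(y_true, y_pred))
print(classification_report(y_true, y_pred,
                            labels=range(len(class_le.classes_)),
                            target_names=class_le.classes_))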
Visualizing the convolutional filters of each layer
layers_names = []
for layer in model.layers:
    # keep only the convolutional layers
    if 'conv' not in layer.name:
        continue
    layers_names.append(layer.name)
    filters, biases = layer.get_weights()
    print(layer.name, filters.shape)
conv2d (11, 11, 3, 96)
conv2d_1 (5, 5, 96, 256)
conv2d_2 (3, 3, 256, 384)
conv2d_3 (3, 3, 384, 384)
conv2d_4 (3, 3, 384, 256)
import matplotlib.pyplot as plt

filters, biases = model.layers[0].get_weights()
# min-max scale the weights to [0, 1] so they render as images
fmin, fmax = filters.min(), filters.max()
filters = (filters - fmin) / (fmax - fmin)
nb_filters, tmp = 3, 1
for i in range(nb_filters):
    f = filters[:, :, :, i]
    # plot each of the three input channels of the filter separately
    for j in range(3):
        ax = plt.subplot(nb_filters, 3, tmp)
        ax.set_xticks([])
        ax.set_yticks([])
        plt.imshow(f[:, :, j], cmap='gray')
        tmp += 1
plt.show()
img_width, img_height = 227, 227
layer = model.get_layer(name=layers_names[0])
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)

def initialize_image():
    # start from a random noisy image
    img = tf.random.uniform((1, img_width, img_height, 3))
    return img

def loss_calc(input_image, filter_index, fex):
    activation = fex(input_image)
    # drop the border pixels so edge artifacts don't influence the result
    filter_activation = activation[:, 2:-2, 2:-2, filter_index]
    return tf.reduce_mean(filter_activation)
def gradient_ascent_step(img, filter_index, learning_rate, fex):
    with tf.GradientTape() as tape:
        tape.watch(img)
        loss = loss_calc(img, filter_index, fex)
    # normalize the gradient so the step size is controlled by learning_rate alone
    grads = tape.gradient(loss, img)
    grads = tf.math.l2_normalize(grads)
    img += learning_rate * grads
    return loss, img
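Each ascent step nudges the image in the direction that increases the mean activation of the chosen filter; with the L2-normalized gradient the update is

$$x_{t+1} = x_t + \eta\,\frac{\nabla_x \mathcal{L}(x_t)}{\lVert\nabla_x \mathcal{L}(x_t)\rVert_2}, \qquad \mathcal{L}(x) = \operatorname{mean}\bigl(A^{(k)}(x)\bigr),$$

where $A^{(k)}$ is the activation map of filter $k$ with the border pixels cropped and $\eta$ is the learning rate.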
def deprocess_image(img):
    # zero-center and scale to a small standard deviation
    img -= img.mean()
    img /= img.std() + 1e-5
    img *= 0.15
    # crop the borders
    img = img[25:-25, 25:-25, :]
    # shift to [0, 1], then convert to 8-bit RGB
    img += 0.5
    img = np.clip(img, 0, 1)
    img *= 255
    img = np.clip(img, 0, 255).astype("uint8")
    return img
def visualize_filter(filter_index, fex):
    # run 30 gradient-ascent steps from random noise
    iterations = 30
    learning_rate = 10.0
    img = initialize_image()
    for iteration in range(iterations):
        loss, img = gradient_ascent_step(img, filter_index, learning_rate, fex)
    img = deprocess_image(img[0].numpy())
    return loss, img
loss, img = visualize_filter(0, feature_extractor)
keras.preprocessing.image.save_img("lab7props/0.png", img)
display(Image("lab7props/0.png"))
def visualize_layer(layer_name, n):
    from IPython.display import Image, display
    layer = model.get_layer(name=layer_name)
    fex = keras.Model(inputs=model.inputs, outputs=layer.output)
    print(f"Getting started with layer {layer_name}")

    # visualize the first n**2 filters of the layer
    all_imgs = []
    for filter_index in range(n**2):
        loss, img = visualize_filter(filter_index, fex)
        all_imgs.append(img)

    # stitch the n x n filter images into one canvas with a small margin
    margin = 5
    cropped_width = img_width - 25 * 2
    cropped_height = img_height - 25 * 2
    width = n * cropped_width + (n - 1) * margin
    height = n * cropped_height + (n - 1) * margin
    stitched_filters = np.zeros((width, height, 3))
    for i in range(n):
        for j in range(n):
            img = all_imgs[i * n + j]
            stitched_filters[
                (cropped_width + margin) * i : (cropped_width + margin) * i + cropped_width,
                (cropped_height + margin) * j : (cropped_height + margin) * j + cropped_height,
                :,
            ] = img

    filename = f"lab7props/{layer_name}_stitched_filters.png"
    keras.preprocessing.image.save_img(filename, stitched_filters)
    print(f"{layer_name} done")
layers_names
['conv2d', 'conv2d_1', 'conv2d_2', 'conv2d_3', 'conv2d_4']
for name in layers_names:
visualize_layer(name, 8)
Getting started with layer conv2d
conv2d done
Getting started with layer conv2d_1
conv2d_1 done
Getting started with layer conv2d_2
conv2d_2 done
Getting started with layer conv2d_3
conv2d_3 done
Getting started with layer conv2d_4
conv2d_4 done
display(Image(f"lab7props/{layers_names[0]}_stitched_filters.png"))