Symulowanie-wizualne/sw_lab9-10_3.ipynb

754 lines
163 KiB
Plaintext
Raw Normal View History

2023-01-11 22:52:15 +01:00
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### Aleksandra Jonas, Aleksandra Gronowka, Iwona Christop"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Przygotowanie danych"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import cv2 as cv\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import json\n",
"from tensorflow import keras\n",
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"import subprocess\n",
"import pkg_resources\n",
"import numpy as np\n",
"peachy = []\n",
"\n",
"required = { 'scikit-image'}\n",
"installed = {pkg.key for pkg in pkg_resources.working_set}\n",
"missing = required - installed\n",
"\n",
"if missing: \n",
" python = sys.executable\n",
" subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL)\n",
"\n",
"def load_train_data(input_dir, newSize=(227,227)):\n",
" import numpy as np\n",
" import pandas as pd\n",
" import os\n",
" from skimage.io import imread\n",
" import cv2 as cv\n",
" from pathlib import Path\n",
" import random\n",
" from shutil import copyfile, rmtree\n",
" import json\n",
"\n",
" import seaborn as sns\n",
" import matplotlib.pyplot as plt\n",
"\n",
" import matplotlib\n",
" \n",
" image_dir = Path(input_dir)\n",
" categories_name = []\n",
" for file in os.listdir(image_dir):\n",
" d = os.path.join(image_dir, file)\n",
" if os.path.isdir(d):\n",
" categories_name.append(file)\n",
"\n",
" folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]\n",
"\n",
" train_img = []\n",
" categories_count=[]\n",
" labels=[]\n",
" for i, direc in enumerate(folders):\n",
" count = 0\n",
" \n",
" for obj in direc.iterdir():\n",
" if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':\n",
" labels.append(os.path.basename(os.path.normpath(direc)))\n",
" count += 1\n",
" img = imread(obj)#zwraca ndarry postaci xSize x ySize x colorDepth\n",
" if img.shape[-1] == 256:\n",
" img = np.repeat(img[..., np.newaxis], 3, axis=2)\n",
" elif img.shape[-1] == 4:\n",
" img = img[:, :, :3]\n",
" img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)# zwraca ndarray\n",
" img = img / 255#normalizacja\n",
" train_img.append(img)\n",
" categories_count.append(count)\n",
" X={}\n",
" X[\"values\"] = np.array(train_img)\n",
" X[\"categories_name\"] = categories_name\n",
" X[\"categories_count\"] = categories_count\n",
" X[\"labels\"]=labels\n",
" return X\n",
"\n",
"def load_test_data(input_dir, newSize=(227,227)):\n",
" import numpy as np\n",
" import pandas as pd\n",
" import os\n",
" from skimage.io import imread\n",
" import cv2 as cv\n",
" from pathlib import Path\n",
" import random\n",
" from shutil import copyfile, rmtree\n",
" import json\n",
"\n",
" import seaborn as sns\n",
" import matplotlib.pyplot as plt\n",
"\n",
" import matplotlib\n",
"\n",
" image_path = Path(input_dir)\n",
"\n",
" labels_path = image_path.parents[0] / 'test_labels.json'\n",
"\n",
" jsonString = labels_path.read_text()\n",
" objects = json.loads(jsonString)\n",
"\n",
" categories_name = []\n",
" categories_count=[]\n",
" count = 0\n",
" c = objects[0]['value']\n",
" for e in objects:\n",
" if e['value'] != c:\n",
" categories_count.append(count)\n",
" c = e['value']\n",
" count = 1\n",
" else:\n",
" count += 1\n",
" if not e['value'] in categories_name:\n",
" categories_name.append(e['value'])\n",
"\n",
" categories_count.append(count)\n",
" \n",
" test_img = []\n",
"\n",
" labels=[]\n",
" for e in objects:\n",
" p = image_path / e['filename']\n",
" img = imread(p)#zwraca ndarry postaci xSize x ySize x colorDepth\n",
" if img.shape[-1] == 4:\n",
" img = img[:, :, :3]\n",
" img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)# zwraca ndarray\n",
" img = img / 255#normalizacja\n",
" test_img.append(img)\n",
" labels.append(e['value'])\n",
"\n",
" X={}\n",
" X[\"values\"] = np.array(test_img)\n",
" X[\"categories_name\"] = categories_name\n",
" X[\"categories_count\"] = categories_count\n",
" X[\"labels\"]=labels\n",
" return X\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import LabelEncoder\n",
"import tensorflow as tf\n",
"\n",
"data_train = load_train_data(f\"./train_test_sw/train_sw_unity\")\n",
"values_train = data_train['values']\n",
"labels_train = data_train['labels']\n",
"data_test = load_test_data(\"./train_test_sw/test_sw\")\n",
"X_test = data_test['values']\n",
"y_test = data_test['labels']\n",
"\n",
"X_train, X_validate, y_train, y_validate = train_test_split(values_train, labels_train, test_size=0.2, random_state=42)\n",
"\n",
"class_le = LabelEncoder()\n",
"y_train_enc = class_le.fit_transform(y_train)\n",
"y_validate_enc = class_le.fit_transform(y_validate)\n",
"y_test_enc = class_le.fit_transform(y_test)\n",
"\n",
"train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))\n",
"validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))\n",
"test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))\n",
"\n",
"train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()\n",
"test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()\n",
"validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()\n",
"\n",
"train_ds = (train_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
"test_ds = (test_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))\n",
"validation_ds = (validation_ds\n",
" .shuffle(buffer_size=train_ds_size)\n",
" .batch(batch_size=32, drop_remainder=True))"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## AlexNet"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"from keras.callbacks import ModelCheckpoint, EarlyStopping\n",
"import matplotlib.pyplot as plt\n",
"import tensorflow as tf\n",
"\n",
"alexnet = keras.models.Sequential([\n",
"keras.layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=(227,227,3)),\n",
"keras.layers.BatchNormalization(),\n",
"keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
"keras.layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding=\"same\"),\n",
"keras.layers.BatchNormalization(),\n",
"keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
"keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
"keras.layers.BatchNormalization(),\n",
"keras.layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
"keras.layers.BatchNormalization(),\n",
"keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding=\"same\"),\n",
"keras.layers.BatchNormalization(),\n",
"keras.layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),\n",
"keras.layers.Flatten(),\n",
"keras.layers.Dense(4096, activation='relu'),\n",
"keras.layers.Dropout(.5),\n",
"keras.layers.Dense(4096, activation='relu'),\n",
"keras.layers.Dropout(.5),\n",
"keras.layers.Dense(10, activation='softmax')\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.SGD.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" conv2d (Conv2D) (None, 55, 55, 96) 34944 \n",
" \n",
" batch_normalization (BatchN (None, 55, 55, 96) 384 \n",
" ormalization) \n",
" \n",
" max_pooling2d (MaxPooling2D (None, 27, 27, 96) 0 \n",
" ) \n",
" \n",
" conv2d_1 (Conv2D) (None, 27, 27, 256) 614656 \n",
" \n",
" batch_normalization_1 (Batc (None, 27, 27, 256) 1024 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_1 (MaxPooling (None, 13, 13, 256) 0 \n",
" 2D) \n",
" \n",
" conv2d_2 (Conv2D) (None, 13, 13, 384) 885120 \n",
" \n",
" batch_normalization_2 (Batc (None, 13, 13, 384) 1536 \n",
" hNormalization) \n",
" \n",
" conv2d_3 (Conv2D) (None, 13, 13, 384) 1327488 \n",
" \n",
" batch_normalization_3 (Batc (None, 13, 13, 384) 1536 \n",
" hNormalization) \n",
" \n",
" conv2d_4 (Conv2D) (None, 13, 13, 256) 884992 \n",
" \n",
" batch_normalization_4 (Batc (None, 13, 13, 256) 1024 \n",
" hNormalization) \n",
" \n",
" max_pooling2d_2 (MaxPooling (None, 6, 6, 256) 0 \n",
" 2D) \n",
" \n",
" flatten (Flatten) (None, 9216) 0 \n",
" \n",
" dense (Dense) (None, 4096) 37752832 \n",
" \n",
" dropout (Dropout) (None, 4096) 0 \n",
" \n",
" dense_1 (Dense) (None, 4096) 16781312 \n",
" \n",
" dropout_1 (Dropout) (None, 4096) 0 \n",
" \n",
" dense_2 (Dense) (None, 10) 40970 \n",
" \n",
"=================================================================\n",
"Total params: 58,327,818\n",
"Trainable params: 58,325,066\n",
"Non-trainable params: 2,752\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"alexnet.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.SGD(lr=.001), metrics=['accuracy'])\n",
"alexnet.summary()"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n",
"/var/folders/6b/j4d60ym516x2s6wymzj707rh0000gn/T/ipykernel_37874/1998863165.py:4: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n",
" alex = alexnet.fit_generator(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/25\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-01-11 20:31:44.007163: W tensorflow/tsl/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"50/50 [==============================] - ETA: 0s - loss: 3.8100 - accuracy: 0.3950\n",
"Epoch 1: val_accuracy improved from -inf to 0.29688, saving model to alex_2.h5\n",
"50/50 [==============================] - 46s 896ms/step - loss: 3.8100 - accuracy: 0.3950 - val_loss: 1.8302 - val_accuracy: 0.2969\n",
"Epoch 2/25\n",
"50/50 [==============================] - ETA: 0s - loss: 1.3961 - accuracy: 0.5281\n",
"Epoch 2: val_accuracy improved from 0.29688 to 0.38281, saving model to alex_2.h5\n",
"50/50 [==============================] - 46s 918ms/step - loss: 1.3961 - accuracy: 0.5281 - val_loss: 1.9363 - val_accuracy: 0.3828\n",
"Epoch 3/25\n",
"50/50 [==============================] - ETA: 0s - loss: 1.0805 - accuracy: 0.5956\n",
"Epoch 3: val_accuracy did not improve from 0.38281\n",
"50/50 [==============================] - 48s 955ms/step - loss: 1.0805 - accuracy: 0.5956 - val_loss: 2.2350 - val_accuracy: 0.3438\n",
"Epoch 4/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.8866 - accuracy: 0.6600\n",
"Epoch 4: val_accuracy did not improve from 0.38281\n",
"50/50 [==============================] - 60s 1s/step - loss: 0.8866 - accuracy: 0.6600 - val_loss: 2.0590 - val_accuracy: 0.3203\n",
"Epoch 5/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.7940 - accuracy: 0.6981\n",
"Epoch 5: val_accuracy did not improve from 0.38281\n",
"50/50 [==============================] - 74s 1s/step - loss: 0.7940 - accuracy: 0.6981 - val_loss: 2.4437 - val_accuracy: 0.3672\n",
"Epoch 6/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.6635 - accuracy: 0.7325\n",
"Epoch 6: val_accuracy improved from 0.38281 to 0.45833, saving model to alex_2.h5\n",
"50/50 [==============================] - 68s 1s/step - loss: 0.6635 - accuracy: 0.7325 - val_loss: 1.8824 - val_accuracy: 0.4583\n",
"Epoch 7/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.6460 - accuracy: 0.7406\n",
"Epoch 7: val_accuracy improved from 0.45833 to 0.49479, saving model to alex_2.h5\n",
"50/50 [==============================] - 67s 1s/step - loss: 0.6460 - accuracy: 0.7406 - val_loss: 1.3159 - val_accuracy: 0.4948\n",
"Epoch 8/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.5407 - accuracy: 0.7837\n",
"Epoch 8: val_accuracy improved from 0.49479 to 0.62500, saving model to alex_2.h5\n",
"50/50 [==============================] - 70s 1s/step - loss: 0.5407 - accuracy: 0.7837 - val_loss: 0.9668 - val_accuracy: 0.6250\n",
"Epoch 9/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.4979 - accuracy: 0.7994\n",
"Epoch 9: val_accuracy did not improve from 0.62500\n",
"50/50 [==============================] - 66s 1s/step - loss: 0.4979 - accuracy: 0.7994 - val_loss: 1.1679 - val_accuracy: 0.5677\n",
"Epoch 10/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.4673 - accuracy: 0.8256\n",
"Epoch 10: val_accuracy improved from 0.62500 to 0.74479, saving model to alex_2.h5\n",
"50/50 [==============================] - 69s 1s/step - loss: 0.4673 - accuracy: 0.8256 - val_loss: 0.6585 - val_accuracy: 0.7448\n",
"Epoch 11/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.4136 - accuracy: 0.8313\n",
"Epoch 11: val_accuracy did not improve from 0.74479\n",
"50/50 [==============================] - 84s 2s/step - loss: 0.4136 - accuracy: 0.8313 - val_loss: 0.8328 - val_accuracy: 0.7188\n",
"Epoch 12/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.3804 - accuracy: 0.8519\n",
"Epoch 12: val_accuracy improved from 0.74479 to 0.76302, saving model to alex_2.h5\n",
"50/50 [==============================] - 80s 2s/step - loss: 0.3804 - accuracy: 0.8519 - val_loss: 0.6793 - val_accuracy: 0.7630\n",
"Epoch 13/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.3550 - accuracy: 0.8587\n",
"Epoch 13: val_accuracy did not improve from 0.76302\n",
"50/50 [==============================] - 81s 2s/step - loss: 0.3550 - accuracy: 0.8587 - val_loss: 0.6221 - val_accuracy: 0.7630\n",
"Epoch 14/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.3337 - accuracy: 0.8744\n",
"Epoch 14: val_accuracy did not improve from 0.76302\n",
"50/50 [==============================] - 96s 2s/step - loss: 0.3337 - accuracy: 0.8744 - val_loss: 0.6317 - val_accuracy: 0.7578\n",
"Epoch 15/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.2860 - accuracy: 0.8950\n",
"Epoch 15: val_accuracy improved from 0.76302 to 0.79167, saving model to alex_2.h5\n",
"50/50 [==============================] - 79s 2s/step - loss: 0.2860 - accuracy: 0.8950 - val_loss: 0.6067 - val_accuracy: 0.7917\n",
"Epoch 16/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.2721 - accuracy: 0.8881\n",
"Epoch 16: val_accuracy improved from 0.79167 to 0.81250, saving model to alex_2.h5\n",
"50/50 [==============================] - 77s 2s/step - loss: 0.2721 - accuracy: 0.8881 - val_loss: 0.5126 - val_accuracy: 0.8125\n",
"Epoch 17/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.2564 - accuracy: 0.8969\n",
"Epoch 17: val_accuracy improved from 0.81250 to 0.81510, saving model to alex_2.h5\n",
"50/50 [==============================] - 78s 2s/step - loss: 0.2564 - accuracy: 0.8969 - val_loss: 0.5017 - val_accuracy: 0.8151\n",
"Epoch 18/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.2534 - accuracy: 0.8981\n",
"Epoch 18: val_accuracy improved from 0.81510 to 0.82292, saving model to alex_2.h5\n",
"50/50 [==============================] - 72s 1s/step - loss: 0.2534 - accuracy: 0.8981 - val_loss: 0.4199 - val_accuracy: 0.8229\n",
"Epoch 19/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.2327 - accuracy: 0.9075\n",
"Epoch 19: val_accuracy improved from 0.82292 to 0.84635, saving model to alex_2.h5\n",
"50/50 [==============================] - 70s 1s/step - loss: 0.2327 - accuracy: 0.9075 - val_loss: 0.4260 - val_accuracy: 0.8464\n",
"Epoch 20/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.2132 - accuracy: 0.9219\n",
"Epoch 20: val_accuracy did not improve from 0.84635\n",
"50/50 [==============================] - 69s 1s/step - loss: 0.2132 - accuracy: 0.9219 - val_loss: 0.6660 - val_accuracy: 0.7995\n",
"Epoch 21/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.1870 - accuracy: 0.9287\n",
"Epoch 21: val_accuracy did not improve from 0.84635\n",
"50/50 [==============================] - 70s 1s/step - loss: 0.1870 - accuracy: 0.9287 - val_loss: 0.5399 - val_accuracy: 0.8203\n",
"Epoch 22/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.1861 - accuracy: 0.9294\n",
"Epoch 22: val_accuracy did not improve from 0.84635\n",
"50/50 [==============================] - 78s 2s/step - loss: 0.1861 - accuracy: 0.9294 - val_loss: 0.5620 - val_accuracy: 0.8151\n",
"Epoch 23/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.1494 - accuracy: 0.9375\n",
"Epoch 23: val_accuracy improved from 0.84635 to 0.88281, saving model to alex_2.h5\n",
"50/50 [==============================] - 80s 2s/step - loss: 0.1494 - accuracy: 0.9375 - val_loss: 0.3850 - val_accuracy: 0.8828\n",
"Epoch 24/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.1548 - accuracy: 0.9481\n",
"Epoch 24: val_accuracy did not improve from 0.88281\n",
"50/50 [==============================] - 81s 2s/step - loss: 0.1548 - accuracy: 0.9481 - val_loss: 0.4789 - val_accuracy: 0.8646\n",
"Epoch 25/25\n",
"50/50 [==============================] - ETA: 0s - loss: 0.1541 - accuracy: 0.9456\n",
"Epoch 25: val_accuracy did not improve from 0.88281\n",
"50/50 [==============================] - 82s 2s/step - loss: 0.1541 - accuracy: 0.9456 - val_loss: 0.4806 - val_accuracy: 0.8411\n"
]
}
],
"source": [
"checkpoint = ModelCheckpoint(\"alex_2.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)\n",
"early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"\n",
"alex = alexnet.fit_generator(\n",
" steps_per_epoch=len(train_ds), \n",
" generator=train_ds, \n",
" validation_data= validation_ds, \n",
" validation_steps=len(validation_ds), \n",
" epochs=25, \n",
" callbacks=[checkpoint,early])"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAjwAAAHHCAYAAAC7soLdAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAACdo0lEQVR4nOzdd3hTZf/H8fdJmqR70gll741sUZnKUIYiywHIcAEKPi4EBUQFRRQURVGk8lOmCA4URLaAKGqRJQKW2UXpXmnG+f2RNrS0QAtp0/F9XU+uJCcn53xT6pNP73MPRVVVFSGEEEKISkzj7AKEEEIIIUqbBB4hhBBCVHoSeIQQQghR6UngEUIIIUSlJ4FHCCGEEJWeBB4hhBBCVHoSeIQQQghR6UngEUIIIUSlJ4FHCCGEEJWeBB4hKhBFUZg5c2aJ33f69GkURSEiIsLhNYmyMXr0aGrXru3sMoSosCTwCFFCERERKIqCoij88ssvhV5XVZXw8HAUReGee+5xQoWioklOTsbV1RVFUTh27JhTasgLxYqisG7dukKvz5w5E0VRSEhIKPGx9+7dy8yZM0lOTnZApULcGAk8QtwgV1dXVqxYUWj7zp07OX/+PAaDwQlViYpo7dq1KIpCSEgIX375pbPL4dVXX8WRyyzu3buXWbNmSeARTiWBR4gb1K9fP9auXYvZbC6wfcWKFbRt25aQkBAnVVZ1ZGRkOLsEh/jiiy/o168fI0aMKDJEl6XWrVvz999/s379eqfWIYSjSeAR4gaNGDGCS5cusWXLFvu2nJwcvvrqKx544IEi35ORkcH//vc/wsPDMRgMNGrUiLfffrvQX9NGo5EpU6YQGBiIl5cXAwYM4Pz580Ue88KFC4wZM4bg4GAMBgPNmjXjs88+u6HPlJiYyLPPPkuLFi3w9PTE29ubvn37cvDgwUL7ZmdnM3PmTBo2bIirqyuhoaHcd999nDp1yr6P1Wpl4cKFtGjRAldXVwIDA+nTpw8HDhwArt236Mr+SnmXVI4ePcoDDzyAn58ft912GwB///03o0ePpm7duri6uhISEsKYMWO4dOlSkT+vsWPHEhYWhsFgoE6dOjzxxBPk5OTw33//oSgK7777bqH37d27F0VRWLlyZUl/rNd09uxZdu/ezfDhwxk+fDhRUVHs3bu3WO+1Wq0sWLCAZs2a4erqSnBwMI899hhJSUn2fWbMmIFGo2Hr1q0F3vvoo4+i1+sL/dsOHz6chg0bFruVZ//+/fTp0wcfHx/c3d3p2rUre/bssb8+c+ZMnnvuOQDq1Kljv2x2+vTpYn1GIRzFxdkFCFFR1a5dm86dO7Ny5Ur69u0LwI8//khKSgrDhw/nvffeK7C/qqoMGDCA7du3M3bsWFq3bs3mzZt57rnnuHDhQoEv2XHjxvHFF1/wwAMPcOutt7Jt2zbuvvvuQjXExcXRqVMnFEVh4sSJBAYG8uOPPzJ27FhSU1OZPHlyiT7Tf//9x4YNGxgyZAh16tQhLi6Ojz/+mK5du3L06FHCwsIAsFgs3HPPPWzdupXhw4fz9NNPk5aWxpYtWzh8+DD16tUDYOzYsURERNC3b1/GjRuH2Wxm9+7d/Prrr7Rr165EteUZMmQIDRo04I033rB/IW/ZsoX//vuPRx55hJCQEI4cOcKSJUs4cuQIv/76K4qiABAdHU2HDh1ITk7m0UcfpXHjxly4cIGvvvqKzMxM6tatS5cuXfjyyy+ZMmVKgfN++eWXeHl5MXDgwBuq+2pWrlyJh4cH99xzD25ubtSrV48vv/ySW2+99brvfeyxx4iIiOCRRx7hqaeeIioqikWLFvHXX3+xZ88edDod06dP57vvvmPs2LEcOnQILy8vNm/ezCeffMLs2bNp1apVgWNqtVqmT5/OyJEjWb9+Pffdd99Vz79t2zb69u1L27Zt7cFq2bJl9OjRg927d9OhQwfuu+8+/v33X1auXMm7775LtWrVAAgMDL
y5H5wQJaUKIUpk2bJlKqD+/vvv6qJFi1QvLy81MzNTVVVVHTJkiNq9e3dVVVW1Vq1a6t13321/34YNG1RAfe211woc7/7771cVRVFPnjypqqqqRkZGqoD65JNPFtjvgQceUAF1xowZ9m1jx45VQ0ND1YSEhAL7Dh8+XPXx8bHXFRUVpQLqsmXLrvnZsrOzVYvFUmBbVFSUajAY1FdffdW+7bPPPlMB9Z133il0DKvVqqqqqm7btk0F1Keeeuqq+1yrris/64wZM1RAHTFiRKF98z5nfitXrlQBddeuXfZtI0eOVDUajfr7779ftaaPP/5YBdRjx47ZX8vJyVGrVaumjho1qtD7blaLFi3UBx980P78pZdeUqtVq6aaTKYC+40aNUqtVauW/fnu3btVQP3yyy8L7Ldp06ZC2w8dOqTq9Xp13LhxalJSklq9enW1Xbt2Bc6R928xb9481Ww2qw0aNFBbtWpl/7nk/fwvXryoqqrt59WgQQO1d+/e9n1U1fZvUadOHfXOO++0b5s3b54KqFFRUTf+gxLiJsklLSFuwtChQ8nKyuL7778nLS2N77///qqXs3744Qe0Wi1PPfVUge3/+9//UFWVH3/80b4fUGi/K1trVFVl3bp19O/fH1VVSUhIsN969+5NSkoKf/75Z4k+j8FgQKOx/d+CxWLh0qVLeHp60qhRowLHWrduHdWqVWPSpEmFjpHXmrJu3ToURWHGjBlX3edGPP7444W2ubm52R9nZ2eTkJBAp06dAOx1W61WNmzYQP/+/YtsXcqraejQobi6uhboPLx582YSEhJ46KGHbrjuovz9998cOnSIESNG2LeNGDGChIQENm/efM33rl27Fh8fH+68884C//Zt27bF09OT7du32/dt3rw5s2bN4tNPP6V3794kJCTw+eef4+JSdCN/XivPwYMH2bBhQ5H7REZGcuLECR544AEuXbpkP39GRgY9e/Zk165dWK3Wkv9QhCglcklLiJsQGBhIr169WLFiBZmZmVgsFu6///4i9z1z5gxhYWF4eXkV2N6kSRP763n3Go3GflkoT6NGjQo8v3jxIsnJySxZsoQlS5YUec74+PgSfZ68PjcffvghUVFRWCwW+2sBAQH2x6dOnaJRo0ZX/cLM2ycsLAx/f/8S1XA9derUKbQtMTGRWbNmsWrVqkKfOSUlBbD9vFJTU2nevPk1j+/r60v//v1ZsWIFs2fPBmyXs6pXr06PHj2u+d7Y2NgCz318fAqEsSt98cUXeHh4ULduXU6ePAnYRv/Vrl2bL7/8ssjLmHlOnDhBSkoKQUFBRb5+5c/hueeeY9WqVfz222+88cYbNG3a9Jqf5cEHH2T27Nm8+uqrDBo0qMjzA4waNeqqx0hJScHPz++a5xGirEjgEeImPfDAA4wfP57Y2Fj69u2Lr69vmZw376/nhx566KpfOi1btizRMd944w1efvllxowZw+zZs/H390ej0TB58uRS+Wv9ai09+YPWlYoKEEOHDmXv3r0899xztG7dGk9PT6xWK3369LmhukeOHMnatWvZu3cvLVq04Ntvv+XJJ5+0t35dTWhoaIHny5YtY/To0UXuq6oqK1euJCMjo8jwER8fT3p6Op6enkW+32q1EhQUdNVh7Ff2kfnvv//sIeXQoUPX/BxwuZVn9OjRfPPNN0WeH2DevHm0bt26yGNcrXYhnEECjxA36d577+Wxxx7j119/ZfXq1Vfdr1atWvz888+kpaUVaOX5559/7K/n3VutVnsrSp7jx48XOF7eCC6LxUKvXr0c8lm++uorunfvztKlSwtsT05Otnc2BahXrx779+/HZDKh0+mKPFa9evXYvHkziYmJV23lyfvr/8r5WfJau4ojKSmJrVu3MmvWLF555RX79rwv9zyBgYF4e3tz+PDh6x6zT58+BAYG8uWXX9KxY0cyMzN5+OGHr/u+/CP2AJo1a3bVffPma3r11VftrXz5P9Ojjz7Khg0brnoZrV
69evz888906dLlmq1IYAsno0ePxtvbm8mTJ/PGG29w//33X7NDMtjC9GuvvcasWbMYMGBAofMDeHt7X/f372YuYQr
"text/plain": [
"<Figure size 640x480 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"plt.plot(alex.history[\"accuracy\"])\n",
"plt.plot(alex.history['val_accuracy'])\n",
"plt.plot(alex.history['loss'])\n",
"plt.plot(alex.history['val_loss'])\n",
"plt.title(f\"Model accuracy - AlexNet\")\n",
"plt.ylabel(\"Value\")\n",
"plt.xlabel(\"Epoch\")\n",
"plt.legend([\"Accuracy\",\"Validation Accuracy\",\"Loss\",\"Validation Loss\"])\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"8/8 [==============================] - 4s 450ms/step - loss: 0.4419 - accuracy: 0.8516\n"
]
},
{
"data": {
"text/plain": [
"[0.4419291615486145, 0.8515625]"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"alexnet.evaluate(test_ds)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## MLP"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"X_train = X_train.flatten().reshape(X_train.shape[0], int(np.prod(X_train.shape) / X_train.shape[0]))\n",
"X_test = X_test.flatten().reshape(X_test.shape[0], int(np.prod(X_test.shape) / X_test.shape[0]))"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.neural_network import MLPClassifier\n",
"from tqdm import tqdm\n",
"\n",
"def test_mlp(X_train, y_train, X_val, y_val, X_test, y_test, hidden_layer_sizes, alpha, max_iter):\n",
" mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, alpha=alpha, max_iter=max_iter)\n",
" accuracy = []\n",
"\n",
" result = {\n",
" 'num_layers': len(hidden_layer_sizes),\n",
" 'layer_sizes': hidden_layer_sizes,\n",
" 'regularization': alpha,\n",
" 'max_iter': max_iter\n",
" }\n",
"\n",
" for i in tqdm(range(max_iter)):\n",
" mlp.partial_fit(X_train, y_train, np.unique(y_train))\n",
" accuracy.append(mlp.score(X_train, y_train))\n",
" if i == 50:\n",
" result['checkpoint_train_accuracy'] = np.mean(accuracy)\n",
" result['checkpoint_val_accuracy'] = mlp.score(X_val, y_val)\n",
" result['checkpoint_test_accuracy'] = mlp.score(X_test, y_test)\n",
"\n",
" result['full_train_accuracy'] = np.mean(accuracy)\n",
" result['full_val_accuracy'] = mlp.score(X_val, y_val)\n",
" result['full_test_accuracy'] = mlp.score(X_test, y_test)\n",
" result['accuracy_curve'] = accuracy\n",
" result['loss_curve'] = mlp.loss_curve_\n",
"\n",
" return result\n",
"\n",
"def print_result(result):\n",
" print(f\"NUMBER OF HIDDEN LAYERS = {result['num_layers']}\")\n",
" print(f\"HIDDEN LAYER SIZES = {result['layer_sizes']}\")\n",
" print(f\"REGULARIZATION = {result['regularization']}\")\n",
" print(\"\\n50 EPOCHS\")\n",
" print(f\"train_accuracy = {round(result['checkpoint_train_accuracy'] * 100, 2)}%\")\n",
" print(f\"val_accuracy = {round(result['checkpoint_val_accuracy'] * 100, 2)}%\")\n",
" print(f\"test_accuracy = {round(result['checkpoint_test_accuracy'] * 100, 2)}%\")\n",
" print(f\"\\n{result['max_iter']} EPOCHS\")\n",
" print(f\"train_accuracy = {round(result['full_train_accuracy'] * 100, 2)}%\")\n",
" print(f\"val_accuracy = {round(result['checkpoint_val_accuracy'] * 100, 2)}%\")\n",
" print(f\"test_accuracy = {round(result['full_test_accuracy'] * 100, 2)}%\")\n",
"\n",
"def get_plot(result):\n",
" f = plt.figure(figsize=(12,6))\n",
" plt.plot(result['loss_curve'], label='loss')\n",
" plt.plot(result['accuracy_curve'], label='accuracy')\n",
" plt.legend(loc='best')\n",
" plt.xlabel('number of iterations')\n",
" plt.grid()\n",
" plt.show()\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split\n",
"\n",
"NEW_SIZE = 64\n",
"\n",
"ONE_LAYER = (286,)\n",
"\n",
"X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.05, random_state=42)\n",
"\n",
"all_results = []"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 200/200 [42:45<00:00, 12.83s/it]\n",
"100%|██████████| 200/200 [39:45<00:00, 11.93s/it]\n"
]
}
],
"source": [
"all_results.append(test_mlp(X_train, y_train, X_val, y_val, X_test, y_test, hidden_layer_sizes=ONE_LAYER, alpha=0.1, max_iter=200))\n",
"all_results.append(test_mlp(X_train, y_train, X_val, y_val, X_test, y_test, hidden_layer_sizes=ONE_LAYER, alpha=0.001, max_iter=200))"
]
},
{
"cell_type": "code",
"execution_count": 38,
"id": "a0861788",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"NUMBER OF HIDDEN LAYERS = 1\n",
"HIDDEN LAYER SIZES = (286,)\n",
"REGULARIZATION = 0.1\n",
"\n",
"50 EPOCHS\n",
"train_accuracy = 68.78%\n",
"val_accuracy = 58.02%\n",
"test_accuracy = 53.67%\n",
"\n",
"200 EPOCHS\n",
"train_accuracy = 90.29%\n",
"val_accuracy = 58.02%\n",
"test_accuracy = 59.07%\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA94AAAINCAYAAADIsKceAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB7iUlEQVR4nO3deXxU9b3/8feZNXsCZGdH2WRTURFXVATRWlF/VtFb1LrctmBVXGldwPZerN66tHq1t61ib4tab93qgiIKqCACioooAgJhSQIBsieznt8fJzNJSAgEksyc4fV8OI9kzjafM58Mzud8v+f7NUzTNAUAAAAAADqFI9YBAAAAAACQyCi8AQAAAADoRBTeAAAAAAB0IgpvAAAAAAA6EYU3AAAAAACdiMIbAAAAAIBOROENAAAAAEAnovAGAAAAAKATuWIdQEcIh8PasWOH0tPTZRhGrMMBAAAAACQ40zRVVVWlwsJCORxtt2knROG9Y8cO9e7dO9ZhAAAAAACOMFu3blWvXr3a3CYhCu/09HRJ1glnZGTEOJq2BQIBvfvuu5owYYLcbnesw8F+kCd7IE/2QJ7sg1zZA3myB/JkD+TJHuI1T5WVlerdu3e0Hm1LQhTeke7lGRkZtii8U1JSlJGREVd/NGiOPNkDebIH8mQf5MoeyJM9kCd7IE/2EO95OpjbnRlcDQAAAACATkThDQAAAABAJ6LwBgAAAACgEyXEPd4AAAAAEM9CoZACgUCsw7ClQCAgl8ul+vp6hUKhLn1tp9Mpl8t12NNWU3gDAAAAQCeqrq7Wtm3bZJpmrEOxJdM0lZ+fr61btx52AXwoUlJSVFBQII/Hc8jHoPAGAAAAgE4SCoW0bds2paSkKCcnJyaFo92Fw2FVV1crLS1NDkfX3S1tmqb8fr927dqlTZs2aeDAgYf8+hTeAAAAANBJAoGATNNUTk6OkpOTYx2OLYXDYfn9fiUlJXVp4S1JycnJcrvd2rJlSzSGQ8HgagAAAADQyWjptq+OKPYpvAEAAAAA6EQU3gAAAAAAdCIKbwAAAABAM+PGjdMtt9wS6zASRrsK7zlz5ujEE09Uenq6cnNzNXnyZK1bt67ZNvX19Zo2bZp69OihtLQ0XXrppSotLW3zuKZp6r777lNBQYGSk5M1fvx4rV+/vv1nAwAAAABAnGlX4b148WJNmzZNn3zyiRYsWKBAIKAJEyaopqYmus2tt96qf/3rX3rppZe0ePFi7dixQ5dcckmbx33ooYf0+9//Xk8//bSWL1+u1NRUTZw4UfX19Yd2VgAAAAAAxIl2Fd7z58/XNddco2HDhmnUqFGaO3euioqKtGrVKklSRUWF/vKXv+iRRx7R2WefrdGjR+vZZ5/V0qVL9cknn7R6TNM09dhjj+mee+7RRRddpJEjR+qvf/2rduzYoVdfffWwTxAAAAAA4oVpmqr1B2PyME3zkGLeu3evpk6dqm7duiklJUWTJk1q1kN5y5YtuvDCC9WtWzelpqZq2LBheuutt6L7XnXVVdHp1AYOHKhnn322Q95LOzmsebwrKiokSd27d5ckrVq1SoFAQOPHj49uM2TIEPXp00fLli3TySef3OIYmzZtUklJSbN9MjMzNWbMGC1btkxXXHHF4YQIAAAAAHGjLhDSMfe9E5PXXvvARKV42l8CXnPNNVq/fr1ef/11ZWRk6K677tL555+vtWvXyu12a9q0afL7/VqyZIlSU1O1du1apaWlSZLuvfderV27Vm+//bays7O1YcMG1dXVdfSpxb1DLrzD4bBuueUWnXrqqRo+fLgkqaSkRB6PR1lZWc22zcvLU0lJSavHiSzPy8s76H18Pp98Pl/0eWVlpSRrcvpAIHBI59NVIvHFe5xHOvJkD+TJHsiTfZAreyBP9kCe7KEr8hQIBGSapsLhcPQRK+19fdM0tW7dOr3++uv68MMPdcopp0iS/vd//1d9+/bVyy+/rMsuu0xFRUW65J
JLNGzYMElSv379oq+3ZcsWHXvssTr++OMlSX369Imua08ckZ+xeP/C4bBM01QgEJDT6Ywub8/fzSEX3tOmTdOaNWv00UcfHeohDtmcOXM0e/bsFsvfffddpaSkdHk8B6vSL22pNuR2GNKCBbEOBwdhAXmyBfJkD+TJPsiVPZAneyBP9tCZeXK5XMrPz1d1dbX8fr9M09SyGS17AneFQF2NKuuNg9o2GAzK7/dr1apVcrlcGjp0aLTB0+126+ijj9YXX3yhiRMn6vrrr9dtt92mt99+W+PGjdOFF14YbZydOnWqrr76aq1cuVJnnXWWLrjgAo0ZM+aQ4q+qqjqk/Q6X3+9XXV2dlixZomAwGF1eW1t70Mc4pMJ7+vTpeuONN7RkyRL16tUrujw/P19+v1/l5eXNWr1LS0uVn5/f6rEiy0tLS1VQUNBsn2OPPbbVfWbOnKkZM2ZEn1dWVqp3796aMGGCMjIyDuWUusTba0r05xe/VP90UzdddrbcbnesQ8J+BAIBLViwQOeeey55imPkyR7Ik32QK3sgT/ZAnuyhK/JUX1+vrVu3Ki0tTUlJSZKkzE55pY7lcrnk8XiiDZsZGRnNWnudTqe8Xq8yMjI0ffp0XXTRRXrzzTe1YMECnX322fqv//ovTZ8+XZdeeqnOOOMMvfXWW3rvvfc0efJk/fznP9fDDz980LGYpqmqqiqlp6fLMA7uwkFHqq+vV3Jyss4444xoDqXGntcHo12Ft2mauummm/TKK69o0aJF6t+/f7P1o0ePltvt1sKFC3XppZdKktatW6eioiKNHTu21WP2799f+fn5WrhwYbTQrqys1PLly/Wzn/2s1X28Xq+8Xm+L5W63O67/YSvslirJavmO91hhIU/2QJ7sgTzZB7myB/JkD+TJHjozT6FQSIZhyOFwyOFo19jWMWcYhoYNG6ZgMKgVK1ZEu5rv3r1b69at07Bhw6Ln1LdvX/385z/Xz3/+c82cOVN//vOf9Ytf/EKSdRvxtddeq2uvvVZ//OMfdccdd+h3v/vdQccR6V4eeR+7msPhkGEYLf5O2vM3067Ce9q0aZo3b55ee+01paenR+/BzszMVHJysjIzM3XddddpxowZ6t69uzIyMnTTTTdp7NixzQZWGzJkiObMmaOLL75YhmHolltu0W9+8xsNHDhQ/fv317333qvCwkJNnjy5PeHFvdx06+pIpV+HPKIgAAAAAHSVgQMH6qKLLtINN9ygP/7xj0pPT9fdd9+tnj176qKLLpIk3XLLLZo0aZIGDRqkvXv36oMPPtDQoUMlSffdd59Gjx6tYcOGyefz6Y033oiuO5K0q/B+6qmnJEnjxo1rtvzZZ5/VNddcI0l69NFH5XA4dOmll8rn82nixIn67//+72bbr1u3LjoiuiTdeeedqqmp0Y033qjy8nKddtppmj9/frNm/ESQm2G10gdMQ1X1QfXweGIcEQAAAAC07dlnn9XNN9+sH/zgB/L7/dGu45EW31AopGnTpmnbtm3KyMjQeeedp0cffVSS5PF4NHPmTG3evFnJyck6/fTT9cILL8TydGKi3V3NDyQpKUlPPvmknnzyyYM+jmEYeuCBB/TAAw+0JxzbSXI7lZ7kUlV9UDurfOqREb8DwQEAAAA4ci1atCj6e7du3fTXv/51v9v+4Q9/2O+6e+65R/fcc09HhmZL9rrJIAHkpFmt3ruqfQfYEgAAAACQCCi8u1huutW9fGeVP8aRAAAAAAC6AoV3F8tJb2jxrqLFGwAAAACOBBTeXSyXwhsAAAAAjigU3l0s0uK9k8IbAAAAAI4IFN5djMHVAAAAAODIQuHdxehqDgAAAABHFgrvLtbY1ZxRzQEAAADgSEDh3cUi04lV+4Kq84diHA0AAAAAoLNReHexNK9LbocpSdpZVR/jaAAAAAAAnY3Cu4sZhqEMt/U7I5sDAAAAwMEJBAKxDuGQUXjHQIbV21w7Kym8AQAAAMSn+fPn67TTTlNWVp
Z69OihH/zgB9q4cWN0/bZt2zRlyhR1795dqampOuGEE7R8+fLo+n/961868cQTlZSUpOzsbF188cXRdYZh6NVXX23
"text/plain": [
"<Figure size 1200x600 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"NUMBER OF HIDDEN LAYERS = 1\n",
"HIDDEN LAYER SIZES = (286,)\n",
"REGULARIZATION = 0.001\n",
"\n",
"50 EPOCHS\n",
"train_accuracy = 74.1%\n",
"val_accuracy = 56.79%\n",
"test_accuracy = 50.19%\n",
"\n",
"200 EPOCHS\n",
"train_accuracy = 92.63%\n",
"val_accuracy = 56.79%\n",
"test_accuracy = 54.44%\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA94AAAINCAYAAADIsKceAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAABygElEQVR4nO3deXxU1cH/8e+dNTtrIAmEVUB2ERVxRUEWrQpaF7QVrctTK1WLWsW6gPZXrNalLo/WFftYxNoq2rqxKKCCCyAqq6wJSxLWJGSdycz9/TGZSUI2ApnM3Mnn/XrNKzN3PXdOBvKdc+45hmmapgAAAAAAQFjYIl0AAAAAAABiGcEbAAAAAIAwIngDAAAAABBGBG8AAAAAAMKI4A0AAAAAQBgRvAEAAAAACCOCNwAAAAAAYUTwBgAAAAAgjByRLkBz8Pv92r17t5KTk2UYRqSLAwAAAACIcaZp6tChQ8rIyJDN1nCbdkwE7927dyszMzPSxQAAAAAAtDI7duxQ165dG9wmJoJ3cnKypMAFp6SkRLg0DfN6vZo/f77Gjh0rp9MZ6eKgHtSTNVBP1kA9WQd1ZQ3UkzVQT9ZAPVlDtNZTYWGhMjMzQ3m0ITERvIPdy1NSUiwRvBMSEpSSkhJVvzSoiXqyBurJGqgn66CurIF6sgbqyRqoJ2uI9no6ktudGVwNAAAAAIAwIngDAAAAABBGBG8AAAAAAMIoJu7xBgAAAIBo5vP55PV6I10MS/J6vXI4HCorK5PP52vRc9vtdjkcjmOetprgDQAAAABhVFRUpJ07d8o0zUgXxZJM01RaWpp27NhxzAH4aCQkJCg9PV0ul+uoj0HwBgAAAIAw8fl82rlzpxISEpSamhqR4Gh1fr9fRUVFSkpKks3WcndLm6Ypj8ejvXv3atu2berTp89Rn5/gDQAAAABh4vV6ZZqmUlNTFR8fH+niWJLf75fH41FcXFyLBm9Jio+Pl9PpVFZWVqgMR4PB1QAAAAAgzGjptq7mCPsEbwAAAAAAwojgDQAAAABAGBG8AQAAAAA1jBo1SrfffnukixEzCN4AAAAAAIQRwRsAAAAAgDAieAMAAABACzFNUyWeiog8TNM8qjIfPHhQ11xzjdq1a6eEhARNmDBBmzZtCq3PysrShRdeqHbt2ikxMVEDBw7Uhx9+GNr36quvDk2n1qdPH7322mvN8l5aCfN4AwAAAEALKfX6NOCBTyJy7nUPjVOCq+kR8Nprr9WmTZv0/vvvKyUlRXfffbfOP/98rVu3Tk6nU7fccos8Ho+WLl2qxMRErVu3TklJSZKk+++/X+vWrdNHH32kjh07avPmzSotLW3uS4t6BG8AAAAAQJ2CgfvLL7/UaaedJkn6xz/+oczMTM2bN0+XXXaZsrOzdemll2rw4MGSpF69eoX2z87O1rBhw3TSSSdJknr06NHi1xANCN4taE9hmVZs26+NBYbOj3RhAAAAALS4eKdd6x4aF7FzN9X69evlcDg0YsSI0LIOHTqoX79+Wr9+vSTp1ltv1c0336z58+drzJgxuvTSSzVkyBBJ0s0336xLL71Uq1at0tixYzVx4sRQgG9NuMe7Ba3eka/fvLlaH2bztgMAAACtkWEYSnA5IvIwDCMs13TDDTdo69at+uUvf6kff/xRJ510kp555hlJ0oQJE5SVlaXf/e532r17t0aPHq0777wzLOWIZiTAFhTvCnzD5PFHuCAAAAAAcAT69++viooKff3116Fl+/fv18aNGzVgwIDQsszMTP3617/WO++8ozvuuEMvvfRSaF1qaqqmTJmiN954Q0899ZRefPHFFr2GaEBX8xYU7NpB8AYAAABgBX369NHFF1+sG2+8UX/729+UnJyse+65R126dNHFF18sSbr99ts1YcIE9e3bVwcPHtRnn32m/v37S5IeeOABDR8+XAMHDlR5ebn++9//hta1JrR4t6C4yuDt9UW4IAAAAABwhF577TUNHz
5cP/vZzzRy5EiZpqkPP/xQTqdTkuTz+XTLLbeof//+Gj9+vPr27av//d//lSS5XC5Nnz5dQ4YM0VlnnSW73a65c+dG8nIighbvFkRXcwAAAABWsHjx4tDzdu3a6e9//3u92wbv567Lfffdp/vuu685i2ZJtHi3ILqaAwAAAEDrQ/BuQcHg7TMNVfhI3wAAAADQGhC8W1Cwq7kklVUQvAEAAACgNSB4tyC3o+rtLmOENQAAAABoFQjeLcgwDMU7A295KcEbAAAAAFoFgncLC04pVualqzkAAAAAtAYE7xYWHwretHgDAAAAQGvQ5OC9dOlSXXjhhcrIyJBhGJo3b16N9YZh1Pl47LHH6j3mjBkzam1//PHHN/lirCCOruYAAAAA0Ko0OXgXFxdr6NCheu655+pcn5OTU+Px6quvyjAMXXrppQ0ed+DAgTX2++KLL5paNEugqzkAAAAAtC6Opu4wYcIETZgwod71aWlpNV6/9957Ouecc9SrV6+GC+Jw1No3FgW7mpd6aPEGAAAAgNagycG7KfLy8vTBBx/o9ddfb3TbTZs2KSMjQ3FxcRo5cqRmzZqlbt261blteXm5ysvLQ68LCwslSV6vV16vt3kKHyZuhyFJKirzRH1ZW7Ng3VBH0Y16sgbqyTqoK2ugnqyBerKGlqgnr9cr0zTl9/vl99Pr9WiYpilJ8ng8crlcLX5+v98v0zTl9Xplt9tDy5vye2OYwas4CoZh6N1339XEiRPrXP/oo4/qkUce0e7duxUXF1fvcT766CMVFRWpX79+ysnJ0cyZM7Vr1y6tWbNGycnJtbafMWOGZs6cWWv5nDlzlJCQcLSX0yJe3mDTjwdtuqKXT6d1Puq3HgAAAIAFBHv2ZmZmRiQ0HouFCxfqL3/5i9avXy+73a6TTz5ZjzzyiHr27ClJ2rVrlx544AF9+umn8ng86tu3rx577DGddNJJkgI577HHHtO6deuUmJiokSNH6o033pAktWvXTm+88YYuuOCC0Pm6d++uWbNm6aqrrlJ2draGDh2qV155Ra+88opWrlypJ554QuPHj9ddd92l5cuXKz8/Xz169NC0adP085//PHQcv9+vZ555Rq+//rp27dql1NRUXXvttbrzzjt10UUXqV+/fjXGINu3b58GDBigt99+W2effXat98Hj8WjHjh3Kzc1VRUVFaHlJSYmuuuoqFRQUKCUlpcH3Mqwt3q+++qquvvrqBkO3pBpd14cMGaIRI0aoe/fu+uc//6nrr7++1vbTp0/XtGnTQq8LCwuVmZmpsWPHNnrBkTb/0Gr9eHCPeh7XT+ef2XD3e0SO1+vVggULdN5558npdEa6OKgH9WQN1JN1UFfWQD1ZA/VkDS1RT2VlZdqxY4eSkpICucg0JW9JWM7VKGeCZBhHvLlpmrrzzjs1ZMgQFRUV6cEHH9SUKVO0atUqlZSU6KKLLlKXLl303nvvKS0tTatWrVJ8fLxSUlL0wQcf6Je//KXuvfde/d///Z88Ho8++uijGnktuG2QYRiKi4tTSkqKkpKSJEkPP/ywHnvsMfXp00cdOnSQaZo69dRT9Yc//EEpKSn68MMP9etf/1qDBg3SKaecIkm655579PLLL+vxxx/XGWecoZycHG3YsEEpKSm66aabdOutt+rpp5+W2+2WFMitXbp00c9+9jMZdbw/ZWVlio+P11lnnVUj2wZ7Xh+JsAXvzz//XBs3btRbb73V5H3btm2rvn37avPmzXWud7vdoTepOqfTGfX/sCW4A+Xz+BX1ZYU1fqdAPVkF9WQd1JU1UE/WQD1ZQzjryefzyTAM2Ww22Ww2yVMsPdI1LOdq1L27JVfiEW9+2WWX1Xj92muvKTU1VRs2bNCyZcu0d+9effvtt2rfvr0kqW/fvqFtZ82apSuvvFIPPfRQaNmwYcNqHC/0ntSxLLj89ttv1yWXXKLCwkKlpKTIZrPprrvuCm1/6623av78+frXv/6lU089VYcOHdLTTz
+tZ599Vtddd50kqU+fPjrrrLMkST//+c9166236j//+Y8uv/xySdLrr7+ua6+9tkY38sPLZBhGrd+TpvzOhG0e71d
"text/plain": [
"<Figure size 1200x600 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Render the text summary and learning-curve plot for every recorded run.\n",
"for run in all_results:\n",
"    print_result(run)\n",
"    get_plot(run)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}