forked from pms/uczenie-maszynowe

Commit b1eea2f09f (parent fa504b4782): "Przykłady sieci neuronowych" (Examples of neural networks)
@@ -1,7 +1,25 @@
 {
  "cells": [
   {
-   "attachments": {},
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": []
+  },
+  {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
@@ -11,7 +29,6 @@
    ]
   },
   {
-   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
@@ -23,7 +40,6 @@
    ]
   },
   {
-   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
@@ -31,7 +47,6 @@
    ]
   },
   {
-   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
@@ -44,7 +59,6 @@
    ]
   },
   {
-   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
lab/Sieci_CNN_Keras.ipynb (new file, 226 lines)

@@ -0,0 +1,226 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {
+    "slideshow": {
+     "slide_type": "-"
+    }
+   },
+   "source": [
+    "### AITech — Machine Learning — lab classes\n",
+    "# 11. Neural networks (Keras)"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Keras is a Python interface to TensorFlow, a machine-learning platform.\n",
+    "\n",
+    "To use it, you first need to install the TensorFlow library:\n",
+    " * `pip`: https://www.tensorflow.org/install\n",
+    " * `conda`: https://docs.anaconda.com/anaconda/user-guide/tasks/tensorflow"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "An example implementation of a neural network recognizing digits from the MNIST dataset, after https://keras.io/examples/vision/mnist_convnet"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Required imports\n",
+    "\n",
+    "import numpy as np\n",
+    "from tensorflow import keras\n",
+    "from tensorflow.keras import layers"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n",
+      "11493376/11490434 [==============================] - 1s 0us/step\n",
+      "x_train shape: (60000, 28, 28, 1)\n",
+      "60000 train samples\n",
+      "10000 test samples\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Data preparation\n",
+    "\n",
+    "num_classes = 10\n",
+    "input_shape = (28, 28, 1)\n",
+    "\n",
+    "# split the data into training and test sets\n",
+    "(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n",
+    "\n",
+    "# scale the images to the [0, 1] range\n",
+    "x_train = x_train.astype(\"float32\") / 255\n",
+    "x_test = x_test.astype(\"float32\") / 255\n",
+    "# make sure the images have shape (28, 28, 1)\n",
+    "x_train = np.expand_dims(x_train, -1)\n",
+    "x_test = np.expand_dims(x_test, -1)\n",
+    "print(\"x_train shape:\", x_train.shape)\n",
+    "print(x_train.shape[0], \"train samples\")\n",
+    "print(x_test.shape[0], \"test samples\")\n",
+    "\n",
+    "# convert categorical labels to binary (one-hot) vectors\n",
+    "y_train = keras.utils.to_categorical(y_train, num_classes)\n",
+    "y_test = keras.utils.to_categorical(y_test, num_classes)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Model: \"sequential\"\n",
+      "_________________________________________________________________\n",
+      "Layer (type)                 Output Shape              Param #   \n",
+      "=================================================================\n",
+      "conv2d (Conv2D)              (None, 26, 26, 32)        320       \n",
+      "_________________________________________________________________\n",
+      "max_pooling2d (MaxPooling2D) (None, 13, 13, 32)        0         \n",
+      "_________________________________________________________________\n",
+      "conv2d_1 (Conv2D)            (None, 11, 11, 64)        18496     \n",
+      "_________________________________________________________________\n",
+      "max_pooling2d_1 (MaxPooling2 (None, 5, 5, 64)          0         \n",
+      "_________________________________________________________________\n",
+      "flatten (Flatten)            (None, 1600)              0         \n",
+      "_________________________________________________________________\n",
+      "dropout (Dropout)            (None, 1600)              0         \n",
+      "_________________________________________________________________\n",
+      "dense (Dense)                (None, 10)                16010     \n",
+      "=================================================================\n",
+      "Total params: 34,826\n",
+      "Trainable params: 34,826\n",
+      "Non-trainable params: 0\n",
+      "_________________________________________________________________\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Build the model\n",
+    "\n",
+    "model = keras.Sequential(\n",
+    "    [\n",
+    "        keras.Input(shape=input_shape),\n",
+    "        layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\"),\n",
+    "        layers.MaxPooling2D(pool_size=(2, 2)),\n",
+    "        layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\"),\n",
+    "        layers.MaxPooling2D(pool_size=(2, 2)),\n",
+    "        layers.Flatten(),\n",
+    "        layers.Dropout(0.5),\n",
+    "        layers.Dense(num_classes, activation=\"softmax\"),\n",
+    "    ]\n",
+    ")\n",
+    "\n",
+    "model.summary()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "422/422 [==============================] - 38s 91ms/step - loss: 0.0556 - accuracy: 0.9826 - val_loss: 0.0412 - val_accuracy: 0.9893\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<tensorflow.python.keras.callbacks.History at 0x1a50b35a070>"
+      ]
+     },
+     "execution_count": 9,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# Train the model\n",
+    "\n",
+    "batch_size = 128\n",
+    "epochs = 15\n",
+    "\n",
+    "model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n",
+    "\n",
+    "model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Test loss: 0.03675819933414459\n",
+      "Test accuracy: 0.988099992275238\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Evaluate the model\n",
+    "\n",
+    "score = model.evaluate(x_test, y_test, verbose=0)\n",
+    "print(\"Test loss:\", score[0])\n",
+    "print(\"Test accuracy:\", score[1])"
+   ]
+  }
+ ],
+ "metadata": {
+  "celltoolbar": "Slideshow",
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.6"
+  },
+  "livereveal": {
+   "start_slideshow_at": "selected",
+   "theme": "amu"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
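The parameter counts reported by model.summary() above can be checked by hand: a Conv2D layer has (kernel_height * kernel_width * input_channels + 1) * filters parameters, and a Dense layer has (input_dim + 1) * units. A short sketch of that arithmetic in Python, with the layer names and shapes taken from the notebook added above:

# Parameter counts for the CNN in lab/Sieci_CNN_Keras.ipynb.
# The "+ 1" in each formula accounts for the bias term.
conv2d = (3 * 3 * 1 + 1) * 32      # 320
conv2d_1 = (3 * 3 * 32 + 1) * 64   # 18496
# Flatten turns (5, 5, 64) into 5 * 5 * 64 = 1600 features.
dense = (1600 + 1) * 10            # 16010
print(conv2d + conv2d_1 + dense)   # 34826, matching "Total params: 34,826"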
@@ -1,6 +1,7 @@
 {
  "cells": [
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {
     "slideshow": {
@@ -13,6 +14,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
@@ -24,6 +26,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
@@ -32,9 +35,23 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 1,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "2023-05-25 10:52:05.523296: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
+      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
+      "2023-05-25 10:52:06.689624: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
+      "2023-05-25 10:52:06.689658: I tensorflow/compiler/xla/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n",
+      "2023-05-25 10:52:09.444585: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory\n",
+      "2023-05-25 10:52:09.444822: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory\n",
+      "2023-05-25 10:52:09.444839: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n"
+     ]
+    }
+   ],
    "source": [
     "# Required imports\n",
     "\n",
@@ -45,16 +62,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n",
-      "11493376/11490434 [==============================] - 1s 0us/step\n",
-      "x_train shape: (60000, 28, 28, 1)\n",
+      "x_train shape: (60000, 784)\n",
       "60000 train samples\n",
       "10000 test samples\n"
      ]
@@ -63,8 +78,8 @@
    "source": [
     "# Data preparation\n",
     "\n",
-    "num_classes = 10\n",
-    "input_shape = (28, 28, 1)\n",
+    "num_classes = 10  # number of classes\n",
+    "input_shape = (784,)  # input dimensions\n",
     "\n",
     "# split the data into training and test sets\n",
     "(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n",
@@ -72,9 +87,9 @@
     "# scale the images to the [0, 1] range\n",
     "x_train = x_train.astype(\"float32\") / 255\n",
     "x_test = x_test.astype(\"float32\") / 255\n",
-    "# make sure the images have shape (28, 28, 1)\n",
-    "x_train = np.expand_dims(x_train, -1)\n",
-    "x_test = np.expand_dims(x_test, -1)\n",
+    "# flatten the two-dimensional images into one-dimensional vectors\n",
+    "x_train = x_train.reshape(60000, 784)  # 784 = 28 * 28\n",
+    "x_test = x_test.reshape(10000, 784)\n",
     "print(\"x_train shape:\", x_train.shape)\n",
     "print(x_train.shape[0], \"train samples\")\n",
     "print(x_test.shape[0], \"test samples\")\n",
@@ -86,7 +101,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [
     {
@@ -95,27 +110,31 @@
      "text": [
       "Model: \"sequential\"\n",
       "_________________________________________________________________\n",
-      "Layer (type)                 Output Shape              Param #   \n",
+      " Layer (type)                Output Shape              Param #   \n",
       "=================================================================\n",
-      "conv2d (Conv2D)              (None, 26, 26, 32)        320       \n",
-      "_________________________________________________________________\n",
-      "max_pooling2d (MaxPooling2D) (None, 13, 13, 32)        0         \n",
-      "_________________________________________________________________\n",
-      "conv2d_1 (Conv2D)            (None, 11, 11, 64)        18496     \n",
-      "_________________________________________________________________\n",
-      "max_pooling2d_1 (MaxPooling2 (None, 5, 5, 64)          0         \n",
-      "_________________________________________________________________\n",
-      "flatten (Flatten)            (None, 1600)              0         \n",
-      "_________________________________________________________________\n",
-      "dropout (Dropout)            (None, 1600)              0         \n",
-      "_________________________________________________________________\n",
-      "dense (Dense)                (None, 10)                16010     \n",
+      " dense (Dense)               (None, 512)               401920    \n",
+      "                                                                 \n",
+      " dense_1 (Dense)             (None, 256)               131328    \n",
+      "                                                                 \n",
+      " dense_2 (Dense)             (None, 10)                2570      \n",
+      "                                                                 \n",
       "=================================================================\n",
-      "Total params: 34,826\n",
-      "Trainable params: 34,826\n",
+      "Total params: 535,818\n",
+      "Trainable params: 535,818\n",
       "Non-trainable params: 0\n",
       "_________________________________________________________________\n"
      ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "2023-05-25 10:52:13.751127: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory\n",
+      "2023-05-25 10:52:13.752395: W tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:265] failed call to cuInit: UNKNOWN ERROR (303)\n",
+      "2023-05-25 10:52:13.752552: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (ELLIOT): /proc/driver/nvidia/version does not exist\n",
+      "2023-05-25 10:52:13.755949: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
+      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
+     ]
     }
    ],
    "source": [
@@ -123,89 +142,96 @@
     "\n",
     "model = keras.Sequential(\n",
     "    [\n",
-    "        keras.Input(shape=input_shape),\n",
-    "        layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\"),\n",
-    "        layers.MaxPooling2D(pool_size=(2, 2)),\n",
-    "        layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\"),\n",
-    "        layers.MaxPooling2D(pool_size=(2, 2)),\n",
-    "        layers.Flatten(),\n",
-    "        layers.Dropout(0.5),\n",
-    "        layers.Dense(num_classes, activation=\"softmax\"),\n",
+    "        keras.Input(shape=input_shape),  # input layer\n",
+    "        layers.Dense(512, activation=\"relu\", input_shape=(784,)),  # hidden layer 1\n",
+    "        layers.Dense(256, activation=\"relu\"),  # hidden layer 2\n",
+    "        layers.Dense(num_classes, activation=\"softmax\"),  # output layer\n",
     "    ]\n",
    ")\n",
     "\n",
-    "model.summary()"
+    "model.summary()  # print a summary of the model"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "422/422 [==============================] - 38s 91ms/step - loss: 0.0556 - accuracy: 0.9826 - val_loss: 0.0412 - val_accuracy: 0.9893\n"
+      "Epoch 1/10\n",
+      "422/422 [==============================] - 9s 18ms/step - loss: 0.2402 - accuracy: 0.9290 - val_loss: 0.1133 - val_accuracy: 0.9652\n",
+      "Epoch 2/10\n",
+      "422/422 [==============================] - 7s 16ms/step - loss: 0.0878 - accuracy: 0.9728 - val_loss: 0.0776 - val_accuracy: 0.9763\n",
+      "Epoch 3/10\n",
+      "422/422 [==============================] - 7s 18ms/step - loss: 0.0552 - accuracy: 0.9829 - val_loss: 0.0688 - val_accuracy: 0.9792\n",
+      "Epoch 4/10\n",
+      "422/422 [==============================] - 7s 16ms/step - loss: 0.0381 - accuracy: 0.9881 - val_loss: 0.0632 - val_accuracy: 0.9823\n",
+      "Epoch 5/10\n",
+      "422/422 [==============================] - 7s 17ms/step - loss: 0.0286 - accuracy: 0.9908 - val_loss: 0.0782 - val_accuracy: 0.9788\n",
+      "Epoch 6/10\n",
+      "422/422 [==============================] - 7s 17ms/step - loss: 0.0227 - accuracy: 0.9926 - val_loss: 0.0733 - val_accuracy: 0.9807\n",
+      "Epoch 7/10\n",
+      "422/422 [==============================] - 7s 17ms/step - loss: 0.0167 - accuracy: 0.9944 - val_loss: 0.0824 - val_accuracy: 0.9798\n",
+      "Epoch 8/10\n",
+      "422/422 [==============================] - 11s 26ms/step - loss: 0.0158 - accuracy: 0.9948 - val_loss: 0.0765 - val_accuracy: 0.9823\n",
+      "Epoch 9/10\n",
+      "422/422 [==============================] - 8s 18ms/step - loss: 0.0154 - accuracy: 0.9950 - val_loss: 0.0761 - val_accuracy: 0.9802\n",
+      "Epoch 10/10\n",
+      "422/422 [==============================] - 7s 17ms/step - loss: 0.0115 - accuracy: 0.9963 - val_loss: 0.0924 - val_accuracy: 0.9768\n"
      ]
     },
     {
      "data": {
       "text/plain": [
-       "<tensorflow.python.keras.callbacks.History at 0x1a50b35a070>"
+       "<keras.callbacks.History at 0x7f780e55f4c0>"
       ]
      },
-     "execution_count": 9,
+     "execution_count": 4,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
+    "# Compile the model\n",
+    "model.compile(\n",
+    "    loss=\"categorical_crossentropy\",  # the standard loss function for multi-class classification\n",
+    "    optimizer=\"adam\",  # optimizer\n",
+    "    metrics=[\"accuracy\"],  # list of metrics\n",
+    ")\n",
+    "\n",
     "# Train the model\n",
-    "\n",
-    "batch_size = 128\n",
-    "epochs = 15\n",
-    "\n",
-    "model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n",
-    "\n",
-    "model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)"
+    "model.fit(\n",
+    "    x_train,\n",
+    "    y_train,\n",
+    "    batch_size=128,  # batch size\n",
+    "    epochs=10,  # number of epochs\n",
+    "    validation_split=0.1,  # size of the validation set\n",
+    ")"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Test loss: 0.03675819933414459\n",
-      "Test accuracy: 0.988099992275238\n"
+      "Test loss: 0.10677255690097809\n",
+      "Test accuracy: 0.9757000207901001\n"
      ]
     }
    ],
    "source": [
     "# Evaluate the model\n",
-    "\n",
     "score = model.evaluate(x_test, y_test, verbose=0)\n",
     "print(\"Test loss:\", score[0])\n",
     "print(\"Test accuracy:\", score[1])"
    ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Exercise 11 (6 points)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Implement a solution to a classification problem of your choice using a simple neural network built with the Keras library."
-   ]
   }
  ],
  "metadata": {
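Stripped of the notebook JSON, the pipeline after this change amounts to the following standalone script (a sketch assuming TensorFlow 2.x with the bundled Keras; it mirrors the cells above rather than copying them verbatim):

from tensorflow import keras
from tensorflow.keras import layers

num_classes = 10

# Load MNIST and flatten each 28x28 image into a 784-dimensional vector.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.astype("float32").reshape(-1, 784) / 255
x_test = x_test.astype("float32").reshape(-1, 784) / 255

# One-hot encode the class labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = keras.Sequential(
    [
        keras.Input(shape=(784,)),
        layers.Dense(512, activation="relu"),
        layers.Dense(256, activation="relu"),
        layers.Dense(num_classes, activation="softmax"),
    ]
)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=128, epochs=10, validation_split=0.1)

loss, accuracy = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", loss, "Test accuracy:", accuracy)

The parameter counts in the new summary follow the usual Dense formula (input_dim + 1) * units: (784 + 1) * 512 = 401920, (512 + 1) * 256 = 131328, and (256 + 1) * 10 = 2570, which sum to the reported 535,818.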
@@ -1,6 +1,7 @@
 {
  "cells": [
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {
     "slideshow": {
@@ -13,6 +14,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
@@ -174,6 +176,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
@@ -704,6 +707,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
@@ -711,25 +715,12 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "Here is an article on how to create a dataloader for data from your own CSV file: https://androidkt.com/load-pandas-dataframe-using-dataset-and-dataloader-in-pytorch"
    ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Exercise 10 (6 points)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Implement a solution to a classification problem of your choice using a simple neural network built with the PyTorch library."
-   ]
   }
  ],
  "metadata": {
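The linked article shows how to feed a pandas DataFrame into PyTorch via the Dataset and DataLoader classes. A minimal sketch of that pattern (the file name data.csv and the convention that the last column holds the label are assumptions for illustration):

import pandas as pd
import torch
from torch.utils.data import DataLoader, Dataset

class CSVDataset(Dataset):
    """Wraps a CSV file with numeric feature columns and an integer label column."""

    def __init__(self, path):
        df = pd.read_csv(path)
        # Assume all columns but the last are numeric features; the last is the label.
        self.x = torch.tensor(df.iloc[:, :-1].values, dtype=torch.float32)
        self.y = torch.tensor(df.iloc[:, -1].values, dtype=torch.long)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]

loader = DataLoader(CSVDataset("data.csv"), batch_size=32, shuffle=True)
for features, labels in loader:
    pass  # one training step per batch would go here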