{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "-"
    }
   },
   "source": [
    "### AITech — Uczenie maszynowe — laboratoria\n",
    "# 11. Sieci neuronowe (Keras)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Keras to napisany w języku Python interfejs do platformy TensorFlow, służącej do uczenia maszynowego.\n",
    "\n",
    "Aby z niego korzystać, trzeba zainstalować bibliotekę TensorFlow:\n",
    " * `pip`: https://www.tensorflow.org/install\n",
    " * `conda`: https://docs.anaconda.com/anaconda/user-guide/tasks/tensorflow"
   ]
  },
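  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To quickly verify that the installation works, you can import TensorFlow and print its version (a minimal sanity check; the exact version string depends on your environment):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: import TensorFlow and print the installed version\n",
    "import tensorflow as tf\n",
    "\n",
    "print(tf.__version__)"
   ]
  },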
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Przykład implementacji sieci neuronowej do rozpoznawania cyfr ze zbioru MNIST, według https://keras.io/examples/vision/mnist_convnet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-05-25 10:52:05.523296: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 FMA\n",
      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2023-05-25 10:52:06.689624: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
      "2023-05-25 10:52:06.689658: I tensorflow/compiler/xla/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n",
      "2023-05-25 10:52:09.444585: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory\n",
      "2023-05-25 10:52:09.444822: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory\n",
      "2023-05-25 10:52:09.444839: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n"
     ]
    }
   ],
   "source": [
    "# Konieczne importy\n",
    "\n",
    "import numpy as np\n",
    "from tensorflow import keras\n",
    "from tensorflow.keras import layers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_train shape: (60000, 784)\n",
      "60000 train samples\n",
      "10000 test samples\n"
     ]
    }
   ],
   "source": [
    "# Przygotowanie danych\n",
    "\n",
    "num_classes = 10  # liczba klas\n",
    "input_shape = (784,)  # wymiary wejścia\n",
    "\n",
    "# podział danych na zbiory uczący i testowy\n",
    "(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n",
    "\n",
    "# skalowanie pikseli do przedziału [0, 1]\n",
    "x_train = x_train.astype(\"float32\") / 255\n",
    "x_test = x_test.astype(\"float32\") / 255\n",
    "# spłaszczenie dwuwymiarowych obrazów do jednowymiarowych wektorów\n",
    "x_train = x_train.reshape(60000, 784)  # 784 = 28 * 28\n",
    "x_test = x_test.reshape(10000, 784)\n",
    "print(\"x_train shape:\", x_train.shape)\n",
    "print(x_train.shape[0], \"train samples\")\n",
    "print(x_test.shape[0], \"test samples\")\n",
    "\n",
    "# konwersja danych kategorycznych na binarne\n",
    "y_train = keras.utils.to_categorical(y_train, num_classes)\n",
    "y_test = keras.utils.to_categorical(y_test, num_classes)"
   ]
  },
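  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a small illustration (not needed for the pipeline itself), `keras.utils.to_categorical` turns an integer label into a one-hot vector of length `num_classes`, e.g. the label 3 becomes `[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]`:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustration: one-hot encoding of a single label\n",
    "print(keras.utils.to_categorical(3, num_classes))"
   ]
  },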
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential\"\n",
      "_________________________________________________________________\n",
      " Layer (type)                Output Shape              Param #   \n",
      "=================================================================\n",
      " dense (Dense)               (None, 512)               401920    \n",
      "                                                                 \n",
      " dense_1 (Dense)             (None, 256)               131328    \n",
      "                                                                 \n",
      " dense_2 (Dense)             (None, 10)                2570      \n",
      "                                                                 \n",
      "=================================================================\n",
      "Total params: 535,818\n",
      "Trainable params: 535,818\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-05-25 10:52:13.751127: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory\n",
      "2023-05-25 10:52:13.752395: W tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:265] failed call to cuInit: UNKNOWN ERROR (303)\n",
      "2023-05-25 10:52:13.752552: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (ELLIOT): /proc/driver/nvidia/version does not exist\n",
      "2023-05-25 10:52:13.755949: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 FMA\n",
      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
     ]
    }
   ],
   "source": [
    "# Stworzenie modelu\n",
    "\n",
    "model = keras.Sequential(\n",
    "    [\n",
    "        keras.Input(shape=input_shape),  # warstwa wejściowa\n",
    "        layers.Dense(512, activation=\"relu\", input_shape=(784,)),  # warstwa ukryta 1\n",
    "        layers.Dense(256, activation=\"relu\"),  # warstwa ukryta 2\n",
    "        layers.Dense(num_classes, activation=\"softmax\"),  # warstwa wyjściowa\n",
    "    ]\n",
    ")\n",
    "\n",
    "model.summary()  # wyświetlmy podsumowanie modelu"
   ]
  },
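  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a quick sanity check of the summary above, each `Dense` layer's parameter count is (inputs × units) + units, i.e. weights plus biases: 784·512 + 512 = 401,920, 512·256 + 256 = 131,328 and 256·10 + 10 = 2,570, which add up to 535,818."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Parameter counts of the three Dense layers: weights + biases\n",
    "print(784 * 512 + 512, 512 * 256 + 256, 256 * 10 + 10)"
   ]
  },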
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "422/422 [==============================] - 9s 18ms/step - loss: 0.2402 - accuracy: 0.9290 - val_loss: 0.1133 - val_accuracy: 0.9652\n",
      "Epoch 2/10\n",
      "422/422 [==============================] - 7s 16ms/step - loss: 0.0878 - accuracy: 0.9728 - val_loss: 0.0776 - val_accuracy: 0.9763\n",
      "Epoch 3/10\n",
      "422/422 [==============================] - 7s 18ms/step - loss: 0.0552 - accuracy: 0.9829 - val_loss: 0.0688 - val_accuracy: 0.9792\n",
      "Epoch 4/10\n",
      "422/422 [==============================] - 7s 16ms/step - loss: 0.0381 - accuracy: 0.9881 - val_loss: 0.0632 - val_accuracy: 0.9823\n",
      "Epoch 5/10\n",
      "422/422 [==============================] - 7s 17ms/step - loss: 0.0286 - accuracy: 0.9908 - val_loss: 0.0782 - val_accuracy: 0.9788\n",
      "Epoch 6/10\n",
      "422/422 [==============================] - 7s 17ms/step - loss: 0.0227 - accuracy: 0.9926 - val_loss: 0.0733 - val_accuracy: 0.9807\n",
      "Epoch 7/10\n",
      "422/422 [==============================] - 7s 17ms/step - loss: 0.0167 - accuracy: 0.9944 - val_loss: 0.0824 - val_accuracy: 0.9798\n",
      "Epoch 8/10\n",
      "422/422 [==============================] - 11s 26ms/step - loss: 0.0158 - accuracy: 0.9948 - val_loss: 0.0765 - val_accuracy: 0.9823\n",
      "Epoch 9/10\n",
      "422/422 [==============================] - 8s 18ms/step - loss: 0.0154 - accuracy: 0.9950 - val_loss: 0.0761 - val_accuracy: 0.9802\n",
      "Epoch 10/10\n",
      "422/422 [==============================] - 7s 17ms/step - loss: 0.0115 - accuracy: 0.9963 - val_loss: 0.0924 - val_accuracy: 0.9768\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x7f780e55f4c0>"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Kompilacja modelu\n",
    "model.compile(\n",
    "    loss=\"categorical_crossentropy\",  # standardowa funkcja kosztu dla kalsyfikacji wieloklasowej\n",
    "    optimizer=\"adam\",  # optymalizator\n",
    "    metrics=[\"accuracy\"],  # lista metryk\n",
    ")\n",
    "\n",
    "# Uczenie modelu\n",
    "model.fit(\n",
    "    x_train,\n",
    "    y_train,\n",
    "    batch_size=128,  # wielkość wsadu (paczki)\n",
    "    epochs=10,  # liczba epok\n",
    "    validation_split=0.1,  # wielkość zbioru walidacyjnego\n",
    ")"
   ]
  },
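  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For reference, the `categorical_crossentropy` loss used above is the standard cross-entropy between the one-hot targets $y$ and the softmax outputs $\\hat{y}$, averaged over the $N$ examples in a batch ($C = 10$ classes here):\n",
    "\n",
    "$$L = -\\frac{1}{N} \\sum_{i=1}^{N} \\sum_{c=1}^{C} y_{i,c} \\log \\hat{y}_{i,c}$$"
   ]
  },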
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test loss: 0.10677255690097809\n",
      "Test accuracy: 0.9757000207901001\n"
     ]
    }
   ],
   "source": [
    "# Ewaluacja modelu\n",
    "score = model.evaluate(x_test, y_test, verbose=0)\n",
    "print(\"Test loss:\", score[0])\n",
    "print(\"Test accuracy:\", score[1])"
   ]
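  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Finally, a minimal sketch of using the trained model for prediction: `model.predict` returns a vector of 10 class probabilities for each example, and `np.argmax` picks the most probable digit (here compared with the true labels of the first few test images):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predict classes for a few test examples and compare with the true labels\n",
    "predictions = model.predict(x_test[:5])\n",
    "print(\"Predicted digits:\", np.argmax(predictions, axis=1))\n",
    "print(\"True digits:\", np.argmax(y_test[:5], axis=1))"
   ]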
  }
 ],
 "metadata": {
  "celltoolbar": "Slideshow",
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  },
  "livereveal": {
   "start_slideshow_at": "selected",
   "theme": "amu"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}