zuma/wyk/4_Sieci_neuronowe.ipynb

{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"### Uczenie maszynowe\n",
"# 4. Sieci neuronowe wprowadzenie"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"# Przydatne importy\n",
"\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"## 4.1. Perceptron"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"https://www.youtube.com/watch?v=cNxadbrN_aI"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"<img style=\"margin: auto\" heighth=\"100%\" src=\"http://m.natemat.pl/b94a41cd7322e1b8793e4644e5f82683,641,0,0,0.png\" alt=\"Frank Rosenblatt\"/>"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"<img style=\"margin: auto\" src=\"http://m.natemat.pl/02943a7dc0f638d786b78cd5c9e75742,641,0,0,0.png\" heighth=\"100%\" alt=\"Frank Rosenblatt\"/>"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"<img style=\"margin: auto\" heighth=\"100%\" src=\"https://upload.wikimedia.org/wikipedia/en/5/52/Mark_I_perceptron.jpeg\" alt=\"perceptron\"/>"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Pierwszy perceptron liniowy\n",
"\n",
"* Frank Rosenblatt, 1957\n",
"* aparat fotograficzny podłączony do 400 fotokomórek (rozdzielczość obrazu: 20 x 20)\n",
"* wagi potencjometry aktualizowane za pomocą silniczków"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Uczenie perceptronu\n",
"\n",
"Cykl uczenia perceptronu Rosenblatta:\n",
"\n",
"1. Sfotografuj planszę z kolejnym obiektem.\n",
"1. Zaobserwuj, która lampka zapaliła się na wyjściu.\n",
"1. Sprawdź, czy to jest właściwa lampka.\n",
"1. Wyślij sygnał „nagrody” lub „kary”."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Funkcja aktywacji\n",
"\n",
"Funkcja bipolarna:\n",
"\n",
"$$ g(z) = \\left\\{ \n",
"\\begin{array}{rl}\n",
"1 & \\textrm{gdy $z > \\theta_0$} \\\\\n",
"-1 & \\textrm{wpp.}\n",
"\\end{array}\n",
"\\right. $$\n",
"\n",
"gdzie $z = \\theta_0x_0 + \\ldots + \\theta_nx_n$,<br/>\n",
"$\\theta_0$ to próg aktywacji,<br/>\n",
"$x_0 = 1$. "
]
},
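{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"source": [
"A minimal sketch (not part of the original slides) of the bipolar activation: the weights `theta` and the features `x` below are made-up example values, with $x_0 = 1$ as the bias input."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"def bipolar_activation(z, threshold=0.0):\n",
"    # Bipolar activation: 1 if z is above the threshold, otherwise -1.\n",
"    # On the slide the threshold theta_0 also enters z itself via x_0 = 1.\n",
"    return 1 if z > threshold else -1\n",
"\n",
"theta = np.array([0.5, -1.0, 2.0])  # made-up weights, theta_0 included\n",
"x = np.array([1.0, 0.3, 0.8])       # made-up features, x_0 = 1\n",
"z = theta @ x                       # z = theta_0*x_0 + ... + theta_n*x_n\n",
"print(z, bipolar_activation(z))"
]
},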
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"def bipolar_plot():\n",
" matplotlib.rcParams.update({'font.size': 16})\n",
"\n",
" plt.figure(figsize=(8,5))\n",
" x = [-1,-.23,1] \n",
" y = [-1, -1, 1]\n",
" plt.ylim(-1.2,1.2)\n",
" plt.xlim(-1.2,1.2)\n",
" plt.plot([-2,2],[1,1], color='black', ls=\"dashed\")\n",
" plt.plot([-2,2],[-1,-1], color='black', ls=\"dashed\")\n",
" plt.step(x, y, lw=3)\n",
" ax = plt.gca()\n",
" ax.spines['right'].set_color('none')\n",
" ax.spines['top'].set_color('none')\n",
" ax.xaxis.set_ticks_position('bottom')\n",
" ax.spines['bottom'].set_position(('data',0))\n",
" ax.yaxis.set_ticks_position('left')\n",
" ax.spines['left'].set_position(('data',0))\n",
"\n",
" plt.annotate(r'$\\theta_0$',\n",
" xy=(-.23,0), xycoords='data',\n",
" xytext=(-50, +50), textcoords='offset points', fontsize=26,\n",
" arrowprops=dict(arrowstyle=\"->\"))\n",
"\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAcwAAAEeCAYAAAAHLSWiAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3de1yUZf7/8fcoZzEVT7mmwapYQsUqth5yKaxdUpHWFHTDxNrU1HbxlNpiWlp00KLdVkOtNA+PBH0Y5LElxbJsk3bZDAo1wbTNNHU7iKDA/fvDL/NrHA43h2EYeD0fj3nIXNd9DZ+5He4398x1X1gMwxAAAKhaC2cXAACAKyAwAQAwgcAEAMAEAhMAABMITAAATCAwAQAwwa2afq45AZwgIiJCu3btcnYZQHNlqaiRM0ygEfruu++cXQKAqxCYAACYQGACAGACgQkAgAkEJgAAJhCYAACYQGACAGACgQkAgAkEJgAAJhCYAACYQGACAGACgQkAgAkEJgAAJhCYAACYQGACAGACgQkAgAkEJlCFkydP6pFHHtHAgQPl4+Mji8WigoICU2PLysqUmJgof39/eXl56ZZbbtGWLVscWzAAhyEwgSocPXpUKSkpateunYYMGVKjsQsWLNCiRYs0ffp07dy5UwMGDNCYMWO0Y8cOB1ULwJEshmFU1V9lJ9DUlZWVqUWLK79Xrl69Wg899JDy8/Pl7+9f5bjTp0+rW7dumjdvnp544glr+9ChQ3XmzBl9+umnVY4PDQ1VVlZWnesHUCuWiho5wwSqUB6WNbV7925dunRJsbGxNu2xsbE6dOiQ8vPz66M8AA2IwAQcICcnR56enurZs6dNe1BQkCQpNzfXGWUBqIMq35K9/fbb7Trj4uIUFxen7777TqNHj7Yb8/DDDysmJkYnTpzQ+PHj7fpnzZqlyMhI5eXlafLkyXb9CQkJuvPOO5Wdna34+Hi7/qefflqDBg3Shx9+qMcee8yuPykpSSEhIcrIyNCSJUvs+pOTk9W7d2+9/fbbWrZsmV3/unXr1K1bN23atEkrVqyw69+8ebM6dOigNWvWaM2aNXb9O3bskI+Pj5YvX66UlBS7/szMTEnS0qVLtW3bNps+b29v7dy5U5K0ePFivfvuuzb97du3t04amT9/vg4cOGDTf91112n9+vWSpPj4eGVnZ9v0BwYGauXKlZKkSZMm6fDhwzb9ISEhSkpKknTlTOjkyZM2/QMHDlRiYqIk6d5779XZs2dt+ocOHaoFCxZIku6++25dvHjRpn/EiBGaPXu2JOn222/X1aKjozV16lQVFhZq2LBhdv11fe2FjJ2lf/zXXRculdr1NTbfrI1XlwlJzi4DaBAFzwyX1HiOe5mZmRW+JetWq2cHuKCdX1lUXNb4wxJA48SkHzQb/vO2O7sE0zjDRHNSfobZiHCGCZSrzQ9oTWbJvvHGG5owYYKOHDli8znmmjVrNHHiRB07dkwBAQGVjg/NWKisxncQAZo1Jv0ADhARESEPDw9t2LDBpn39+vUKDg6uMiwBNE6cYQLV2Lx5syTpk08+kSTt3LlTHTt2VMeOHRUWFiZJcnNz04QJE/Tqq69Kkjp16qQZM2YoMTFRrVu3Vt++fbVp0ybt2bNHaWlpznkiAOqEwASqMWbMGJv7U6dOlSSFhYVZZz2XlpaqtNR2QtFTTz0lX19fvfTSSzp16pR69+6tlJQURUZGNkjdAOoXgQlUo5qJcZVu07JlSyUkJCghIcERZQFoYHyGCQCACQQmAAAmEJgAAJhAYAIAYAKBCQCACQQmAAAmEJgAAJhAYAIAYAKBCQCACQQmAAAmEJgAAJhAYAIAYAKBCQCACQQmAAAmEJgAAJhAYAIAYAKBCQCACQQmAAAmEJgAAJhAYAIAYAKBCQCACQQmAAAmEJgAAJhAYAIAYAKBCQCACQQmAAAmEJgAAJhAYAIAYAKBCQCACQQmAAAmEJgAAJhAYAIAYAKBCQCACQQmAAAmEJgAAJhAYAIAYAKBiTr73//+p8WLFyskJEStW7eWn5+fwsPDtWPHDmeXBgD1xs3ZBcC1ZWZm6g9/+IO++eYbm/a9e/cqMzNTK1as0OTJk51UHQDUH84wUWuZmZkaNmyYvvnmG8XGxurgwYM6f/68Pv74Yw0cOFCGYWjmzJk6efKks0sFgDojMFErp0+f1tixY3Xx4kU999xzWrdunUJDQ9W2bVv1799faWlp8vX1VWFhoTZu3OjscgGgzghM1Mrs2bP17bffasSIEZozZ45df8eOHTV48GBJ0r59+xq6PACodwQmauyLL77Qhg0bZLFY9Nxzz1W6XceOHSVJx48fb6jSAMBhCEzUWHJyssrKynTnnXfqxhtvrHS7y5cv2/wLAK6MwESNlJWV6c0335Qk3XfffVVue+7cOUmSt7e3w+tylBMnTmj06NFq06aNrrnmGo0aNUpfffWVqbEWi6XCW3Z2toOrBuAIXFaCGsnOztapU6ckSXFxcYqLi6t2TLdu3RxclWMUFhYqPDxcnp6eWrt2rSwWixISEnTHHXfo008/VatWrap9jLi4OLvLagIDAx1VMgAHIjBRI7WZwNOrVy8HVOJ4q1at0rFjx5SXl6eePXtKkm6++Wb16tVLycnJmjlzZrWP0bVrVw0YMMDRpQJoALwlixr517/+JUn63e9+p4sXL1Z627p1q3VM3759nVVunaSnp2vAgAHWsJSkgIAADR48WGlpaU6sDIAzEJiokcOHD0uSunfvLi8vr0pvH330kXXMb37zG5vH2LRpk/r37y9vb2+1b99e0dHROnbsWIM+DzNycnIUHBxs1x4UFKTc3FxTj7FixQp5enrKx8dH4eHhev/99+u7TAANhMBEjZSv2uPn51fldtu3b5ck3Xjjjerevbu1/ZVXXtHYsWPl7u6uF198UTNnztSePXs0cOBA05NpGsq5c+fUrl07u3Y/Pz+dP3++2vGxsbFavny5MjIytHLlSp09e1bh4eHKzMx0QLUAHI3PMFEjFy9elCR5eXlVus0XX3yhzz77TJI0fvx4a/u5c+f06KOPKiQkRPv27ZO7u7sk6e6771b//v312GOPaf369Q6svuYsFotdm2EYpsauW7fO+vWQIUMUFRWl4OBgJSQkaP/+/Xbbr1y5UitXrpQknTlzppYVA3AUzjBRIx4eHpKkCxcuVLrN8uXLJV25nOTns2jfeust/fjjj/rzn/9sDUvpymec4eHh2rJliwoLCx1TeC20a9fOemnMz50/f77CM8/qtG7dWsOHD9fBgwcr7J80aZKysrKUlZVlXfQBQONBYKJGrr/+eklSXl5ehf35+fnWs6Tp06erS5cu1r6PP/5YkqxL5v3cbbfdpqKiIuuZaWMQFBSknJwcu/bc3Fz16dOnVo9pGEaFZ60AGj8CEzUSFhYmSXrnnXf03//+16bvwoULiomJUXFxsQIDA7Vw4UKb/q+//lqSdN1119k9bnlbY/rLJiNHjtRHH31kMyGpoKBAH3zwgUaOH
Fnjx/vhhx+0fft2/frXv67PMgE0EAITNTJx4kS5ubmpuLhYI0eOtP5Jr3feeUeDBw/WwYMH5efnp9TUVLsL+8vfbvX09LR73PLVgBrTW7IPPfSQ/P39FRUVpbS0NKWnpysqKkrdunWzWYzg+PHjcnNz05NPPmltW7p0qR566CFt3LhRmZmZWrt2rQYPHqxTp05pyZIlzng6AOqIST+okRtvvFELFy7UggUL9Mknn+jWW2+16f/lL3+pLVu26Oabb7Yb6+PjI0kqLi62Wy6vqKjIZpvGoFWrVtqzZ49mzJih8ePHyzAMDR06VElJSfL19bVuZxiGSktLVVZWZm3r3bu3tm7dqq1bt+r777/XNddco8GDB+vVV1+122cAXAOBiRpLSEhQYGCgkpKSlJOTI8MwFBgYqJiYGE2bNq3S0OvataukK2+7Xr36z4kTJyRV/HatM3Xv3l1btmypcht/f3+7mbORkZGKjIx0ZGkAGhiBiVqJjo5WdHR0jcbceuutSk5O1oEDB+wC84MPPpCXl1e
"text/plain": [
"<Figure size 576x360 with 1 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"bipolar_plot()"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Perceptron schemat\n",
"\n",
"<img src=\"perceptron.png\" width=\"60%\"/>"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### Perceptron zasada działania\n",
"\n",
"1. Ustal wartości początkowe $\\theta$ (wektor 0 lub liczby losowe blisko 0).\n",
"1. Dla każdego przykładu $(x^{(i)}, y^{(i)})$, dla $i=1,\\ldots,m$\n",
" * Oblicz wartość wyjścia $o^{(i)} = g(\\theta^{T}x^{(i)}) = g(\\sum_{j=0}^{n} \\theta_jx_j^{(i)})$\n",
" * Wykonaj aktualizację wag (tzw. *perceptron rule*):\n",
" $$ \\theta := \\theta + \\Delta \\theta $$\n",
" $$ \\Delta \\theta = \\alpha(y^{(i)}-o^{(i)})x^{(i)} $$"
]
},
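{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"source": [
"A minimal sketch of the perceptron rule above (not part of the original slides): a toy, linearly separable dataset (logical AND with labels in $\\{-1, 1\\}$) and an illustrative learning rate `alpha = 0.1`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"def bipolar(z):\n",
"    return 1 if z > 0 else -1\n",
"\n",
"# Toy linearly separable data: logical AND with labels in {-1, 1}.\n",
"# The first column of X is x_0 = 1 (bias input).\n",
"X = np.array([[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]], dtype=float)\n",
"y = np.array([-1, -1, -1, 1])\n",
"\n",
"theta = np.zeros(X.shape[1])  # initial weights (zero vector)\n",
"alpha = 0.1                   # learning rate (illustrative value)\n",
"\n",
"for epoch in range(10):\n",
"    for x_i, y_i in zip(X, y):\n",
"        o_i = bipolar(theta @ x_i)          # perceptron output\n",
"        theta += alpha * (y_i - o_i) * x_i  # perceptron rule\n",
"\n",
"print(theta)\n",
"print([bipolar(theta @ x_i) for x_i in X])"
]
},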
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"$$\\theta_j := \\theta_j + \\Delta \\theta_j $$\n",
"\n",
"Jeżeli przykład został sklasyfikowany **poprawnie**:\n",
"\n",
"* $y^{(i)}=1$ oraz $o^{(i)}=1$ : $$\\Delta\\theta_j = \\alpha(1 - 1)x_j^{(i)} = 0$$\n",
"* $y^{(i)}=-1$ oraz $o^{(i)}=-1$ : $$\\Delta\\theta_j = \\alpha(-1 - -1)x_j^{(i)} = 0$$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"Czyli: jeżeli trafiłeś, to nic nie zmieniaj."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"$$\\theta_j := \\theta_j + \\Delta \\theta_j $$\n",
"\n",
"Jeżeli przykład został sklasyfikowany **niepoprawnie**:\n",
"\n",
"* $y^{(i)}=1$ oraz $o^{(i)}=-1$ : $$\\Delta\\theta_j = \\alpha(1 - -1)x_j^{(i)} = 2 \\alpha x_j^{(i)}$$\n",
"* $y^{(i)}=-1$ oraz $o^{(i)}=1$ : $$\\Delta\\theta_j = \\alpha(-1 - 1)x_j^{(i)} = -2 \\alpha x_j^{(i)}$$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"Czyli: przesuń wagi w odpowiednią stronę."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Perceptron zalety\n",
"\n",
"* intuicyjny i prosty\n",
"* łatwy w implementacji\n",
"* jeżeli dane można liniowo oddzielić, algorytm jest zbieżny w skończonym czasie"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Perceptron wady\n",
"\n",
"* jeżeli danych nie można oddzielić liniowo, algorytm nie jest zbieżny"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"def plot_perceptron():\n",
" plt.figure(figsize=(12,3))\n",
"\n",
" plt.subplot(131)\n",
" plt.ylim(-0.2,1.2)\n",
" plt.xlim(-0.2,1.2)\n",
"\n",
" plt.title('AND')\n",
" plt.plot([1,0,0], [0,1,0], 'ro', markersize=10)\n",
" plt.plot([1], [1], 'go', markersize=10)\n",
"\n",
" ax = plt.gca()\n",
" ax.spines['right'].set_color('none')\n",
" ax.spines['top'].set_color('none')\n",
" ax.xaxis.set_ticks_position('none')\n",
" ax.spines['bottom'].set_position(('data',0))\n",
" ax.yaxis.set_ticks_position('none')\n",
" ax.spines['left'].set_position(('data',0))\n",
"\n",
" plt.xticks(np.arange(0, 2, 1.0))\n",
" plt.yticks(np.arange(0, 2, 1.0))\n",
"\n",
"\n",
" plt.subplot(132)\n",
" plt.ylim(-0.2,1.2)\n",
" plt.xlim(-0.2,1.2)\n",
"\n",
" plt.plot([1,0,1], [0,1,1], 'go', markersize=10)\n",
" plt.plot([0], [0], 'ro', markersize=10)\n",
"\n",
" ax = plt.gca()\n",
" ax.spines['right'].set_color('none')\n",
" ax.spines['top'].set_color('none')\n",
" ax.xaxis.set_ticks_position('none')\n",
" ax.spines['bottom'].set_position(('data',0))\n",
" ax.yaxis.set_ticks_position('none')\n",
" ax.spines['left'].set_position(('data',0))\n",
"\n",
" plt.title('OR')\n",
" plt.xticks(np.arange(0, 2, 1.0))\n",
" plt.yticks(np.arange(0, 2, 1.0))\n",
"\n",
"\n",
" plt.subplot(133)\n",
" plt.ylim(-0.2,1.2)\n",
" plt.xlim(-0.2,1.2)\n",
"\n",
" plt.title('XOR')\n",
" plt.plot([1,0], [0,1], 'go', markersize=10)\n",
" plt.plot([0,1], [0,1], 'ro', markersize=10)\n",
"\n",
" ax = plt.gca()\n",
" ax.spines['right'].set_color('none')\n",
" ax.spines['top'].set_color('none')\n",
" ax.xaxis.set_ticks_position('none')\n",
" ax.spines['bottom'].set_position(('data',0))\n",
" ax.yaxis.set_ticks_position('none')\n",
" ax.spines['left'].set_position(('data',0))\n",
"\n",
" plt.xticks(np.arange(0, 2, 1.0))\n",
" plt.yticks(np.arange(0, 2, 1.0))\n",
"\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAqwAAADGCAYAAAAXMtIlAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAWVUlEQVR4nO3df5DUd33H8dd7cRO5XJYcaUQn1YEh0Qop2IYqmBACiS1YQ1J6RB1Bo2PUOUwnV5z6MzVJUepUClYnJiZq6FG1sM4kqYBOmhFsMFQ5M57lgt4dXEhik4YcJnCXHyu8+8f3e7C3t7e3y+339nN3z8fMdzb3/X73u5/95vPm+9rvfr+fNXcXAAAAEKpUrRsAAAAAlEJgBQAAQNAIrAAAAAgagRUAAABBI7ACAAAgaARWAAAABI3ACgAAgKARWEeJmW03Mzez/ymxjsfTz4ZYfmW8fFPB/HvznutmljOzHjP7VbzsXWY2qdrvCZhozOwdZpY1s6fM7GUzO2JmPzazj5jZq4qs311QmyfM7Fkz22lmy2rxHoDxwMw+GdfU14ZY/iYz6zOzg2Z2TsGyt5lZi5k9bmYvmdlRM/upma01s8lDbO/hIrXcY2a7zOy9SbxHDGT8cEDyzOx1kp5Q9AHBJL3V3X9eZL38/xmN7v79guVXSvqxpK+4+8158++V9AFJd0l6On6djKQ3S1ooabKkn0l6t7t3V+t9ARNFHEbvkvQhSccl/UDSIUnnS1om6fWS9kl6l7s/k/e8bkmvlfSP8ayzFdXlNYrq9APu/q+j8y6A8SM+CbNH0lslLXH3XQXLHpb0tiLLvijp05JelrRT0q8l1Uv6c0kXS+qQ9E537yx4vYclXSZpvaRXJKUlXSTpOklnSfqsu38xgbeKfu7OlPAk6VOSXNKX48evD7GeS/qtpBclPSZpUsHyK+N1NhXMvzee/5Yi2zxfUku8/ICk+lrvDyamsTZJ2hDX0COSXluw7GxJX89b/qq8Zd2Sfldke9fH6x+u9XtjYhqrk6IPfy9JOijpnLz5n4zr66sF6zfnHQtnFixLSbo1Xt4h6dyC5Q/Hy+oL5l8u6YSkXkmTa71PxvPEJQGj4wZJz0v6nKTfSHqPmb16iHX/T9LXJP1R/LwRcffnJL1f0oOS3iTp4yPdJjCRmNkbJd0s6TlJy9396fzl7v6ypCZJP5E0X9G3HcPZpuhM7evN7ILqthiYGNz9MUl/L2mGpC9JkpnNknSbpC5FJ4sUzz9f0j8oCrjXuHtXwbZOuvutkr6r6Mzp35bZhocldUqqU3TcRkIIrAkzs8sUBcWsu78kaYuk8yStKPG09YoC7udLBNuyefQxsP+riutHuj1ggvmAon8rv+HuzxZboaDGPljmdi1+zI2secCEtkHSf0tqMrOrJW1W9BX9h9y9N2+96yWdI2mbu3eU2N4X4scPnUFbqOUEEViT13/waokftyj6WmHIg5q790j6J0XXxa2pUjt+qqiY5ha7OQTAkN4ePz40zHo/kfR7SX9Wxk2O71F08HzM3X83wvYBE5a7n1D0beTLiq4tnyfpX9z9JwWrllXH7r5f0jOS3hDff1KSmS1SdO3rEUWXEiAhBJcExXcmXi/psKKDmdz9kJn9VNISM3uDux8e4umbFH19/2kzu9vdXxhJW9z9FTPrkTRN0lRFlx4AGN5r48cnS63k7i+a2XOKaux8na6xV5vZrfF/ny1plqKbrl5U9T6QAhOWux8wsy2SPqwobH6myGpl1XHeOtMkvU7S/xYs+4yZvaIoP10s6a8knZT08fjyICSEwJqsRknnSvpa/JVhvxZFdxveIOn2Yk90914zW6foetZPKLpOZ6Rs+FUAjEB/jeXX+9mSPl+w3ouSlrn77lFpFTCOmdkMSf1DS01TdDb1P0eyyfix2DBKny74+4Sk97r7thG8HsrAJQHJ6v/af0vB/K2KhsW4wcxKhchvKLr7sdnMXjOShpjZ2YrOrJ6Q1DOSbQETTP9NVn9YaqX4evOpimo7v8aed3dzd5M0RdGB9aSkrWZWcpsASouPod9UdInNzYpuqrrbzOoLVi2rjmMXFjwn37lxLddLeqeimzG/bWZzKm07KkNgTYiZzZR0Rfzn/vwBhxUdzM5SdGfjlUNtw91zis7M1CsaYWAk3q7ojPov3f33I9wWMJE8Ej9eNcx6VyiqsZ/H19UN4u4vuPv3JH1E0msUDYcF4Mw1SVos6R53/4qiY+Z0nR77uF9ZdRyPMjBN0ZBzhZcDnOLuve6+U9JKRWH528OcgMIIEViTc4OirxV+rOjTX+F0f7zecHcUf0dSm6SPKirCisVF1P81xr+fyTaACWyzoq8Gb4yHxhmkoMa+PdwG3f07kvZKepeZLaxWQ4GJJL4U4EuKfphnbTx7g6IfymmKb4jqt1XRpTgr4xNKQ+m//vVb5bQhvrnr+5L+VNHNlEgIgTUBZpZSNBTOCUnvc/cPF06KbsZ6TtJfm1lmqG25+0lJn1V0Rrbis6xmNlXRAfcdin7R446K3xAwgbn7ryV9VdIfSLrfzKblLzezs+LlVyoaXqfcX666LX68tSoNBSaQ+EPitxSd3byx/8bk+NuNDyq6NOebZlYXzz+i6OzrqyX9Rxx287eXMrPPSXqfonFV/7mC5tym6EPtLfHxHwngpqtkXK1oSKrtQ32lEN+1/2+S/kbSuyXdPdTG3P0H8c/CXT7M637MzJ5WdGY3o2gQ40Ua+NOsxyt9MwD0CUXjJ79fUoeZFf406xsktUq6Nr6UZ1ju/kMz+5miEUMujwcgB1CeNYo+JN7j7j/KX+Du7WZ2u6IxVdfp9I8AfFnRB8+/k9RuZjsU/ZhP/0+zvlFRWF3m7sfKbYi7/8rM7lM0YsD1kr43gveFIdjAm9dRDWb2XUVfDax092yJ9d4i6VFJe919QXx96y/d/S1F1r1c0n/Ff37F3W/OW3avBv66zglJxxQNzdEqKStpR3y2FsAZMrO/UHR5znxFB77jii7Z+a6kbxWGVTPrlnSeu583xPb+UtHYkQ+5+9UJNh0YN+Kzo79SdD/IJcWGfYzHG98r6U8kXe7uj+QtW6Bo2MiFiq5XfVHRz7VmJd3h7n1FtvewotF9zi124ifveN4u6Y853lYfgRUAAABB41oLAAAABI3ACgAAgKARWAEAABA0AisAAACCRmAFAABA0IYLrD6a09KlS0f19ZiYzmAK3ajuD2qWKfApdKO6P6hXpjEwDSmoM6xHjhypdRMAVICaBcYO6hVjWVCBFQAAAChEYAUAAEDQCKwAAAAIGoEVAAAAQSOwAgAAIGgEVgAAAASNwAoAAICgEVgBAAAQNAIrAAAAgkZgBQAAQNAIrAAAAAgagRUAAABBI7ACAAAgaARWAAAABI3ACgAAgKBVPbA++eSTuummm7RgwQLV1dXJzNTd3V3tlwFQBdQrMHZQr5jIqh5YOzs7tXXrVjU0NGjhwoXDP6GrS2pqkjIZqbU1emxqiuYDRXT1dKlpe5My6zNK3ZZSZ
n1GTdub1NVDn6lUxfWqgfu/9bet7H+URL1WD/WKUZGfy1KpYHKZuXup5SUXFnPy5EmlUlEOvueee3TjjTfq0KFDmj59+uCVd+6UGhulXE7K5TRP0j5JSqejKZuVli2rtAkYx3Z27FTjtkblTuSUO5k7NT+dSis9Ka3syqyWXZxon7EkN14FFdVsRfWqIvv/LkkfHdX9jzGEeh0W9YqwFOSyU0Yvlw1Zs1U/w9pfTMPq6op2Sl/fwJ0iRX/39UXLOdOKWFdPlxq3Naov1zfg4CdJuZM59eX61LitkTMHFSi7XsX+R2XoL9VHvSJRgeey2t10tWHD4B1SKJeTNm4cnfYgeBse2aDcidJ9Jncip4176TNJYP+jEvSX2mL/o2KB57LaBdYtW8rbMS0to9MeBG9L25ZBZwoK5U7m1NJGn0kC+x+VoL/UFvsfFQs8l9UusB4/Xt31MO4df6W8vlDueqgM+x+VoL/UFvsfFQs8l9UusNbXV3c9jHv1Z5XXF8pdD5Vh/6MS9JfaYv+jYoHnsto
"text/plain": [
"<Figure size 864x216 with 3 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"plot_perceptron()"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Funkcje aktywacji\n",
"\n",
"Zamiast funkcji bipolarnej możemy zastosować funkcję sigmoidalną jako funkcję aktywacji."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"def plot_activation_functions():\n",
" plt.figure(figsize=(16,7))\n",
" plt.subplot(121)\n",
" x = [-2,-.23,2] \n",
" y = [-1, -1, 1]\n",
" plt.ylim(-1.2,1.2)\n",
" plt.xlim(-2.2,2.2)\n",
" plt.plot([-2,2],[1,1], color='black', ls=\"dashed\")\n",
" plt.plot([-2,2],[-1,-1], color='black', ls=\"dashed\")\n",
" plt.step(x, y, lw=3)\n",
" ax = plt.gca()\n",
" ax.spines['right'].set_color('none')\n",
" ax.spines['top'].set_color('none')\n",
" ax.xaxis.set_ticks_position('bottom')\n",
" ax.spines['bottom'].set_position(('data',0))\n",
" ax.yaxis.set_ticks_position('left')\n",
" ax.spines['left'].set_position(('data',0))\n",
"\n",
" plt.annotate(r'$\\theta_0$',\n",
" xy=(-.23,0), xycoords='data',\n",
" xytext=(-50, +50), textcoords='offset points', fontsize=26,\n",
" arrowprops=dict(arrowstyle=\"->\"))\n",
"\n",
" plt.subplot(122)\n",
" x2 = np.linspace(-2,2,100)\n",
" y2 = np.tanh(x2+ 0.23)\n",
" plt.ylim(-1.2,1.2)\n",
" plt.xlim(-2.2,2.2)\n",
" plt.plot([-2,2],[1,1], color='black', ls=\"dashed\")\n",
" plt.plot([-2,2],[-1,-1], color='black', ls=\"dashed\")\n",
" plt.plot(x2, y2, lw=3)\n",
" ax = plt.gca()\n",
" ax.spines['right'].set_color('none')\n",
" ax.spines['top'].set_color('none')\n",
" ax.xaxis.set_ticks_position('bottom')\n",
" ax.spines['bottom'].set_position(('data',0))\n",
" ax.yaxis.set_ticks_position('left')\n",
" ax.spines['left'].set_position(('data',0))\n",
"\n",
" plt.annotate(r'$\\theta_0$',\n",
" xy=(-.23,0), xycoords='data',\n",
" xytext=(-50, +50), textcoords='offset points', fontsize=26,\n",
" arrowprops=dict(arrowstyle=\"->\"))\n",
"\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA4sAAAGKCAYAAACl0NTvAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOzdd3hUZfrG8XvSOxAIEHrokEgNCCKigIIi4KJiWVxAWXApKygqKEVWFHVRwZ+A4NqWoiAugqAioOCKsvSW0CGhk0ACCenJnN8fWQazQwlkMicz8/1cVy7zvmfOzJM45Mmdc857LIZhCAAAAACA3/MyuwAAAAAAQNlDWAQAAAAA2CEsAgAAAADsEBYBAAAAAHYIiwAAAAAAOz7X2c5SqUAxdO/eXd9//73ZZQCuwGJ2AW6A3gwUA70ZKLar9maOLAIOcPbsWbNLAAAAv0NvBkqOsAgAAAAAsENYBAAAAADYISwCAAAAAOwQFgEAAAAAdgiLAAAAAAA7hEUAAAAAgB3CIgAAAADADmERAAAAAGCHsAgAAAAAsENYBAAAAADYISwCAAAAAOwQFgEAAAAAdgiLAAAAAAA7hEUAAAAAgB3CIgAAAADADmERAAAAAGCHsAgAAAAAsENYBAAAAADYISwCAAAAAOwQFgEAAAAAdgiLAAAAAAA7hEUAAAAAgB3CIgAAAADADmERAAAAAGCHsAgAAAAAsENYhNs4fvy4RowYofbt2ysoKEgWi0UJCQnF2tdqtWrKlCmqU6eOAgIC1Lx5c3311VelWzAAAG6O3gy4NsIi3MbBgwe1aNEiVahQQR07dryhfcePH69XXnlFw4cP13fffad27drp4Ycf1rfffltK1QIA4P7ozYBrsxiGca3t19wIlCVWq1VeXoV///jHP/6hP//5zzpy5Ijq1Klzzf2SkpJUs2ZNjRkzRpMmTbLNd+nSRcnJydq5c+d1Xzs2NlabN28uUf2Ah7CYXYAboDfDZdCbAZdw1d7MkUW4jUvN6EatXLlSubm56tevX5H5fv36adeuXTpy5IgjygMAwOPQmwHX5rQji3feeafdXN++fTV06FBlZmbqvvvus9s+YMAADRgwQGfPntVDDz1kt/0vf/mLHnnkER07dkxPPPGE3fbnnntOPXv21L59+zRkyBC77ePGjVPXrl21fft2jRw50m7766+/rttuu02//vqrXnrpJbvt06ZNU4sWLbR69WpNnjzZbvvs2bPVqFEjffPNN3r77bftts+dO1c1a9bUwoULNWvWLLvtixcvVqVKlfTpp5/q008/tdv+7bffKigoSDNnztSiRYvstq9du1aSNHXqVC1fvrzItsDAQH333XeSpFdffVVr1qwpsr1ixYq26wLGjh2r3377rcj2GjVqaN68eZKkkSNHavv27UW2N2zYUHPmzJEkDR48WPv37y+yvUWLFpo2bZqkwh/8x48fL7K9ffv2mjJliiTpwQcf1Llz54ps79Kli8aPHy9Juvfee5WVlVVke0REhBYvXqwjR45owIABtvkLkbE6X6ODDG8/OdKpz0Yqsv80hz4nUBYlvNGjpE/BkcWSozfTm92qN1/y+/de48aNdfz4cd1xxx227QMGDFDTpk116623KiYmRhUrViyy//++97Zs2aLWrVvbtvPeWyvJM997999/v0aPHi3JvX7uXfp/6gAcWQQklUpQBAAAjpWfny8fHx+7+fDwcNv2K5kzZ4569OihLVu2KC8vr1RrBDwB1yzCLV3tuog6Y1aUyutxZBGegiOLZQK9GS7pRq5Z/POf/6zly5fr1KlTReYPHDighg0b6p///OcVj+D8HtcsAsV21d5s/ycbwENc+qX3xRdf1PTp05WVlSWL5fK/lY0bN+rWW2/V8uXL1aPHtX9Bjl09UZtL/ks0AABQ4RHE1NRUGYZRpDenpqbatgPuKCe/QGcv5io5PUdn03N09mKOzmXk6uzFHKVk5ColI1fnLuYqNTNXF3PytXPiPUX+jTgaYREeLzo6Wjk5OTp06JDq169vm4+Pj5ckNW3a1KzSAADwSPRmuBvDMHQ+M08nL2Tp9IVsnbyQrdMXsnQmLUdn0rJ1Ji1bSek5Op95Y6dPZ+YWKNi/9CIdYREer3v37vLz89P8+fM1ceJE2/y8efMUExOjqKgoE6sDAMDz0Jvhii5k5ikxJUPHUrJ0LDVTx1IydTw1SyfOZ+nk+Sxl5hY4/DVTMnIJi0BxLV68WJK0ZcsWSdJ3332niIgIRUREqFOnTlfcp3Llyho1apSmTJmi0NBQtWrVSgsXLtSPP/6opUuXOq12AADcUXF6s4+Pj/r376+PPvpIEr0ZZVdmbr4OJ2fo8NkMHU6+qCNnM5RwLlOJ5zJu+KjglXh7WVQx2E+Vw/xVKaTwo2KInyoF+ys82E/hIX6qGOxX+Hmwn4L8SjfOERbhVh5++OEi46FDh0qSOnXqdM3lhV977TWFhIRo+vTpOn36tBo1aqRFixapZ8+epVkuAABurzi9uaCgQAUFRY+60JthpqzcAu0/k659p9O1/0y6DiRd1MGkizpxPuv6O19FsJ+3IssHKrJcgCLLBahqucLPq4T5q3JogKqEBSg82E/eXmVnLThWQ4VH+f1qqA5Y1dGGFdeAYis7HdB10ZuBYqA3o7iS03O0++QFxZ9MK/w4laaEcxm6dkyy5+/jpdoVg1QrPEg1KgSpZniQalQIVI0KgapePlDlAn1LdTGaEmA1VAAAAACeLT07TzuOXdCO4+e18/h57Tx+QacuZBd7f28vi2pXDFLdSiGqFxGsqEqFH7UrBqtyqL+8ytBRQUcgLAIAAABwO4Zh6GhKpjYeSdHWo6namnhe+5PSi3XE0Msi1akYrEZVQ9WoaqgaVglV/cohqlMxWH4+XqVffBlBWAQAAADg8gzD0JGzGfrt8Dn9duicNiWk6ExaznX3C/D1UtPIMEVXK6foamFqWi1MDauEKsDX2wlVl22ERQAAAAAu6ezFHP1y4Kx+PpCsXw+e0+m0a59S6mWRGlcNU8ta5dW8Rnk1q1lO9SNC5OPtOUcLbwRhEQAAAIBLKLAa2nH8vH7ck6S1+5O0+0TaNR8f6u+j1nUqqE2dcLWqVUHNapQr1fsSuhu+UwAAAADKrKzcAv18IFk/xJ3R2n1JOpeRe9XHhvr76Na6FdW+XkW1qxuuxlXDytStKFwNYREAAABAmXIxJ19r9pzR97tPa+2+ZGXlFVzxcd5eFrWqVV53NIhQx4YRiqkWximlDkRYBAAAAGC67LwCrd2XpGU7TmrNniTl5Fuv+LhKIf7q3DhCnRtXVof6lRQa4OvkSj0HYREAAACAKQzD0KaEVP1r63Gt2HlK6Tn5V3xc/coh6hZdRfc0rapbqpdzu/sZllWERQAAAABOdfpCthZtPqbFW47raErmFR/TuGqoetwSqXtvqar6lUOdXCEkwiIAAAAAJyiwGlq7L0mfbzyqH/cmyWrYP6ZOxSD1al5NPZtXU4MqBESzERYBAAAAlJrzm
blauOmY/vlbok6cz7LbHhbgo/ubV9ODrWqoVa3yslg4xbSsICwCAAAAcLiDSen66JcjWrLthLLz7Berua1eRT3atpbuaVpFAb7eJlSI6yEsAgAAAHCISwvWzPn5kFbvSbLbXiHIV31ja+rRtrUUVSnYhApxIwiLAAAAAErEMAz9tC9J7/94UFuPnrfb3jQyTAM61FGv5tU4iuhCCIsAAAAAborVamjVnjP6vx8PaPeJNLvtXZtU0eA76qpNnQpci+iCCIsAAAAAbohhGFq7L1l/X7lP8aeKhkQ/by/1aVVdgzrWVf3KISZVCEcgLAIAAAAoto1HUvT3lXu1KSG1yLy/j5f+eGttDelUV1XCAkyqDo5EWAQAAABwXQeTLmrKt3u0Zm/RhWsCfL30p/Z1NKhjlCqHEhLdCWERAAAAwFWdu5ij6WsOaP5/jqrAatjmfb0terxtLQ3rXJ+Q6KYIiwAAAADs5BdYNXdDot5ZtV/p2fm2eYtF+kOL6hp1d0PVDA8ysUKUNsIiAAAAgCI2JaR
"text/plain": [
"<Figure size 1152x504 with 2 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"plot_activation_functions()"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Perceptron a regresja liniowa"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"<img src=\"reglin.png\" width=\"70%\"/>"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Uczenie regresji liniowej:\n",
"* Model: $$h_{\\theta}(x) = \\sum_{i=0}^n \\theta_ix_i$$\n",
"* Funkcja kosztu (błąd średniokwadratowy): $$J(\\theta) = \\frac{1}{m} \\sum_{i=1}^{m} (h_{\\theta}(x^{(i)}) - y^{(i)})^2$$\n",
"* Po obliczeniu $\\nabla J(\\theta)$ - zwykły SGD."
]
},
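{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"source": [
"A minimal SGD sketch for the squared-error cost above (not part of the original slides): the synthetic data and the learning rate are made up for illustration, and the constant factor of the gradient is absorbed into `alpha`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"rng = np.random.default_rng(0)\n",
"\n",
"# Synthetic data: y = 2 + 3*x + noise; the first column of X is x_0 = 1.\n",
"m = 100\n",
"x = rng.uniform(-1, 1, size=m)\n",
"X = np.column_stack([np.ones(m), x])\n",
"y = 2 + 3 * x + 0.1 * rng.normal(size=m)\n",
"\n",
"theta = np.zeros(2)\n",
"alpha = 0.1\n",
"\n",
"for epoch in range(50):\n",
"    for i in rng.permutation(m):\n",
"        error = X[i] @ theta - y[i]    # h_theta(x) - y\n",
"        theta -= alpha * error * X[i]  # SGD step on the squared error\n",
"\n",
"print(theta)  # should be close to [2, 3]"
]
},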
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Perceptron a dwuklasowa regresja logistyczna"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<img src=\"reglog.png\" width=\"60%\"/>"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Uczenie dwuklasowej regresji logistycznej:\n",
"* Model: $h_{\\theta}(x) = \\sigma(\\sum_{i=0}^n \\theta_ix_i) = P(1|x,\\theta)$\n",
"* Funkcja kosztu (entropia krzyżowa): $$\\begin{eqnarray} J(\\theta) &=& -\\frac{1}{m} \\sum_{i=1}^{m} \\big( y^{(i)}\\log P(1|x^{(i)},\\theta) \\\\ && + (1-y^{(i)})\\log(1-P(1|x^{(i)},\\theta)) \\big) \\end{eqnarray}$$\n",
"* Po obliczeniu $\\nabla J(\\theta)$ - zwykły SGD."
]
},
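{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"source": [
"A minimal SGD sketch for the cross-entropy cost above (not part of the original slides): the synthetic, linearly separable data and the learning rate are made up for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"def sigmoid(z):\n",
"    return 1 / (1 + np.exp(-z))\n",
"\n",
"rng = np.random.default_rng(0)\n",
"\n",
"# Synthetic binary data: label 1 when x_1 + x_2 > 0; the first column is x_0 = 1.\n",
"m = 200\n",
"X = np.column_stack([np.ones(m), rng.normal(size=(m, 2))])\n",
"y = (X[:, 1] + X[:, 2] > 0).astype(float)\n",
"\n",
"theta = np.zeros(3)\n",
"alpha = 0.1\n",
"\n",
"for epoch in range(100):\n",
"    for i in rng.permutation(m):\n",
"        p = sigmoid(X[i] @ theta)           # P(1 | x, theta)\n",
"        theta -= alpha * (p - y[i]) * X[i]  # per-example cross-entropy gradient\n",
"\n",
"print(theta, np.mean((sigmoid(X @ theta) > 0.5) == y))"
]
},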
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Perceptron a wieloklasowa regresja logistyczna"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"<img src=\"multireglog.png\" width=\"40%\"/>"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Wieloklasowa regresja logistyczna\n",
"* Model (dla $c$ klasyfikatorów binarnych): \n",
"$$\\begin{eqnarray}\n",
"h_{(\\theta^{(1)},\\dots,\\theta^{(c)})}(x) &=& \\mathrm{softmax}(\\sum_{i=0}^n \\theta_{i}^{(1)}x_i, \\ldots, \\sum_{i=0}^n \\theta_i^{(c)}x_i) \\\\ \n",
"&=& \\left[ P(k|x,\\theta^{(1)},\\dots,\\theta^{(c)}) \\right]_{k=1,\\dots,c} \n",
"\\end{eqnarray}$$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Funkcja kosztu (**przymując model regresji binarnej**): $$\\begin{eqnarray} J(\\theta^{(k)}) &=& -\\frac{1}{m} \\sum_{i=1}^{m} \\big( y^{(i)}\\log P(k|x^{(i)},\\theta^{(k)}) \\\\ && + (1-y^{(i)})\\log P(\\neg k|x^{(i)},\\theta^{(k)}) \\big) \\end{eqnarray}$$\n",
"* Po obliczeniu $\\nabla J(\\theta)$, **c-krotne** uruchomienie SGD, zastosowanie $\\mathrm{softmax}(X)$ do niezależnie uzyskanych klasyfikatorów binarnych."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Przyjmijmy: \n",
"$$ \\Theta = (\\theta^{(1)},\\dots,\\theta^{(c)}) $$\n",
"\n",
"$$h_{\\Theta}(x) = \\left[ P(k|x,\\Theta) \\right]_{k=1,\\dots,c}$$\n",
"\n",
"$$\\delta(x,y) = \\left\\{\\begin{array}{cl} 1 & \\textrm{gdy } x=y \\\\ 0 & \\textrm{wpp.}\\end{array}\\right.$$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Wieloklasowa funkcja kosztu $J(\\Theta)$ (kategorialna entropia krzyżowa):\n",
"$$ J(\\Theta) = -\\frac{1}{m}\\sum_{i=1}^{m}\\sum_{k=1}^{c} \\delta({y^{(i)},k}) \\log P(k|x^{(i)},\\Theta) $$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Gradient $\\nabla J(\\Theta)$:\n",
"$$ \\dfrac{\\partial J(\\Theta)}{\\partial \\Theta_{j,k}} = -\\frac{1}{m}\\sum_{i = 1}^{m} (\\delta({y^{(i)},k}) - P(k|x^{(i)}, \\Theta)) x^{(i)}_j \n",
"$$\n",
"\n",
"* Liczymy wszystkie wagi jednym uruchomieniem SGD"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"## Podsumowanie\n",
"\n",
"* W przypadku jednowarstowej sieci neuronowej wystarczy znać gradient funkcji kosztu.\n",
"* Wtedy liczymy tak samo jak w przypadku regresji liniowej, logistycznej, wieloklasowej logistycznej itp. (wymienione modele to szczególne przypadki jednowarstwowych sieci neuronowych).\n",
"* Regresja liniowa i binarna regresja logistyczna to jeden neuron.\n",
"* Wieloklasowa regresja logistyczna to tyle neuronów, ile klas."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"Funkcja aktywacji i funkcja kosztu są **dobierane do problemu**."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"## 4.2. Funkcje aktywacji"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"\n",
"* Each activation function has its own advantages and disadvantages.\n",
"* Different kinds of activation functions are suited to different applications."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/pawel/.local/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
" from ._conv import register_converters as _register_converters\n",
"Using TensorFlow backend.\n"
]
}
],
"source": [
"%matplotlib inline\n",
"\n",
"import math\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import random\n",
"\n",
"import keras\n",
"from keras.datasets import mnist\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Dropout, SimpleRNN, LSTM\n",
"from keras.optimizers import Adagrad, Adam, RMSprop, SGD\n",
"\n",
"from IPython.display import YouTubeVideo"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"def plot(fun):\n",
" x = np.arange(-3.0, 3.0, 0.01)\n",
" y = [fun(x_i) for x_i in x]\n",
" fig = plt.figure(figsize=(14, 7))\n",
" ax = fig.add_subplot(111)\n",
" fig.subplots_adjust(left=0.1, right=0.9, bottom=0.1, top=0.9)\n",
" ax.set_xlim(-3.0, 3.0)\n",
" ax.set_ylim(-1.5, 1.5)\n",
" ax.grid()\n",
" ax.plot(x, y)\n",
" plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"### Funkcja logistyczna"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"$$ g(x) = \\frac{1}{1 + e^{-x}} $$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"* Przyjmuje wartości z przedziału $(0, 1)$."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### Funkcja logistyczna wykres"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA1cAAAG2CAYAAACTRXz+AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzt3XmsXudh3/nfw7vvGy/3VRu1Wou1\n2HAWOk4cx0jjpq1bezpN0mXUdhq0M0Axk04GCaaDAl2AWVNMa6RB00GRNJhOWs/UbdI0ZdJ2YFmW\nLVnWZslcxEUSl7vxbrzbmT/el1ekxCvJvke8l+TnA7x43/O+5/I8NB6R/Pqc87ylqqoAAACwPls2\negAAAAA3A3EFAABQA3EFAABQA3EFAABQA3EFAABQA3EFAABQg1riqpTya6WUs6WUb6/x+eFSymQp\n5dnm45fqOC4AAMBm0VrTr/OPk/xKkn/yHvv8h6qqfrKm4wEAAGwqtZy5qqrqD5OM1fFrAQAA3Iiu\n5z1XHy+lPFdK+dellPuu43EBAAA+dHVdFvh+vpFkf1VV06WUzyb5F0nuvNaOpZQnkzyZJJ2dnR/d\nt2/fdRoiN4qVlZVs2WItFq5mXrAWc4NrMS9Yi7nBtXznO985X1XV6PvtV6qqquWApZQDSf7fqqru\n/wD7Hk/yaFVV599rv0OHDlWvvPJKLePj5nHkyJEcPnx4o4fBJmNesBZzg2sxL1iLucG1lFKeqarq\n0ffb77pkeSllRymlNF8/3jzuhetxbAAAgOuhlssCSym/keRwkq2llFNJfjlJW5JUVfUPkvyJJH+5\nlLKUZC7JF6q6TpkBAABsArXEVVVVX3yfz38ljaXaAQAAbkru1gMAAKiBuAIAAKiBuAIAAKiBuAIA\nAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiB\nuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIA\nAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiB\nuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIA\nAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiB\nuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKiBuAIAAKhBLXFVSvm1UsrZUsq31/i8\nlFL+t1LKa6WUb5VSHqnjuAAAAJtFXWeu/nGSz7zH5z+R5M7m48kk/0dNxwUAANgUaomrqqr+MMnY\ne+zyuST/pGr4apLBUsrOOo4NAACwGVyve652Jzl5xfap5nsAAAA3hdaNHsA7lVKeTOPSwYyOjubI\nkSMbOyA2nenpafOCdzEvWIu5wbWYF6zF3GA9rldcnU6y94rtPc333qWqqi8l+VKSHDp0qDp8+PCH\nPjhuLEeOHIl5wTuZF6zF3OBazAvWYm6wHtfrssAvJ/mZ5qqBH0syWVXVG9fp2AAAAB+6Ws5clVJ+\nI8nhJFtLKaeS/HKStiSpquofJPlKks8meS3JbJI/W8dxAQAANota4qqqqi++z+dVkr9Sx7EAAAA2\no023oAUAAMCHpaqqXFpaycylpcwuLGdmYSkzl5Yyc2k5swuN55mFt7d//L4dH/jXFlcAAMCmd2lp\nOdPzS5m+tJSLzefp+aXMLFy9/fbni1e9N7uwvPq8vFJ9oGOWkuwd7v7AYxRXAADAh+bymaKpucVM\nzi1man4xU3NLmZpvbs8t5uIVETQ9v3T1dvP1wvLK+x6rZUtJb0drejta09fZeB7sbs+e4e70tLek\nu73xXndHS3raW9Pd3tLcbl39vKejJT0drelpb01n25aUUvKnPuDvVVwBAADvaXG5EUdT80vvGUlT\n80tXvG5+Prf4vmHU0bplNYZ6m8+7Brve9d7qo7M1fVe+39mavo621RjaKOIKAABuESsrVS7OL2Vi\nbiHjs4sZn13IZPN5fHYxE7MLmWhuT1zx+cVLS+/567ZuKRnoakv/5Udna3YPdTXe62xLf1dr+jvb\n3t6ns3X1dV9nazpaW67T/wIfLnEFAAA3oKqqMjW/lAvTlzI2s5ALMwu5ML2QsZlLzTBqxNL47EIm\n5hYz0dxe63ajUpL+zrYMdbdloLs9I73tuWNbbwa72zLY1Z7B7mtFUuP1Rp8x2izEFQAAbAKXY2ls\nZiEXpi/lwszCNV5fjqhLGZ9dyOLytUupq60lQ91tGexuz1BPW3YOdGWwuy1D3Y1IGuxuf/vz5vNA\nV1tatgik9RBXAADwIamqKhOzizk3fSnnLjYeZy/Or76+MLOQ882zTWMza8dSb0drRnrbM9zTnt2D\nnfnI7oEM97ZnpKe9+X7H6uuh7vZ0tt0cl9ndaMQVAAB8j+YXl5uh1Iym6Us5NzV/VURdfv9awdTZ\ntiWjfR3Z2tuR3YOdeWB3f0Z6G4E03NO++los3VjEFQAANC2tVDk9MZc3J+fz1tT828/N142IunTN\nBR5KSUZ6OjLa15FtfR25c3tfRvs6Mtr79nujzUdvR6t7lG5C4goAgJteVVW5eGkpb02+HUpvR9Ol\nvDU1nzcm53Nh+lKq3/39q362vWVLtg90ZEd/Z+7Z0Z8fuvPtSLocT9v6OzLc3Z7Wli0b9DtkMxBX\nAADc8OYXl3NmYi5nJuZzZmIupyfmGtuTc3ljohFRswvL7/q5we627OjvzPb+zty7sz/z42/mYw/e\nvfrejoHODHW3OcvEByKuAADY1FZWqlyYWXg7mK6Mp2ZMXZhZuOpnSkm29XVk50BX7t7Zlx8+NJod\nzVi6/Ly9v/Nd9zIdOTKWw4/vu56/PW4i4goAgA21vFLlran5nBybzcnxuZwcm70qpM5MzmdhaeWq\nn+lub8nuwa7sGuzK/bsHsnuwM7ua27sHu7K9vzPtrS7R4/oSVwAAfKguL0d+cnw2J8fm8vrYbPP1\nbE6Nz+X0+FwWlt+Op1KS7X2d2T3UlQf2DObH7+9shNTA2/HU32VBCDYfcQUAwLrNLy7n5NhsI5yu\nOAP1ejOgpt+xut5Qd1v2Dnfn3p39+fR927NvuDt7h7qzd7g7uwY709Fq6XFuPOIKAIAPZG5hOSfG\nZnL8/GyOX5jJiQszOXZ+JicuzOaNyfmr9u1s27IaS08cHM7e4cbrxntd6ets26DfBXx4xBUAAKtm\nF5Zy4sJsjp+fyfELs1cF1JtTVwfUcE97Dox05+O3jeTA1p7sH+nOnmY8jfZ2uGyPW464AgC4xSwt\nr+T1sdkcPTeT756bztFzMzl2YSbHz8/
k7MVLV+27tbc9+0d68ok7tubASHf2b+3JwZGe7BvpzkCX\ns09wJXEFAHCTmpxdzGvnpnP03HS+e26m+Tyd18dms7hcre63tbc9B7f25IfuGs2Bke4c2NqTAyON\nM1Eu34MPTlwBANzAlpZXcmp8bvUM1JXPV373U1tLyf6RntyxrTefvm9Hbh/tzW2jPbl9a28GugUU\n1EFcAQDcAJaWV3L8wmxefetivvPWdF49ezGvvjWdY+dnrlrGfKSnPbeN9uTH7t3eiKfR3tw22pu9\nQ11pbfG9T/BhElcAAJvI5Yh67Wwjor7zViOijp6fvupSvr3DXblrW18O3z2a20d7m4+eDHa3b+Do\n4dYmrgAANsDS8kpOjL19Juo7b13Ma2cbl/RdeSbqckR98u5tuXNbb+7a3pfbt/Wku90/42Cz8V8l\nAMCH7Pz0pbz8xsW8/OZUXmo+v3p2OgtL746oHz40mru29YkouAH5rxUAoCbzi8t57ex0Xn7zYl5+\nY6rx/OZUzk+/vbDEtr6
"text/plain": [
"<matplotlib.figure.Figure at 0x7fdda9490fd0>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"plot(lambda x: 1 / (1 + math.exp(-x)))"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"### Tangens hiperboliczny"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"$$ g(x) = \\tanh x = \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} $$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"* Przyjmuje wartości z przedziału $(-1, 1)$.\n",
"* Powstaje z funkcji logistycznej przez przeskalowanie i przesunięcie."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### Tangens hiperboliczny wykres"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA1cAAAG2CAYAAACTRXz+AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzs3Xd0HOW9h/Hvu7vq1bKKZVtyt9xx\nw6ZjUw0pQCgBQjc4cOEGktz0nnADaeSShCQQejVOaCZ0AqLjinuVu+Si3tuW9/6htRFGsmVrpFlJ\nz+ecPTuzO9r5kTNH8pPZnTXWWgEAAAAAOsfj9gAAAAAA0BsQVwAAAADgAOIKAAAAABxAXAEAAACA\nA4grAAAAAHAAcQUAAAAADnAkrowxDxpjio0xa9p5fpYxpsoYsyJ8+6kT+wUAAACASOFz6HUelvQX\nSY8eYpv3rLVfdGh/AAAAABBRHDlzZa19V1K5E68FAAAAAD1Rd37m6nhjzEpjzCvGmPHduF8AAAAA\n6HJOvS3wcJZLGmKtrTXGnCvpeUmj2trQGDNP0jxJio2NnZabm9tNI6KnCIVC8ni4Fgs+i+MC7eHY\nQFs4LtAejg20ZdOmTaXW2ozDbWestY7s0BgzVNK/rbUTOrDtdknTrbWlh9ouLy/Pbty40ZH50Hvk\n5+dr1qxZbo+BCMNxgfZwbKAtHBdoD8cG2mKMWWatnX647boly40xA4wxJrw8I7zfsu7YNwAAAAB0\nB0feFmiMeUrSLEnpxphCST+TFCVJ1tq/S7pI0k3GmICkBkmXWqdOmQEAAABABHAkrqy1lx3m+b+o\n5VLtAAAAANAr8Wk9AAAAAHAAcQUAAAAADiCuAAAAAMABxBUAAAAAOIC4AgAAAAAHEFcAAAAA4ADi\nCgAAAAAcQFwBAAAAgAOIKwAAAABwAHEFAAAAAA4grgAAAADAAcQVAAAAADiAuAIAAAAABxBXAAAA\nAOAA4goAAAAAHEBcAQAAAIADiCsAAAAAcABxBQAAAAAOIK4AAAAAwAHEFQAAAAA4gLgCAAAAAAcQ\nVwAAAADgAOIKAAAAABxAXAEAAACAA4grAAAAAHAAcQUAAAAADiCuAAAAAMABxBUAAAAAOIC4AgAA\nAAAHEFcAAAAA4ADiCgAAAAAcQFwBAAAAgAOIKwAAAABwAHEFAAAAAA4grgAAAADAAcQVAAAAADiA\nuAIAAAAABxBXAAAAAOAA4goAAAAAHEBcAQAAAIADiCsAAAAAcABxBQAAAAAOIK4AAAAAwAHEFQAA\nAAA4gLgCAAAAAAcQVwAAAADgAOIKAAAAABxAXAEAAACAA4grAAAAAHAAcQUAAAAADiCuAAAAAMAB\nxBUAAAAAOIC4AgAAAAAHEFcAAAAA4ADiCgAAAAAcQFwBAAAAgAOIKwAAAABwAHEFAAAAAA5wJK6M\nMQ8aY4qNMWvaed4YY/5kjCkwxqwyxkx1Yr8AAAAAECmcOnP1sKQ5h3j+HEmjwrd5kv7m0H4BAAAA\nICI4ElfW2ncllR9ik/MkPWpbfCwp1RiT7cS+AQAAACAS+LppP4Mk7Wq1Xhh+bE837R8AAABAL2St\nVchKwZBVyFoFQ1ZBaxUKhZfD68GQVSikT5ftp8+HPvPYZ19rdFZSh2fprrjqMGPMPLW8dVAZGRnK\nz893dyBEnNraWo4LfA7HBdrDsYG2cFygPX3x2LDWKmilYEjyh6SAtQqE1OrWsu4Ph4m/jecCISlg\nJX8oHCe25bHQ/tcOv37IhuMmvL7/uQNRc2Bd4SjSZ7YNHfSzofDzXWnuhOgOb9tdcVUkKafV+uDw\nY59jrb1P0n2SlJeXZ2fNmtXlw6Fnyc/PF8cFDsZxgfZwbKAtHBdoTyQcG6GQVYM/qPrmoOqbA6pr\nCqoxEFSjP6gmf0iN/v3r4WV/SA3+oJr8wQPr+7c/sE0g9Jnn/cGQmgMhNQVblq3DgeLzGPm8Rj6P\nJ3zfsuz1GEV5Tfi+Zd3n9SjGYz77Mwf9vNdjFOXxyOs1ivIYeT2eA6/jC697PZLHY+Q1LY97wvcH\nbsa0PO/Rp8/tf2z/zxx4TAceG5qeoJ929L/b2f8Z27VQ0i3GmPmSZkqqstbylkAAAAD0eNZa1TcH\nVdMYUE2jX9WNAVU3+lXTGFB9U0B1zcFP78OxVN/c/uP1zcGjmiPa51Gsz6PYKK9io7yKi/IqNsqj\nmCivUuKiFJsUo9gor2J8HsVEeRTl9Sja51G0t+UWtX+51X3UgXujaJ9HMZ95rO3to7xGxhiH/1fu\nGRyJK2PMU5JmSUo3xhRK+pmkKEmy1v5d0suSzpVUIKle0rVO7BcAAABwgj8YUkV9s4pqQlq0tUwV\n9X5VNTSruuHzwVTT6G95vGn/ekDB0OFP/UR7PYqP8Soh2qf4aK/iY3xKiPYqNT5aCTFexUe3rO9/\nfP99XDiWYsOx9Jlln/dAMHk8fTNoIokjcWWtvewwz1tJNzuxLwAAAOBQgiGr8rpmldQ0qbS2SRX1\nzaqoa1ZFvV+V9S33FfXNqmx1X9sU+PQFPvj4M69njJQY41NybJSSYlvus1NiNTo2UclxLY8lxX56\nn9zqPiHGp4Ron+KivYr2OfUtSIhUEXdBCwAAAOBgoZBVWatg2n9rWW9utdyk8rpmtXciKTnWp34J\n0UqNj1b/xGiNzExUanyU+sVHq198lHZvL9CJ0ye3PJYQ3RJI0T7OCqFDiCsAAAC4yh8MqbimSXur\nGrW3qlF7qhpa7qsbta+qUXuqGrWvulGBNoopxudRRlKM0hNjNLhfvKbkpiojMUbp4cfSE2OUltAS\nTilxUfJ5D332KL9pu04ald5V/6no5YgrAAAAdKlGf1CFFfXaVdGgwvJ6FVY0aFdFvYoqGrSnqlEl\ntU2fu1pdbJRH2SlxGpAcq5nD0jQgJVZZybHKSIo5EFPpidFKjPH12YsnIPIQVwAAAOgUa61Kapq0\ntbRO20vrtLNVQBVWNKikpukz20f7PBrcL06DUuM0ZkCyBqTEHrhlp8QqOzlOyXFEE3oe4goAAAAd\nUtXg17bSOm0rrdW2kjptLa3TtnBQ1bW6fLjXYzQwNVY5/eI1Oy9DOf3ilZMWr5y0OA3uF6+MxBg+\nw4ReibgCAADAZ5TWNmnT3hpt2lejTcW12ryvRltL6lRW13xgG4+RBveL17D0BB07NE3DMxI0tH+C\nhqUnKDsl9rCfbQJ6I+IKAACgj6pu9GvDnnBEHbjVqrxVRKXERWl0VqLOGp+lYekJGpaeqGHpLWei\nYnxeF6cHIg9xBQAA0AeU1DRp7e4qrd1dfeB+R1n9gecTY3walZWos8ZlaVRWkvKykjQ6K1EZSTF8\n9gnoIOIKAACglymuadSKnZVaXfRpTO2r/vSiErlp8Ro/MFkXTxus8QNTNHpAkgamxBJRQCcRVwAA\nAD1YUyCodbur9cnOSn2yq1Kf7KxQYUW
DpJbPRY3MTNSJI9I1bmCyxg9M0biByUqJi3J5aqB3Iq4A\nAAB6kH3VjVq8rVzLd1bok52VWre7Ws3BkCRpYEqspuT20zUnDNWU3FSNH5ii2Cg+FwV0F+IKAAAg\nQllrVVjRoEXbyrV4W5kWbSs/8Dmp2CiPJg1O1bUnDdWUnH6akpuqrORYlycG+jbiCgAAIILsKq/X\nBwWlWrStXIu2lml3VaMkKTU+SscOTdOVxw3RjGFpGpudrCgudw5EFOIKAADARdWNfn1YUKb3C0r0\n/uZSbQ+fmUpPjNbMYf114/A0zRiWptGZSXzxLhDhiCsAAIBuFAiGtLKwUu9uKtV7m0u0srBKwZBV\nfLRXxw/vr6tPGKqTR6VrREYiV+8DehjiCgAAoItVN/r1zsYSvbWhWG9vLFZlvV/GSJMGp+qmU0fo\n5FHpmpLbT9E+3uYH9GTEFQAAQBfYUVanN9cX6z/r92nxtnIFQlb94qN0Wl6mThubqZNGpis1Ptrt\nMQE4iLgCAABwgLVW6/Z
"text/plain": [
"<matplotlib.figure.Figure at 0x7fdda93e9590>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"plot(lambda x: math.tanh(x))"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"### ReLU (*Rectifier Linear Unit*)"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"$$ g(x) = \\max(0, x) $$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### ReLU zalety\n",
"* Mniej podatna na problem zanikającego gradientu (*vanishing gradient*) niż funkcje sigmoidalne, dzięki czemu SGD jest szybciej zbieżna.\n",
"* Prostsze obliczanie gradientu.\n",
"* Dzięki zerowaniu ujemnych wartości, wygasza neurony, „rozrzedzając” sieć (*sparsity*), co przyspiesza obliczenia."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"#### ReLU wady\n",
"* Dla dużych wartości gradient może „eksplodować”.\n",
"* „Wygaszanie” neuronów."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### ReLU wykres"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA1cAAAG2CAYAAACTRXz+AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAGJhJREFUeJzt3X+sZOd91/HPN/6RRQk0AS+NYzvE\npdalpkBLLNcRVbkmTnFMFTelkWwhkQDVtgirBQmBi0UCgUitkABBItpVY9VJo6QWqZul2eIkda7d\nCNzaiZzWXmfdxQR2t4lN7CT0KmnM1g9/7Gx7vZ659+7Oc2fOzLxe0tXOj7PzPH98vfbb58zZaq0F\nAACA6bxk3hsAAABYBuIKAACgA3EFAADQgbgCAADoQFwBAAB0IK4AAAA66BJXVXVnVT1dVY9OeH+9\nqr5WVY+Mft7RY10AAIChuLDT5/x8kvckef82x/x6a+0HOq0HAAAwKF3OXLXWHkjybI/PAgAAWESz\n/M7V66vqc1X1q1X152e4LgAAwJ7rdVngTj6b5M+01jar6qYkv5zkqnEHVtWBJAeSZN++fa97zWte\nM6Mtsiief/75vOQl7sXCC5kLJjEbjGMukme+0fJ7/6/lWy6uvHJfzXs7g2E2GOeJJ574cmtt/07H\nVWuty4JV9dokv9Ja+85dHPuFJNe01r683XFra2vt6NGjXfbH8tjY2Mj6+vq8t8HAmAsmMRuMs8pz\n0VrLP//oo/mFB/93fvSvfltuv/HPpUpcnbHKs8FkVfWZ1to1Ox03kyyvqlfV6J/aqrp2tO4zs1gb\nAIDThBXsrS6XBVbVh5KsJ7mkqk4keWeSi5KktfYzSX44yd+vqlNJvpHkltbrlBkAADsSVrD3usRV\na+3WHd5/T07fqh0AgBkTVjAbvq0HALDEhBXMjrgCAFhSwgpmS1wBACwhYQWzJ64AAJaMsIL5EFcA\nAEtEWMH8iCsAgCUhrGC+xBUAwBIQVjB/4goAYMEJKxgGcQUAsMCEFQyHuAIAWFDCCoZFXAEALCBh\nBcMjrgAAFoywgmESVwAAC0RYwXCJKwCABSGsYNjEFQDAAhBWMHziCgBg4IQVLAZxBQAwYMIKFoe4\nAgAYKGEFi0VcAQAMkLCCxSOuAAAGRljBYhJXAAADIqxgcYkrAICBEFaw2MQVAMAACCtYfOIKAGDO\nhBUsB3EFADBHwgqWh7gCAJgTYQXLRVwBAMyBsILlI64AAGZMWMFyElcAADMkrGB5iSsAgBkRVrDc\nxBUAwAwIK1h+4goAYI8JK1gN4goAYA8JK1gd4goAYI8IK1gt4goAYA8IK1g94goAoDNhBatJXAEA\ndCSsYHWJKwCAToQVrDZxBQDQgbACxBUAwJSEFZCIKwCAqQgr4AxxBQBwnoQVsJW4AgA4D8IKOJu4\nAgA4R8IKGEdcAQCcA2EFTCKuAAB2SVgB2xFXAAC7IKyAnYgrAIAdCCtgN8QVAMA2hBWwW+IKAGAC\nYQWcC3EFADCGsALOlbgCADiLsALOh7gCANhCWAHnS1wBAIwIK2Aa4goAIMIKmJ64AgBWnrACehBX\nAMBKE1ZAL+IKAFhZwgroqUtcVdWdVfV0VT064f2qqv9QVceq6req6i/3WBcA4HwJK6C3Xmeufj7J\njdu8/6YkV41+DiT5T53WBQA4Z8IK2AsX9viQ1toDVfXabQ65Ocn7W2styYNV9YqqurS19sUe6wMA\n7FZrLR848lzuOy6sgL5m9Z2ry5Ic3/L8xOg1AICZOXPG6r7jp4QV0F2XM1c9VdWBnL50MPv378/G\nxsZ8N8TgbG5umgtexFwwidngjD86Y3UqN1zWct2+L+X++5+a97YYGH9mMI1ZxdXJJFdseX756LUX\naa0dTHIwSdbW1tr6+vqeb47FsrGxEXPB2cwFk5gNkq1nrE5fCnjdvi/l+uuvn/e2GCB/ZjCNWV0W\neCjJ3x7dNfC6JF/zfSsAYBbcvAKYlS5nrqrqQ0nWk1xSVSeSvDPJRUnSWvuZJIeT3JTkWJKvJ/k7\nPdYFANiOsAJmqdfdAm/d4f2W5B/0WAsAYDeEFTBrs7osEABgZoQVMA/iCgBYKsIKmBdxBQAsDWEF\nzJO4AgCWgrAC5k1cAQALT1gBQyCuAICFJqyAoRBXAMDCElbAkIgrAGAhCStgaMQVALBwhBUwROIK\nAFgowgoYKnEFACwMYQUMmbgCABaCsAKGTlwBAIMnrIBFIK4AgEETVsCiEFcAwGAJK2CRiCsAYJCE\nFbBoxBUAMDjCClhE4goAGBRhBSwqcQUADIawAhaZuAIABkFYAYtOXAEAcyesgGUgrgCAuRJWwLIQ\nVwDA3AgrYJmIKwBgLoQVsGzEFQAwc8IKWEbiCgCYKWEFLCtxBQDMjLAClpm4AgBmQlgBy05cAQB7\nTlgBq0BcAQB7SlgBq0JcAQB7RlgBq0RcAQB7QlgBq0ZcAQDdCStgFYkrAKArYQWsKnEFAHQjrIBV\nJq4AgC6EFbDqxBUAMDVhBSCuAIApCSuA08QVAHDehBXAHxFXAMB5EVYALySuAIBzJqwAXkxcAQDn\nRFgBjCeuAIBdE1YAk4krAGBXhBXA9sQVALAjYQWwM3EFAGxLWAHsjrgCACYSVgC7J64AgLGEFcC5\nEVcAwIsIK4BzJ64AgBcQVgDnR1wBAH9IWAGcP3EFACQRVgDTElcAgLAC6EBcAcCKE1YAfYgrAFhh\nwgqgH3EFACtKWAH0Ja4AYAUJK4D+xBUArBhhBbA3xBUArBBhBbB3usRVVd1YVUer6lhV3T7m/bdX\n1f+pqkdGPz/SY10AYPeEFcDeunDaD6iqC5K8N8kbk5xI8lBVHWqtHTnr0F9srd027XoAwLkTVgB7\nr8eZq2uTHGutPdlaey7Jh5Pc3OFzAYAOhBXAbEx95irJZUmOb3l+Isn3jDnub1bV9yV5Isk/aq0d\nH3NMqupAkgNJsn///mxsbHTYIstkc3PTXPAi5oJJVn02Wmv5wJHnct/xU7npyoty3b4v5f77n5r3\ntuZu1eeCycwG0+gRV7vxX5J8qLX2zar60SR3Jflr4w5srR1McjBJ1tbW2vr6+oy2yKLY2NiIueBs\n5oJJVnk2zpyxuu+4M1ZnW+W5YHtmg2n0uCzwZJIrtjy/fPTaH2qtPdNa++bo6c8leV2HdQGACVwK\nCDB7PeLqoSRXVdWVVXVxkluSHNp6QFVduuXpm5M83mFdAGAMYQUwH1NfFthaO1VVtyW5N8kFSe5s\nrT1WVe9K8nBr7VCSH6+qNyc5leTZJG+fdl0A4MWEFcD8dPnOVWvtcJLDZ732ji2PfzLJT/ZYCwAY\nT1gBzFeXv0QYAJgvYQUwf+IKABacsAIYBnEFAAtMWAEMh7gCgAUlrACGRVwBwAISVgDDI64AYMEI\nK4BhElcAsECEFcBwiSsAWBDCCmDYxBU
ALABhBTB84goABk5YASwGcQUAAyasABaHuAKAgRJWAItF\nXAHAAAkrgMUjrgBgYIQVwGISVwAwIMIKYHGJKwAYCGEFsNjEFQAMgLACWHziCgDmTFgBLAdxBQBz\nJKwAloe4AoA5EVYAy0VcAcAcCCuA5SOuAGDGhBXAchJXADBDwgpgeYkrAJgRYQWw3MQVAMyAsAJY\nfuIKAPaYsAJYDeIKAPaQsAJYHeIKAPaIsAJYLeIKAPaAsAJYPeIKADoTVgCrSVwBQEfCCmB1iSsA\n6ERYAaw2cQUAHQgrAMQVAExJWAGQiCsAmIqwAuAMcQUA50lYAbCVuAKA8yCsADibuAKAcySsABhH\nXAHAORBWAEwirgBgl4QVANsRVwCwC8IKgJ2IKwDYgbACYDfEFQBsQ1gBsFviCgAmEFYAnAtxBQBj\nCCsAzpW4AoCzCCsAzoe4AoAthBUA50tcAcCIsAJgGuIKACKsAJieuAJg5QkrAHoQVwCsNGEFQC/i\nCoCVJawA6ElcAbCShBU
"text/plain": [
"<matplotlib.figure.Figure at 0x7fdda936c6d0>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"plot(lambda x: max(0, x))"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"### Softplus"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"$$ g(x) = \\log(1 + e^{x}) $$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"* Wygładzona wersja ReLU."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### Softplus wykres"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA1cAAAG2CAYAAACTRXz+AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzt3Xd0XOWd//HPV733YlmWuyz3go0J\nJWCHXoIDgQCppKxDyi7pIWEDu+THhkAKKbtJ2IQEshAgEEIzmBaFboONuy13W5Zly+oa9dE8vz80\nNsZItrCudUfS+3WOjmdGF93vnvOs7HfunWfMOScAAAAAQP9E+T0AAAAAAAwFxBUAAAAAeIC4AgAA\nAAAPEFcAAAAA4AHiCgAAAAA8QFwBAAAAgAc8iSszu9vMqsxsXS/fX2BmDWa2Kvx1kxfnBQAAAIBI\nEePRz/mTpF9Luvcox7zsnLvEo/MBAAAAQETx5MqVc+4lSbVe/CwAAAAAGIwG8j1Xp5rZajN72sym\nDeB5AQAAAOCE8+q2wGNZKWmMcy5gZhdJ+ruk4p4ONLPFkhZLUkJCwtzRo0cP0IgYLEKhkKKi2IsF\n78a6QG9YG+iJX+uiMyQdaAmpIyRlxJsy4m3AZ8DR8TsDPdm8eXO1cy73WMeZc86TE5rZWElPOuem\n9+HYnZLmOeeqj3ZcSUmJKysr82Q+DB2lpaVasGCB32MgwrAu0BvWBnrix7p4em2lvv3wGsVEm35+\n1WwtLMkb0POjb/idgZ6Y2Qrn3LxjHTcgV67MbISk/c45Z2bz1X07Ys1AnBsAAMBPnV0h3fb0Jv3h\nlR2aVZSh//nESSrMSPR7LAAngCdxZWZ/kbRAUo6Z7ZF0s6RYSXLO/VbSFZK+ZGZBSa2SrnZeXTID\nAACIUPsa2vSV+1dqxa46XXvaWH3/oimKi+GWM2Co8iSunHPXHOP7v1b3Vu0AAADDwitbqnX9A2+r\nrbNLv7pmjj48a6TfIwE4wQZqQwsAAIBhIRRy+tWLW3XnC5tVnJei//nEXE3MS/F7LAADgLgCAADw\nSG1zh7724Cq9tPmALptTqFsvm66kOP65BQwX/H87AACAB97eXaev3LdS1YEO/ddlM3TN/CKZsdU6\nMJwQVwAAAP3gnNM9r+3UrUs2akR6gh750mmaMSrd77EA+IC4AgAAOE6B9qC++8gaPbWmUudMydNP\nr5yt9KRYv8cC4BPiCgAA4DiU7WvSl+5boZ3Vzbrhwsla/MHxioriNkBgOCOuAAAA3qe/vlWumx5b\nr5SEGN3/Lx/QB8Zn+z0SgAhAXAEAAPRRc3tQP3hsnf62skKnjs/WL66ZrbzUBL/HAhAhiCsAAIA+\n2LSvUV+5b6W2Vzfra+cU618/VKxobgMEcBjiCgAA4Cicc3rwzXLd/Ph6pSXG6r4vnKLTJuT4PRaA\nCERcAQAA9CLQHtSNj67VY6v26oyJOfr5VbOVmxrv91gAIhRxBQAA0IP1exv0r/e/rZ01zfrmuZP0\n5YUTuQ0QwFERVwAAAIdxzun/lu3WD5/coMykWHYDBNBnxBUAAEBYY1unvve3tXpqTaXOnJSrn39s\nlrJTuA0QQN8QVwAAAJLW7mnQV/+yUnvqWvWdC0p03ZkT+FBgAO8LcQUAAIY155zufX2Xbn1qo7JT\n4vTA4g/o5LFZfo8FYBAirgAAwLBV39Kh7z6yRkvX79eHJufpJ1fOUlZynN9jARikiCsAADAsLdte\no689uErVgXZ9/6LJ+sIZ47kNEEC/EFcAAGBYCXaF9OiWDj2x9A2NzkrSI186TTNHZfg9FoAhgLgC\nAADDRkV9q77+wCot39mpy+cU6paPTFdKPP8cAuANfpsAAIBh4Zl1lfruI2sV7ArpX2bE6carZvs9\nEoAhhrgCAABDWltnl3745Abdt2y3Zo5K1y+vnqOd6970eywAQxBxBQAAhqyyfU3617+s1Ob9AS0+\nc7y+dV6J4mKitNPvwQAMScQVAAAYcpxz+r9lu/X/ntyg1IQY3fO5+TprUq7fYwEY4ogrAAAwpBz+\n2VVnTsrVT6+cpdzUeL/HAjAMEFcAAGDIWL6jVtc/8LaqA+268aIp+vwZ4/jsKgADhrgCAACDXmdX\nSHc+v1m/Kd3GZ1cB8A1xBQAABrXtBwL6+oOrtHpPg66cO0o3XzqNz64C4At+8wAAgEHJOacH3izX\nLU9sUFxMlH7ziZN04YwCv8cCMIwRVwAAYNCpCbTrhr+t1XMb9uv0idn66ZWzNSI9we+xAAxzxBUA\nABhUSsuq9O2H16ihpVP/fvEUfe50Nq0AEBmIKwAAMCi0dXbptqc36U+v7VRJfqru/dx8TSlI83ss\nADiEuAIAABFvw95Gfe3Bt7V5f0CfPX2svnvBZCXERvs9FgC8C3EFAAAiVijk9IdXduiOpWVKT4rV\nPZ+br7Mm5fo9FgD0iLgCAAARqbKhVd/662q9urVG503N120fnams5Di/xwKAXhFXAAAg4jy5Zq9u\nfHSdOoIh3Xb5DF11cpHM2LQCQGQjrgAAQMSob+nQDx5brydW79WsogzdedVsjctJ9nssAOgT4goA\nAESE0rIqfefhNapt7tA3z52kLy2YoJjoKL/HAoA+I64AAICvmtuDunXJRt2/bLcm5afo7mtP1vTC\ndL/HAoD3jbgCAAC+eXNnrb750GqV17Xoi2eO19fPncQW6wAGLeIKAAAMuLbOLv38uc266+XtGpWZ\nqAcXn6r547L8HgsA+oW4AgAAA2r93gZ948HVKtvfpGvmj9aNF09RSjz/JAEw+PGbDAAADIhgV0i/\n/ec23fn8FmUlx+mP156shZPz/B4LADxDXAEAgBNu+4GAvvHQaq0qr9clMwv0w0XTlckHAgMYYogr\nAABwwoRCTve+vlO3PbNJ8THR+uU1c3TprJF+jwUAJwRxBQAATohdNc369sNrtHxHrc6alKvbr5ip\n/LQEv8cCgBOGuAIAAJ4KhZzueX2nbn+mTDFRpts/OlNXzhslM/N7NAA4oYgrAADgmZ3VzfrOw2u0\nfGf31arbPjpDBemJfo8FAAOCuAIAAP0WCjn98bWdumPpJsVGR+mOK2bqirlcrQIwvBBXAACgX3ZU\nN+s7D6/WmzvrtLAkVz+6fKZGpPPeKgDDD3EFAACOS1fI6Y+v7tAdS8sUFxOln1w5Sx89qZCrVQCG\nLeIKAAC8b9sPBPTth9doxa46fWhynv7rshlcrQIw7BFXAACgz7pCTne/skM/ebZM8TFR+tnHZumy\nOVytAgCJuAIAAH20sbJRNzyyRqv3NOjsyXn6r8tn8LlVAHAY4goAABxVe7BLv35xq35Tuk1pibH6\nxdWzdemskVytAoAjEFcAAKBXb+2s1XcfWaNtB5p1+ZxC/fslU5WVHOf3WAAQkYgrAADwHoH2oG5/\nZpP+/MYujUxP1J8+e7IWlOT5PRYARDT
iCgAAvMs/NlXpxkfXqrKxTZ85day+dX6JUuL5JwMAHIsn\nvynN7G5Jl0iqcs5N7+H7JukXki6S1CLpWufcSi/ODQAAvFETaNctT27QY6v2amJeih6+7jTNHZPp\n91gAMGh49T9D/UnSryXd28v3L5RUHP46RdJvwn8CAACfOef02Kq9uuXJDWpq69T1ZxfrywsnKD4m\n2u/RAGBQ8SSunHMvmdnYoxyySNK9zjkn6Q0zyzCzAudcpRfnBwAAx6e8tkU3PbZO/yg7oNlFGfrx\nR2eqZESq32MBwKA0UDdQF0oqP+z5nvBrxBUAAD7o7Arp7ld26M7nt8hM+sElU3XtaWMVHcX26gBw\nvCLu3almtljSYknKzc1VaWmpvwMh4gQCAdYF3oN1gd6wNt5ra32X7lnfofKmkObkReuTU+KUHdyl\nl1/a5fdoA4Z1gd6wNtAfAxVXFZKKDns+Kvzaezjn7pJ0lySVlJS4BQsWnPDhMLiUlpaKdYEjsS7Q\nG9bGOxpaO3XH0k26b9l
"text/plain": [
"<matplotlib.figure.Figure at 0x7fdde8452e10>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"plot(lambda x: math.log(1 + math.exp(x)))"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"### Problem zanikającego gradientu (*vanishing gradient problem*)"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"* Sigmoidalne funkcje aktywacji ograniczają wartości na wyjściach neuronów do niewielkich przedziałów ($(-1, 1)$, $(0, 1)$ itp.).\n",
"* Jeżeli sieć ma wiele warstw, to podczas propagacji wstecznej mnożymy przez siebie wiele małych wartości → obliczony gradient jest mały.\n",
"* Im więcej warstw, tym silniejszy efekt zanikania."
]
},
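{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"source": [
"A small numerical illustration (not part of the original slides): the derivative of the logistic function is at most 0.25, so backpropagation through many sigmoid layers multiplies many such small factors, and the product shrinks exponentially with depth."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"def sigmoid(z):\n",
"    return 1 / (1 + np.exp(-z))\n",
"\n",
"def sigmoid_grad(z):\n",
"    s = sigmoid(z)\n",
"    return s * (1 - s)  # at most 0.25, reached at z = 0\n",
"\n",
"z = 1.0  # a typical pre-activation value, chosen for illustration\n",
"for depth in [1, 2, 5, 10, 20]:\n",
"    # backpropagation multiplies roughly one such factor per sigmoid layer\n",
"    print(depth, sigmoid_grad(z) ** depth)"
]
},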
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### Sposoby na zanikający gradient\n",
"\n",
"* Modyfikacja algorytmu optymalizacji (*RProp*, *RMSProp*)\n",
"* Użycie innej funckji aktywacji (ReLU, softplus)\n",
"* Dodanie warstw *dropout*\n",
"* Nowe architektury (LSTM itp.)\n",
"* Więcej danych, zwiększenie mocy obliczeniowej"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"## 4.3. Wielowarstwowe sieci neuronowe\n",
"\n",
"czyli _Artificial Neural Networks_ (ANN) lub _Multi-Layer Perceptrons_ (MLP)"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"<img src=\"nn1.png\" width=\"70%\"/>"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Architektura sieci\n",
"\n",
"* Sieć neuronowa jako graf neuronów. \n",
"* Organizacja sieci przez warstwy.\n",
"* Najczęściej stosowane są sieci jednokierunkowe i gęste."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* $n$-warstwowa sieć neuronowa ma $n+1$ warstw (nie liczymy wejścia).\n",
"* Rozmiary sieci określane poprzez liczbę neuronów lub parametrów."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Sieć neuronowa jednokierunkowa (*feedforward*)\n",
"\n",
"* Mając daną $n$-warstwową sieć neuronową oraz jej parametry $\\Theta^{(1)}, \\ldots, \\Theta^{(L)} $ oraz $\\beta^{(1)}, \\ldots, \\beta^{(L)} $ liczymy:<br/><br/> \n",
"$$a^{(l)} = g^{(l)}\\left( a^{(l-1)} \\Theta^{(l)} + \\beta^{(l)} \\right). $$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"<img src=\"nn2.png\" width=70%/>"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Funkcje $g^{(l)}$ to tzw. **funkcje aktywacji**.<br/>\n",
"Dla $i = 0$ przyjmujemy $a^{(0)} = \\mathrm{x}$ (wektor wierszowy cech) oraz $g^{(0)}(x) = x$ (identyczność)."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"* Parametry $\\Theta$ to wagi na połączeniach miedzy neuronami dwóch warstw.<br/>\n",
"Rozmiar macierzy $\\Theta^{(l)}$, czyli macierzy wag na połączeniach warstw $a^{(l-1)}$ i $a^{(l)}$, to $\\dim(a^{(l-1)}) \\times \\dim(a^{(l)})$."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Parametry $\\beta$ zastępują tutaj dodawanie kolumny z jedynkami do macierzy cech.<br/>Macierz $\\beta^{(l)}$ ma rozmiar równy liczbie neuronów w odpowiedniej warstwie, czyli $1 \\times \\dim(a^{(l)})$."
]
},
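{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"source": [
"A minimal NumPy sketch of the forward pass $a^{(l)} = g^{(l)}\\left( a^{(l-1)} \\Theta^{(l)} + \\beta^{(l)} \\right)$ (not part of the original slides): a 2-layer network with made-up random weights, tanh in the hidden layer and softmax on the output."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"def softmax(z):\n",
"    e = np.exp(z - z.max())\n",
"    return e / e.sum()\n",
"\n",
"rng = np.random.default_rng(0)\n",
"\n",
"# Layer sizes: 3 inputs -> 4 hidden neurons -> 2 output classes\n",
"Theta1, beta1 = rng.normal(size=(3, 4)), np.zeros((1, 4))\n",
"Theta2, beta2 = rng.normal(size=(4, 2)), np.zeros((1, 2))\n",
"\n",
"x = np.array([[0.5, -1.0, 2.0]])  # a^(0): row vector of features\n",
"\n",
"a1 = np.tanh(x @ Theta1 + beta1)   # a^(1) = g^(1)(a^(0) Theta^(1) + beta^(1))\n",
"a2 = softmax(a1 @ Theta2 + beta2)  # a^(2) = softmax(a^(1) Theta^(2) + beta^(2))\n",
"print(a2)                          # class probabilities, sum to 1"
]
},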
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"* **Klasyfikacja**: dla ostatniej warstwy $L$ (o rozmiarze równym liczbie klas) przyjmuje się $g^{(L)}(x) = \\mathop{\\mathrm{softmax}}(x)$.\n",
"* **Regresja**: pojedynczy neuron wyjściowy jak na obrazku. Funkcją aktywacji może wtedy być np. funkcja identycznościowa."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Pozostałe funkcje aktywacji najcześciej mają postać sigmoidy, np. sigmoidalna, tangens hiperboliczny.\n",
"* Mogą mieć też inny kształt, np. ReLU, leaky ReLU, maxout."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### How do we train neural networks?"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"* W poznanych do tej pory algorytmach (regresja liniowa, regresja logistyczna) do uczenia używaliśmy funkcji kosztu, jej gradientu oraz algorytmu gradientu prostego (GD/SGD)"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"* Dla sieci neuronowych potrzebowalibyśmy również znaleźć gradient funkcji kosztu."
2021-05-05 14:43:56 +02:00
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* This reduces to a more general problem:<br/>how do we compute the gradient $\\nabla f(x)$ for a given function $f$ and input vector $x$?"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"## 4.4. Metoda propagacji wstecznej wprowadzenie"
2021-05-05 14:43:56 +02:00
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### The derivative of a function\n",
"\n",
"* The **derivative** measures how quickly the value of a function changes as its argument changes:\n",
"\n",
"$$ \\frac{d f(x)}{d x} = \\lim_{h \\to 0} \\frac{ f(x + h) - f(x) }{ h } $$"
]
},
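{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"source": [
"A tiny sketch (not part of the original slides) of approximating the derivative numerically, straight from this definition, with a small but nonzero step `h`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"def numerical_derivative(f, x, h=1e-6):\n",
"    # (f(x + h) - f(x)) / h with a small but nonzero h\n",
"    return (f(x + h) - f(x)) / h\n",
"\n",
"f = lambda x: x ** 2\n",
"print(numerical_derivative(f, 3.0))  # approximately 6"
]
},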
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Partial derivatives and the gradient\n",
"\n",
"* A **partial derivative** measures how quickly the value of a function changes as a *single argument* changes."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"* **Gradient** to wektor pochodnych cząstkowych:\n",
2021-05-05 14:43:56 +02:00
"\n",
2021-05-05 15:23:24 +02:00
"$$ \\nabla f = \\left( \\frac{\\partial f}{\\partial x_1}, \\ldots, \\frac{\\partial f}{\\partial x_n} \\right) $$"
2021-05-05 14:43:56 +02:00
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### Gradient: examples"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"$$ f(x_1, x_2) = x_1 + x_2 \\qquad \\to \\qquad \\frac{\\partial f}{\\partial x_1} = 1, \\quad \\frac{\\partial f}{\\partial x_2} = 1, \\quad \\nabla f = (1, 1) $$ "
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"$$ f(x_1, x_2) = x_1 \\cdot x_2 \\qquad \\to \\qquad \\frac{\\partial f}{\\partial x_1} = x_2, \\quad \\frac{\\partial f}{\\partial x_2} = x_1, \\quad \\nabla f = (x_2, x_1) $$ "
]
},
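  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "source": [
    "Powyższe wzory łatwo sprawdzić numerycznie. Poniższy szkic (nazwy pomocnicze są umowne) przybliża pochodne cząstkowe funkcji $f(x_1, x_2) = x_1 \\cdot x_2$ ilorazami różnicowymi i porównuje je z wynikiem analitycznym $\\nabla f = (x_2, x_1)$."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "outputs": [],
   "source": [
    "def gradient_numerycznie(f, x1, x2, h=1e-6):\n",
    "    # pochodne cząstkowe przybliżone ilorazami różnicowymi\n",
    "    df_dx1 = (f(x1 + h, x2) - f(x1, x2)) / h\n",
    "    df_dx2 = (f(x1, x2 + h) - f(x1, x2)) / h\n",
    "    return df_dx1, df_dx2\n",
    "\n",
    "f = lambda x1, x2: x1 * x2\n",
    "print(gradient_numerycznie(f, 3.0, -4.0))  # ok. (-4.0, 3.0), czyli (x2, x1)"
   ]
  },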
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"$$ f(x_1, x_2) = \\max(x_1 + x_2) \\hskip{12em} \\\\\n",
"\\to \\qquad \\frac{\\partial f}{\\partial x_1} = \\mathbb{1}_{x \\geq y}, \\quad \\frac{\\partial f}{\\partial x_2} = \\mathbb{1}_{y \\geq x}, \\quad \\nabla f = (\\mathbb{1}_{x \\geq y}, \\mathbb{1}_{y \\geq x}) $$ "
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Własności pochodnych cząstkowych\n",
    "\n",
    "Jeżeli $f(x, y, z) = (x + y) \\, z$ oraz $x + y = q$, to:\n",
    "$$f = q z,\n",
    "\\quad \\frac{\\partial f}{\\partial q} = z,\n",
    "\\quad \\frac{\\partial f}{\\partial z} = q,\n",
    "\\quad \\frac{\\partial q}{\\partial x} = 1,\n",
    "\\quad \\frac{\\partial q}{\\partial y} = 1 $$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
     "slide_type": "subslide"
}
},
"source": [
"### Reguła łańcuchowa\n",
"\n",
"$$ \\frac{\\partial f}{\\partial x} = \\frac{\\partial f}{\\partial q} \\, \\frac{\\partial q}{\\partial x},\n",
"\\quad \\frac{\\partial f}{\\partial y} = \\frac{\\partial f}{\\partial q} \\, \\frac{\\partial q}{\\partial y} $$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Propagacja wsteczna prosty przykład"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
   "outputs": [],
"source": [
"# Dla ustalonego wejścia\n",
"x = -2; y = 5; z = -4"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(3, -12)\n"
]
}
],
"source": [
"# Krok w przód\n",
"q = x + y\n",
"f = q * z\n",
"print(q, f)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[-4, -4, 3]\n"
]
}
],
"source": [
"# Propagacja wsteczna dla f = q * z\n",
"dz = q\n",
"dq = z\n",
"# Propagacja wsteczna dla q = x + y\n",
"dx = 1 * dq # z reguły łańcuchowej\n",
"dy = 1 * dq # z reguły łańcuchowej\n",
"print([dx, dy, dz])"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"<img src=\"exp1.png\" />"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Właśnie tak wygląda obliczanie pochodnych metodą propagacji wstecznej!"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"* Spróbujmy czegoś bardziej skomplikowanego:<br/>metodą propagacji wstecznej obliczmy pochodną funkcji sigmoidalnej."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Propagacja wsteczna funkcja sigmoidalna"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"Funkcja sigmoidalna:\n",
"\n",
"$$f(\\theta,x) = \\frac{1}{1+e^{-(\\theta_0 x_0 + \\theta_1 x_1 + \\theta_2)}}$$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"$$\n",
"\\begin{array}{lcl}\n",
"f(x) = \\frac{1}{x} \\quad & \\rightarrow & \\quad \\frac{df}{dx} = -\\frac{1}{x^2} \\\\\n",
"f_c(x) = c + x \\quad & \\rightarrow & \\quad \\frac{df}{dx} = 1 \\\\\n",
"f(x) = e^x \\quad & \\rightarrow & \\quad \\frac{df}{dx} = e^x \\\\\n",
"f_a(x) = ax \\quad & \\rightarrow & \\quad \\frac{df}{dx} = a \\\\\n",
"\\end{array}\n",
"$$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"<img src=\"exp2.png\" />"
]
},
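  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "source": [
    "Składając powyższe proste pochodne regułą łańcuchową, otrzymujemy wzór na pochodną funkcji sigmoidalnej $\\sigma(z) = \\frac{1}{1+e^{-z}}$:\n",
    "\n",
    "$$ \\sigma^{\\prime}(z) \\; = \\; \\frac{e^{-z}}{\\left( 1+e^{-z} \\right)^2} \\; = \\; \\frac{1}{1+e^{-z}} \\cdot \\frac{e^{-z}}{1+e^{-z}} \\; = \\; \\sigma(z) \\, \\left( 1 - \\sigma(z) \\right) $$\n",
    "\n",
    "Z tej własności korzysta linia `ddot = (1 - f) * f` w kodzie poniżej."
   ]
  },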
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0.3932238664829637, -0.5898357997244456]\n",
"[-0.19661193324148185, -0.3932238664829637, 0.19661193324148185]\n"
]
}
],
"source": [
"# Losowe wagi i dane\n",
"w = [2,-3,-3]\n",
"x = [-1, -2]\n",
"\n",
"# Krok w przód\n",
"dot = w[0]*x[0] + w[1]*x[1] + w[2]\n",
"f = 1.0 / (1 + math.exp(-dot)) # funkcja sigmoidalna\n",
"\n",
"# Krok w tył\n",
"ddot = (1 - f) * f # pochodna funkcji sigmoidalnej\n",
"dx = [w[0] * ddot, w[1] * ddot]\n",
"dw = [x[0] * ddot, x[1] * ddot, 1.0 * ddot]\n",
"\n",
"print(dx)\n",
"print(dw)"
]
},
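  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "source": [
    "Poprawność gradientów uzyskanych metodą propagacji wstecznej można sprawdzić numerycznie, porównując je z ilorazami różnicowymi (tzw. *gradient checking*). Poniżej szkic takiego sprawdzenia dla wag `w` z powyższego przykładu (nazwy pomocnicze są umowne)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "outputs": [],
   "source": [
    "def f_sigmoidalna(w, x):\n",
    "    dot = w[0]*x[0] + w[1]*x[1] + w[2]\n",
    "    return 1.0 / (1 + math.exp(-dot))\n",
    "\n",
    "h = 1e-6\n",
    "dw_numerycznie = []\n",
    "for i in range(3):\n",
    "    w_plus = list(w)\n",
    "    w_plus[i] += h\n",
    "    # iloraz różnicowy względem i-tej wagi\n",
    "    dw_numerycznie.append((f_sigmoidalna(w_plus, x) - f_sigmoidalna(w, x)) / h)\n",
    "\n",
    "print(dw_numerycznie)  # powinno być bliskie dw z propagacji wstecznej\n",
    "print(dw)"
   ]
  },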
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Obliczanie gradientów podsumowanie"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Gradient $f$ dla $x$ mówi, jak zmieni się całe wyrażenie przy zmianie wartości $x$."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Gradienty łączymy, korzystając z **reguły łańcuchowej**."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* W kroku \"wstecz\" gradienty informują, które części grafu powinny być zwiększone lub zmniejszone (i z jaką siłą), aby zwiększyć wartość na wyjściu."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* W kontekście implementacji chcemy dzielić funkcję $f$ na części, dla których można łatwo obliczyć gradienty."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"## 4.5. Uczenie wielowarstwowych sieci neuronowych metodą propagacji wstecznej"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"Mając algorytm SGD oraz gradienty wszystkich wag, moglibyśmy trenować każdą sieć."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Niech $\\Theta = (\\Theta^{(1)},\\Theta^{(2)},\\Theta^{(3)},\\beta^{(1)},\\beta^{(2)},\\beta^{(3)})$\n",
"* Funkcja sieci neuronowej z grafiki:\n",
"$$\\small h_\\Theta(x) = \\tanh(\\tanh(\\tanh(x\\Theta^{(1)}+\\beta^{(1)})\\Theta^{(2)} + \\beta^{(2)})\\Theta^{(3)} + \\beta^{(3)})$$\n",
"* Funkcja kosztu dla regresji:\n",
"$$J(\\Theta) = \\dfrac{1}{2m} \\sum_{i=1}^{m} (h_\\Theta(x^{(i)})- y^{(i)})^2 $$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Jak obliczymy gradienty?\n",
"\n",
"$$\\nabla_{\\Theta^{(l)}} J(\\Theta) = ? \\quad \\nabla_{\\beta^{(l)}} J(\\Theta) = ?$$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### W kierunku propagacji wstecznej\n",
"\n",
"* Pewna (niewielka) zmiana wagi $\\Delta z^l_j$ dla $j$-ego neuronu w warstwie $l$ pociąga za sobą (niewielką) zmianę kosztu: \n",
"\n",
"$$\\frac{\\partial J(\\Theta)}{\\partial z^{l}_j} \\Delta z^{l}_j$$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Jeżeli $\\frac{\\partial J(\\Theta)}{\\partial z^{l}_j}$ jest duża, $\\Delta z^l_j$ ze znakiem przeciwnym zredukuje koszt.\n",
"* Jeżeli $\\frac{\\partial J(\\Theta)}{\\partial z^l_j}$ jest bliska zeru, koszt nie będzie mocno poprawiony."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"* Definiujemy błąd $\\delta^l_j$ neuronu $j$ w warstwie $l$: \n",
"\n",
"$$\\delta^l_j := \\dfrac{\\partial J(\\Theta)}{\\partial z^l_j}$$ \n",
"$$\\delta^l := \\nabla_{z^l} J(\\Theta) \\quad \\textrm{ (zapis wektorowy)} $$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Podstawowe równania propagacji wstecznej\n",
"\n",
"$$\n",
"\\begin{array}{rcll}\n",
"\\delta^L & = & \\nabla_{a^L}J(\\Theta) \\odot { \\left( g^{L} \\right) }^{\\prime} \\left( z^L \\right) & (BP1) \\\\[2mm]\n",
"\\delta^{l} & = & \\left( \\left( \\Theta^{l+1} \\right) \\! ^\\top \\, \\delta^{l+1} \\right) \\odot {{ \\left( g^{l} \\right) }^{\\prime}} \\left( z^{l} \\right) & (BP2)\\\\[2mm]\n",
"\\nabla_{\\beta^l} J(\\Theta) & = & \\delta^l & (BP3)\\\\[2mm]\n",
"\\nabla_{\\Theta^l} J(\\Theta) & = & a^{l-1} \\odot \\delta^l & (BP4)\\\\\n",
"\\end{array}\n",
"$$\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### (BP1)\n",
"$$ \\delta^L_j \\; = \\; \\frac{ \\partial J }{ \\partial a^L_j } \\, g' \\!\\! \\left( z^L_j \\right) $$\n",
"$$ \\delta^L \\; = \\; \\nabla_{a^L}J(\\Theta) \\odot { \\left( g^{L} \\right) }^{\\prime} \\left( z^L \\right) $$\n",
"Błąd w ostatniej warstwie jest iloczynem szybkości zmiany kosztu względem $j$-tego wyjścia i szybkości zmiany funkcji aktywacji w punkcie $z^L_j$."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### (BP2)\n",
"$$ \\delta^{l} \\; = \\; \\left( \\left( \\Theta^{l+1} \\right) \\! ^\\top \\, \\delta^{l+1} \\right) \\odot {{ \\left( g^{l} \\right) }^{\\prime}} \\left( z^{l} \\right) $$\n",
"Aby obliczyć błąd w $l$-tej warstwie, należy przemnożyć błąd z następnej ($(l+1)$-szej) warstwy przez transponowany wektor wag, a uzyskaną macierz pomnożyć po współrzędnych przez szybkość zmiany funkcji aktywacji w punkcie $z^l$."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### (BP3)\n",
"$$ \\nabla_{\\beta^l} J(\\Theta) \\; = \\; \\delta^l $$\n",
"Błąd w $l$-tej warstwie jest równy wartości gradientu funkcji kosztu."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"#### (BP4)\n",
"$$ \\nabla_{\\Theta^l} J(\\Theta) \\; = \\; a^{l-1} \\odot \\delta^l $$\n",
"Gradient funkcji kosztu względem wag $l$-tej warstwy można obliczyć jako iloczyn po współrzędnych $a^{l-1}$ przez $\\delta^l$."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Algorytm propagacji wstecznej"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"Dla pojedynczego przykładu $(x,y)$:\n",
"1. **Wejście**: Ustaw aktywacje w warstwie cech $a^{(0)}=x$ \n",
"2. **Feedforward:** dla $l=1,\\dots,L$ oblicz \n",
"$z^{(l)} = a^{(l-1)} \\Theta^{(l)} + \\beta^{(l)}$ oraz $a^{(l)}=g^{(l)} \\!\\! \\left( z^{(l)} \\right)$\n",
"3. **Błąd wyjścia $\\delta^{(L)}$:** oblicz wektor $$\\delta^{(L)}= \\nabla_{a^{(L)}}J(\\Theta) \\odot {g^{\\prime}}^{(L)} \\!\\! \\left( z^{(L)} \\right) $$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"4. **Propagacja wsteczna błędu:** dla $l = L-1,L-2,\\dots,1$ oblicz $$\\delta^{(l)} = \\delta^{(l+1)}(\\Theta^{(l+1)})^T \\odot {g^{\\prime}}^{(l)} \\!\\! \\left( z^{(l)} \\right) $$\n",
"5. **Gradienty:** \n",
" * $\\dfrac{\\partial}{\\partial \\Theta_{ij}^{(l)}} J(\\Theta) = a_i^{(l-1)}\\delta_j^{(l)} \\textrm{ oraz } \\dfrac{\\partial}{\\partial \\beta_{j}^{(l)}} J(\\Theta) = \\delta_j^{(l)}$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"W naszym przykładzie:\n",
"\n",
"$$\\small J(\\Theta) = \\frac{1}{2} \\left( a^{(L)} - y \\right) ^2 $$\n",
"$$\\small \\dfrac{\\partial}{\\partial a^{(L)}} J(\\Theta) = a^{(L)} - y$$\n",
"\n",
"$$\\small \\tanh^{\\prime}(x) = 1 - \\tanh^2(x)$$"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"<img src=\"nn3.png\" width=\"65%\"/>"
]
},
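  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "source": [
    "Zanim przejdziemy do gotowych bibliotek, poniżej poglądowy szkic implementacji powyższego algorytmu w NumPy, przy założeniach z wykładu: konwencja wierszowa, aktywacja $\\tanh$ we wszystkich warstwach, koszt $\\frac{1}{2} \\left( h_\\Theta(x) - y \\right)^2$. Nazwy funkcji oraz rozmiary warstw są umowne."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "def forward(x, Thetas, betas):\n",
    "    # krok w przód: zwraca pobudzenia z oraz aktywacje a kolejnych warstw\n",
    "    a = x\n",
    "    zs, activations = [], [a]\n",
    "    for Theta, beta in zip(Thetas, betas):\n",
    "        z = a @ Theta + beta\n",
    "        a = np.tanh(z)\n",
    "        zs.append(z)\n",
    "        activations.append(a)\n",
    "    return zs, activations\n",
    "\n",
    "def backprop(x, y, Thetas, betas):\n",
    "    zs, activations = forward(x, Thetas, betas)\n",
    "    # (BP1): błąd w ostatniej warstwie (koszt kwadratowy, aktywacja tanh)\n",
    "    delta = (activations[-1] - y) * (1.0 - np.tanh(zs[-1]) ** 2)\n",
    "    grads_Theta = [None] * len(Thetas)\n",
    "    grads_beta = [None] * len(betas)\n",
    "    for l in range(len(Thetas) - 1, -1, -1):\n",
    "        grads_Theta[l] = activations[l].T @ delta  # (BP4)\n",
    "        grads_beta[l] = delta                      # (BP3)\n",
    "        if l > 0:                                  # (BP2)\n",
    "            delta = (delta @ Thetas[l].T) * (1.0 - np.tanh(zs[l - 1]) ** 2)\n",
    "    return grads_Theta, grads_beta\n",
    "\n",
    "# przykład użycia: mała sieć 2-3-1 i jeden przykład uczący\n",
    "rng = np.random.default_rng(42)\n",
    "Thetas = [rng.normal(size=(2, 3)), rng.normal(size=(3, 1))]\n",
    "betas = [np.zeros((1, 3)), np.zeros((1, 1))]\n",
    "x = np.array([[0.5, -1.0]])\n",
    "y = np.array([[0.3]])\n",
    "grads_Theta, grads_beta = backprop(x, y, Thetas, betas)\n",
    "print([g.shape for g in grads_Theta])  # [(2, 3), (3, 1)], czyli jak wymiary Theta"
   ]
  },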
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Algorytm SGD z propagacją wsteczną\n",
"\n",
"Pojedyncza iteracja:\n",
"1. Dla parametrów $\\Theta = (\\Theta^{(1)},\\ldots,\\Theta^{(L)})$ utwórz pomocnicze macierze zerowe $\\Delta = (\\Delta^{(1)},\\ldots,\\Delta^{(L)})$ o takich samych wymiarach (dla uproszczenia opuszczono wagi $\\beta$)."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"2. Dla $m$ przykładów we wsadzie (*batch*), $i = 1,\\ldots,m$:\n",
" * Wykonaj algortym propagacji wstecznej dla przykładu $(x^{(i)}, y^{(i)})$ i przechowaj gradienty $\\nabla_{\\Theta}J^{(i)}(\\Theta)$ dla tego przykładu;\n",
" * $\\Delta := \\Delta + \\dfrac{1}{m}\\nabla_{\\Theta}J^{(i)}(\\Theta)$\n",
"3. Wykonaj aktualizację wag: $\\Theta := \\Theta - \\alpha \\Delta$"
]
},
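  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "source": [
    "Poniżej szkic takiej pojedynczej iteracji, korzystający z funkcji `backprop` z wcześniejszego szkicu (te same założenia i umowne nazwy)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "outputs": [],
   "source": [
    "def sgd_step(batch_x, batch_y, Thetas, betas, alpha=0.1):\n",
    "    # uśrednij gradienty z propagacji wstecznej po przykładach we wsadzie\n",
    "    m = len(batch_x)\n",
    "    Delta_Theta = [np.zeros_like(T) for T in Thetas]\n",
    "    Delta_beta = [np.zeros_like(b) for b in betas]\n",
    "    for xi, yi in zip(batch_x, batch_y):\n",
    "        grads_Theta, grads_beta = backprop(xi[None, :], yi[None, :], Thetas, betas)\n",
    "        for l in range(len(Thetas)):\n",
    "            Delta_Theta[l] += grads_Theta[l] / m\n",
    "            Delta_beta[l] += grads_beta[l] / m\n",
    "    # aktualizacja wag\n",
    "    for l in range(len(Thetas)):\n",
    "        Thetas[l] -= alpha * Delta_Theta[l]\n",
    "        betas[l] -= alpha * Delta_beta[l]\n",
    "\n",
    "# przykład użycia: jeden krok SGD na losowym wsadzie 16 przykładów\n",
    "batch_x = rng.normal(size=(16, 2))\n",
    "batch_y = rng.normal(size=(16, 1))\n",
    "sgd_step(batch_x, batch_y, Thetas, betas)"
   ]
  },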
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Propagacja wsteczna podsumowanie\n",
"\n",
"* Algorytm pierwszy raz wprowadzony w latach 70. XX w.\n",
"* W 1986 David Rumelhart, Geoffrey Hinton i Ronald Williams pokazali, że jest znacznie szybszy od wcześniejszych metod.\n",
"* Obecnie najpopularniejszy algorytm uczenia sieci neuronowych."
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
    "## 4.6. Przykłady implementacji wielowarstwowych sieci neuronowych"
]
},
{
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
}
},
"source": [
    "### Uwaga!\n",
    "\n",
    "Poniższe przykłady wykorzystują interfejs [Keras](https://keras.io), który jest częścią biblioteki [TensorFlow](https://www.tensorflow.org).\n",
    "\n",
    "Aby uruchomić TensorFlow w środowisku Jupyter, należy wykonać następujące czynności:\n",
    "\n",
    "#### Przed pierwszym uruchomieniem (wystarczy wykonać tylko raz)\n",
    "\n",
"Instalacja biblioteki TensorFlow w środowisku Anaconda:\n",
"\n",
"1. Uruchom *Anaconda Navigator*\n",
"1. Wybierz kafelek *CMD.exe Prompt*\n",
"1. Kliknij przycisk *Launch*\n",
"1. Pojawi się konsola. Wpisz następujące polecenia, każde zatwierdzając wciśnięciem klawisza Enter:\n",
"```\n",
"conda create -n tf tensorflow\n",
"conda activate tf\n",
"conda install pandas matplotlib\n",
"jupyter notebook\n",
"```\n",
"\n",
"#### Przed każdym uruchomieniem\n",
"\n",
"Jeżeli chcemy korzystać z biblioteki TensorFlow, to środowisko Jupyter Notebook należy uruchomić w następujący sposób:\n",
"\n",
"1. Uruchom *Anaconda Navigator*\n",
"1. Wybierz kafelek *CMD.exe Prompt*\n",
"1. Kliknij przycisk *Launch*\n",
"1. Pojawi się konsola. Wpisz następujące polecenia, każde zatwierdzając wciśnięciem klawisza Enter:\n",
"```\n",
"conda activate tf\n",
"jupyter notebook\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"### Przykład: MNIST\n",
"\n",
"_Modified National Institute of Standards and Technology database_"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "fragment"
}
},
"source": [
"* Zbiór cyfr zapisanych pismem odręcznym\n",
"* 60 000 przykładów uczących, 10 000 przykładów testowych\n",
"* Rozdzielczość każdego przykładu: 28 × 28 = 784 piksele"
]
},
{
"cell_type": "code",
   "execution_count": 52,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [],
"source": [
"# źródło: https://github.com/keras-team/keras/examples/minst_mlp.py\n",
"\n",
"from tensorflow import keras\n",
"from tensorflow.keras.datasets import mnist\n",
"from tensorflow.keras.layers import Dense, Dropout\n",
"\n",
"# załaduj dane i podziel je na zbiory uczący i testowy\n",
"(x_train, y_train), (x_test, y_test) = mnist.load_data()"
]
},
{
"cell_type": "code",
   "execution_count": 53,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"from matplotlib import pyplot as plt\n",
"\n",
"def draw_examples(examples, captions=None):\n",
" plt.figure(figsize=(16, 4))\n",
" m = len(examples)\n",
" for i, example in enumerate(examples):\n",
" plt.subplot(100 + m * 10 + i + 1)\n",
" plt.imshow(example, cmap=plt.get_cmap('gray'))\n",
" plt.show()\n",
" if captions is not None:\n",
" print(6 * ' ' + (10 * ' ').join(str(captions[i]) for i in range(m)))"
]
},
{
"cell_type": "code",
   "execution_count": 54,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA54AAACOCAYAAABZsdfhAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAcc0lEQVR4nO3deZBU5bnH8ecVAVFERJAQEYYIQYhsAgpeC0gAV2SRgBL2GKHEBVJCgUoMxiCIStUAEkUujCAlWGHVSJCwSFRCgQTvBVkGjMDgBAYF2Qxc9Nw/6O15menpnjnn9Onu76eKmvOb093nnebh9Lycfvo1juMIAAAAAABeuSTVAwAAAAAAZDYmngAAAAAATzHxBAAAAAB4ioknAAAAAMBTTDwBAAAAAJ5i4gkAAAAA8FS5Jp7GmLuMMbuNMXuNMePcGhTSD7WAMGoBItQBoqgFiFAHiKIWspcp6zqexpgKIrJHRLqKSIGIbBaRfo7jfO7e8JAOqAWEUQsQoQ4QRS1AhDpAFLWQ3S4tx31vEZG9juN8ISJijFkoIj1EpMTCMcaUbZaLVDrqOE6tUm5DLWQBx3FMAjdLqhaog7TEOQFh1AJEJKHXB+ogO3BOQFixtVCet9peJyIHY3JB6HvILPsTuA21gDBqIfNxTkAYtYBEUQfZgXMCwoqthfJc8Szuf7cu+h8JY8wwERlWjuMg+KgFhJVaC9RBVuCcgDBqASLUAaKohSxWnolngYhcH5PrishX9o0cx5klIrNEuFSewagFhJVaC9RBVuCcgDBqASLUAaKohSxWnrfabhaRRsaYBsaYSiLyoIiscGdYSDPUAsKoBYhQB4iiFiBCHSCKWshiZb7i6TjOeWPMYyKySkQqiMgcx3F2uDYypA1qAWHUAkSoA0RRCxChDhBFLWS3Mi+nUqaDcak8HX3qOE4btx+UWkg/CX6qbVKog7TEOQFh1AJEhNcHRHBOQFixtVCet9oCAAAAAFAqJp4AAAAAAE8x8QQAAAAAeIqJJwAAAADAU0w8AQAAAACeYuIJAAAAAPAUE08AAAAAgKeYeAIAAAAAPMXEEwAAAADgqUtTPQAgk7Vu3Vrlxx57TOVBgwapPG/ePJWnT5+u8tatW10cHQAAALySm5ur8hNPPBHZ3r59u9rXrVs3lffv3+/dwFKEK54AAAAAAE8x8QQAAAAAeIq32iaoQoUKKl911VUJ39d+e+Xll1+ucuPGjVV+9NFHVX755ZdV7tevn8r/+c9/VJ48eXJk+7nnnkt4nCi/li1bqrx69WqVq1WrprLjOCoPHDhQ5e7du6t8zTXXlHOEyASdO3dWecGCBSp37NhR5d27d3s+Jnhj/PjxKtvn9Esu0f9/3KlTJ5U//PBDT8YFwB1XXnmlylWrVlX53nvvVblWrVoqT506VeWzZ8+6ODokKycnR+UBAwao/MMPP0S2mzRpovbdeOONKvNWWwAAAAAAksTEEwAAAADgKSaeAAAAAABPZU2PZ7169VSuVKmSyrfddpvKt99+u8rVq1dXuXfv3q6NraCgQOVp06ap3KtXL5VPnjyp8meffaYyPT3+uuWWWyLbixcvVvvsXmC7p9P+uzx37pzKdk9nu3btVLaXV7Hvn+k6dOgQ2bafq6VLl/o9HN+0bdtW5c2bN6doJHDbkCFDVB47dqzKsf1BxbHPMQBSL7bvz/433b59e5VvuummpB67Tp06Kscu1wH/FRUVqbxhwwaV7c/uyDZc8QQAAAAAeIqJJwAAAADAU0w8AQAAAACeytgeT3s9xbVr16qczDqcbrN7dOx12k6dOqWyvUZfYWGhyseOHVOZNfvcZa+7evPNN6v81ltvRbbtXovS5OfnqzxlyhSVFy5cqPLHH3+ssl07kyZNSur46S52zcJGjRqpfZnU42mv1digQQOV69evr7IxxvMxwRv23+Vll12WopGgLG699VaVY9fws9fX/dnPfhb3sUaPHq3yV199pbL9WRSxr0UiIps2bYo/WLjGXn9x1KhRKvfv3z+yXaVKFbXPPl8fPHhQZfuzIOy1H/v27avyzJkzVd61a1cJo4YXTp8+rXImrsVZHlzxBAAAAAB4ioknAAAAAMBTTDwBAAAAAJ7K2B7PAwcOqPz111+r7GaPp91Hcfz4cZV//vOfq2yvtTh//nzXxgL3vf766yr369fPtce2+0WrVq2qsr0ma2xPo4hI8+bNXRtLOho0aFBke+PGjSkcibfs3uGHH35YZbu3i56e9NGlSxeVH3/88bi3t/9uu3XrpvLhw4fdGRgS8sADD6icm5urcs2aNSPbdi/f+vXrVa5Vq5bKL730Utxj249n3//BBx+Me38kzv6d8cUXX1TZroMrr7wy4ce2P+vhzjvvVLlixYoq2+eA2BorLsNf1atXV7lFixapGUhAccUTAAAAAOApJp4AAAAAAE8x8QQAAAAAeCpjezy/+eYblceMGaOy3Rfzz3/+U+Vp06bFffxt27ZFtrt27ar22Wv42Gt1jRw5Mu5jI7Vat26t8r333qtyvDUS7Z7Md999V+WXX35ZZXtdNrsO7TVaf/GLXyQ8lmxgr2+ZqWbPnh13v90jhOCy116cO3euyqV9/oDd98cacd669FL9a1KbNm1UfuONN1S2133esGFDZPv5559X+z766COVK1eurPI777yj8h133BF3rFu2bIm7H2XXq1cvlX/zm9+U+bH27dunsv07pL2OZ8OGDct8LPjPPgfUq1cv4fu2bdtWZbufNxPO99nxWxsAAAAAIGVKnXgaY+YYY44YY7bHfK+GMWa1MSY/9PVqb4eJIKAWEEYtQIQ6QBS1gDBqASLUAYqXyBXPPBG5y/reOBFZ4zhOIxFZE8rIfHlCLeCCPKEWQB0gKk+oBVyQJ9QCqAMUo9QeT8dxNhhjcqxv9xCRTqHtN0VkvYiMdXNgblu2bJnKa9euVfnkyZMq2+vuPPTQQyrH9urZPZ22HTt2qDxs2LC4tw+qTKkFW8uWLVVevXq1ytWqVVPZcRyVV65cGdm21/js2LGjyuPHj1fZ7t0rKipS+bPPPlP5hx9+UNnuP7XXBd26dat4IVW1YK9bWrt2bTcfPrBK6/uza9YvmXpO8NLgwYNV/vGPfxz39vZaj/PmzXN7SK7I1FoYMGCAyqX1W9v/FmPXdzxx4kTc+9prQZbW01lQUKDym2++Gff2fsnEWujTp09St//yyy9V3rx5c2R77Fj9Y9s9nbYmTZokdeygyMQ6SIT92R15eXkqT5gwocT72vuOHz+u8owZM8oxsmAoa49nbcdxCkVEQl+vdW9ISDPUAsKoBYhQB4iiFhBGLUCEOsh6nn+qrTFmmIik5yU+uIpagAh1gChqAWHUAkSoA0RRC5mprFc8Dxtj6oiIhL4eKemGjuPMchynjeM4bUq6DdIatYCwhGqBOsh4nBMQRi0gjNcHiHBOyHplveK5QkQGi8jk0Nflro3IJ6X1Wnz77bdx9z/88MOR7UWLFql9dh9ehku7WvjpT3+qsr3Gq91Pd/ToUZULCwtVju2rO
XXqlNr3l7/8JW4urypVqqj85JNPqty/f39Xj1cKz2vhnnvuUdn++TOF3bvaoEGDuLc/dOiQl8NJVtqdE7xUs2ZNlX/961+rbL9e2D09f/zjHz0Zl0/SrhbstTaffvpple0e/5kzZ6ps9/GX9rtGrGeeeSbh24qIPPHEEyrbnxEQMGlXC7Fif+cTufizOj744AOV9+7dq/KRIyXOr0qVYZ9lkNZ1UBb2OSVej2c2SGQ5lbdFZKOINDbGFBhjHpILBdPVGJMvIl1DGRmOWkAYtQAR6gBR1ALCqAWIUAcoXiKfatuvhF2dXR4LAo5aQBi1ABHqAFHUAsKoBYhQByheWXs8AQAAAABIiOefapuu7Pdgt27dWuXY9Rm7dOmi9tnv9UdqVa5cWeXYNVhFLu4btNd0HTRokMpbtmxROUh9hvXq1Uv1EDzVuHHjEvfZ6+WmM7tG7R6fPXv2qGzXLFIrJycnsr148eKk7jt9+nSV161b58aQUIJnn31WZbun89y5cyqvWrVKZXtNxu+++67EY11
"text/plain": [
       "<Figure size 1152x288 with 7 Axes>"
]
},
     "metadata": {
      "needs_background": "light"
     },
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" 5 0 4 1 9 2 1\n"
]
}
],
"source": [
"draw_examples(x_train[:7], captions=y_train)"
]
},
{
"cell_type": "code",
   "execution_count": 55,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"60000 przykładów uczących\n",
"10000 przykładów testowych\n"
]
}
],
"source": [
"num_classes = 10\n",
"\n",
"x_train = x_train.reshape(60000, 784) # 784 = 28 * 28\n",
"x_test = x_test.reshape(10000, 784)\n",
"x_train = x_train.astype('float32')\n",
"x_test = x_test.astype('float32')\n",
"x_train /= 255\n",
"x_test /= 255\n",
"print('{} przykładów uczących'.format(x_train.shape[0]))\n",
"print('{} przykładów testowych'.format(x_test.shape[0]))\n",
"\n",
"# przekonwertuj wektory klas na binarne macierze klas\n",
"y_train = keras.utils.to_categorical(y_train, num_classes)\n",
"y_test = keras.utils.to_categorical(y_test, num_classes)"
]
},
{
"cell_type": "code",
   "execution_count": 56,
"metadata": {
"scrolled": true,
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
      "Model: \"sequential_21\"\n",
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"dense_59 (Dense) (None, 512) 401920 \n",
"_________________________________________________________________\n",
"dropout_2 (Dropout) (None, 512) 0 \n",
"_________________________________________________________________\n",
"dense_60 (Dense) (None, 512) 262656 \n",
"_________________________________________________________________\n",
"dropout_3 (Dropout) (None, 512) 0 \n",
"_________________________________________________________________\n",
"dense_61 (Dense) (None, 10) 5130 \n",
"=================================================================\n",
"Total params: 669,706\n",
"Trainable params: 669,706\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
    "model = keras.Sequential()\n",
"model.add(Dense(512, activation='relu', input_shape=(784,)))\n",
"model.add(Dropout(0.2))\n",
"model.add(Dense(512, activation='relu'))\n",
"model.add(Dropout(0.2))\n",
"model.add(Dense(num_classes, activation='softmax'))\n",
"model.summary()"
]
},
{
"cell_type": "code",
   "execution_count": 57,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
      "(60000, 784) (60000, 10)\n"
]
}
],
"source": [
"print(x_train.shape, y_train.shape)"
]
},
{
"cell_type": "code",
   "execution_count": 58,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/5\n",
"469/469 [==============================] - 11s 23ms/step - loss: 0.2463 - accuracy: 0.9238 - val_loss: 0.1009 - val_accuracy: 0.9690\n",
"Epoch 2/5\n",
"469/469 [==============================] - 10s 22ms/step - loss: 0.1042 - accuracy: 0.9681 - val_loss: 0.0910 - val_accuracy: 0.9739\n",
"Epoch 3/5\n",
"469/469 [==============================] - 11s 23ms/step - loss: 0.0774 - accuracy: 0.9762 - val_loss: 0.0843 - val_accuracy: 0.9755\n",
"Epoch 4/5\n",
"469/469 [==============================] - 11s 24ms/step - loss: 0.0606 - accuracy: 0.9815 - val_loss: 0.0691 - val_accuracy: 0.9818\n",
"Epoch 5/5\n",
"469/469 [==============================] - 10s 22ms/step - loss: 0.0504 - accuracy: 0.9848 - val_loss: 0.0886 - val_accuracy: 0.9772\n"
]
},
{
"data": {
"text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0x1ed6e0565b0>"
      ]
     },
     "execution_count": 58,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
    "model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.RMSprop(), metrics=['accuracy'])\n",
"\n",
"model.fit(x_train, y_train, batch_size=128, epochs=5, verbose=1,\n",
" validation_data=(x_test, y_test))"
]
},
{
"cell_type": "code",
   "execution_count": 60,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Test loss: 0.08859136700630188\n",
"Test accuracy: 0.9771999716758728\n"
]
}
],
"source": [
"score = model.evaluate(x_test, y_test, verbose=0)\n",
"\n",
"print('Test loss: {}'.format(score[0]))\n",
"print('Test accuracy: {}'.format(score[1]))"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"source": [
"Warstwa _dropout_ to metoda regularyzacji, służy zapobieganiu nadmiernemu dopasowaniu sieci. Polega na tym, że część węzłów sieci jest usuwana w sposób losowy."
]
},
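  {
   "cell_type": "markdown",
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "source": [
    "Działanie warstwy Dropout można podejrzeć bezpośrednio, wywołując ją na przykładowych danych. Poniższy szkic pokazuje, że losowe zerowanie (i przeskalowanie pozostałych wartości) następuje tylko w trybie uczenia (`training=True`), a w trybie predykcji warstwa przepuszcza dane bez zmian."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "slideshow": {
     "slide_type": "notes"
    }
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from tensorflow.keras.layers import Dropout\n",
    "\n",
    "przyklad = np.ones((1, 10), dtype='float32')\n",
    "warstwa = Dropout(0.2)\n",
    "\n",
    "print(warstwa(przyklad, training=True).numpy())   # część wartości wyzerowana, reszta przeskalowana przez 1/(1-0.2)\n",
    "print(warstwa(przyklad, training=False).numpy())  # wejście bez zmian"
   ]
  },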
{
"cell_type": "code",
   "execution_count": 61,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
      "Model: \"sequential_22\"\n",
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"dense_62 (Dense) (None, 512) 401920 \n",
"_________________________________________________________________\n",
"dense_63 (Dense) (None, 512) 262656 \n",
"_________________________________________________________________\n",
"dense_64 (Dense) (None, 10) 5130 \n",
"=================================================================\n",
"Total params: 669,706\n",
"Trainable params: 669,706\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n",
"Epoch 1/5\n",
"469/469 [==============================] - 10s 20ms/step - loss: 0.2203 - accuracy: 0.9317 - val_loss: 0.0936 - val_accuracy: 0.9697\n",
"Epoch 2/5\n",
"469/469 [==============================] - 10s 21ms/step - loss: 0.0816 - accuracy: 0.9746 - val_loss: 0.0747 - val_accuracy: 0.9779\n",
"Epoch 3/5\n",
"469/469 [==============================] - 10s 20ms/step - loss: 0.0544 - accuracy: 0.9827 - val_loss: 0.0674 - val_accuracy: 0.9798\n",
"Epoch 4/5\n",
"469/469 [==============================] - 10s 22ms/step - loss: 0.0384 - accuracy: 0.9879 - val_loss: 0.0746 - val_accuracy: 0.9806\n",
"Epoch 5/5\n",
"469/469 [==============================] - 10s 22ms/step - loss: 0.0298 - accuracy: 0.9901 - val_loss: 0.0736 - val_accuracy: 0.9801\n"
]
},
{
"data": {
"text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0x1ed7eba8070>"
      ]
     },
     "execution_count": 61,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Bez warstw Dropout\n",
"\n",
"num_classes = 10\n",
"\n",
"(x_train, y_train), (x_test, y_test) = mnist.load_data()\n",
"\n",
"x_train = x_train.reshape(60000, 784) # 784 = 28 * 28\n",
"x_test = x_test.reshape(10000, 784)\n",
"x_train = x_train.astype('float32')\n",
"x_test = x_test.astype('float32')\n",
"x_train /= 255\n",
"x_test /= 255\n",
"\n",
"y_train = keras.utils.to_categorical(y_train, num_classes)\n",
"y_test = keras.utils.to_categorical(y_test, num_classes)\n",
"\n",
    "model_no_dropout = keras.Sequential()\n",
"model_no_dropout.add(Dense(512, activation='relu', input_shape=(784,)))\n",
"model_no_dropout.add(Dense(512, activation='relu'))\n",
"model_no_dropout.add(Dense(num_classes, activation='softmax'))\n",
"model_no_dropout.summary()\n",
"\n",
"model_no_dropout.compile(loss='categorical_crossentropy',\n",
" optimizer=keras.optimizers.RMSprop(),\n",
" metrics=['accuracy'])\n",
"\n",
"model_no_dropout.fit(x_train, y_train,\n",
" batch_size=128,\n",
" epochs=5,\n",
" verbose=1,\n",
" validation_data=(x_test, y_test))"
]
},
{
"cell_type": "code",
   "execution_count": 62,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Test loss (no dropout): 0.07358124107122421\n",
"Test accuracy (no dropout): 0.9800999760627747\n"
]
}
],
"source": [
"# Bez warstw Dropout\n",
"\n",
"score = model_no_dropout.evaluate(x_test, y_test, verbose=0)\n",
"\n",
"print('Test loss (no dropout): {}'.format(score[0]))\n",
"print('Test accuracy (no dropout): {}'.format(score[1]))"
]
},
{
"cell_type": "code",
   "execution_count": 63,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
      "Model: \"sequential_23\"\n",
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"dense_65 (Dense) (None, 2500) 1962500 \n",
"_________________________________________________________________\n",
"dense_66 (Dense) (None, 2000) 5002000 \n",
"_________________________________________________________________\n",
"dense_67 (Dense) (None, 1500) 3001500 \n",
"_________________________________________________________________\n",
"dense_68 (Dense) (None, 1000) 1501000 \n",
"_________________________________________________________________\n",
"dense_69 (Dense) (None, 500) 500500 \n",
"_________________________________________________________________\n",
"dense_70 (Dense) (None, 10) 5010 \n",
"=================================================================\n",
"Total params: 11,972,510\n",
"Trainable params: 11,972,510\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n",
"Epoch 1/10\n",
"469/469 [==============================] - 129s 275ms/step - loss: 0.9587 - accuracy: 0.7005 - val_loss: 0.5066 - val_accuracy: 0.8566\n",
"Epoch 2/10\n",
"469/469 [==============================] - 130s 276ms/step - loss: 0.2666 - accuracy: 0.9234 - val_loss: 0.3376 - val_accuracy: 0.9024\n",
"Epoch 3/10\n",
"469/469 [==============================] - 130s 277ms/step - loss: 0.1811 - accuracy: 0.9477 - val_loss: 0.1678 - val_accuracy: 0.9520\n",
"Epoch 4/10\n",
"469/469 [==============================] - 134s 287ms/step - loss: 0.1402 - accuracy: 0.9588 - val_loss: 0.1553 - val_accuracy: 0.9576\n",
"Epoch 5/10\n",
"469/469 [==============================] - 130s 278ms/step - loss: 0.1153 - accuracy: 0.9662 - val_loss: 0.1399 - val_accuracy: 0.9599\n",
"Epoch 6/10\n",
"469/469 [==============================] - 130s 277ms/step - loss: 0.0956 - accuracy: 0.9711 - val_loss: 0.1389 - val_accuracy: 0.9612\n",
"Epoch 7/10\n",
"469/469 [==============================] - 131s 280ms/step - loss: 0.0803 - accuracy: 0.9761 - val_loss: 0.1008 - val_accuracy: 0.9724\n",
"Epoch 8/10\n",
"469/469 [==============================] - 134s 286ms/step - loss: 0.0685 - accuracy: 0.9797 - val_loss: 0.1137 - val_accuracy: 0.9679\n",
"Epoch 9/10\n",
"469/469 [==============================] - 130s 278ms/step - loss: 0.0602 - accuracy: 0.9819 - val_loss: 0.1064 - val_accuracy: 0.9700\n",
"Epoch 10/10\n",
"469/469 [==============================] - 129s 274ms/step - loss: 0.0520 - accuracy: 0.9843 - val_loss: 0.1095 - val_accuracy: 0.9698\n"
]
},
{
"data": {
"text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0x1ed0e628250>"
      ]
     },
     "execution_count": 63,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Więcej warstw, inna funkcja aktywacji\n",
"\n",
"num_classes = 10\n",
"\n",
"(x_train, y_train), (x_test, y_test) = mnist.load_data()\n",
"\n",
"x_train = x_train.reshape(60000, 784) # 784 = 28 * 28\n",
"x_test = x_test.reshape(10000, 784)\n",
"x_train = x_train.astype('float32')\n",
"x_test = x_test.astype('float32')\n",
"x_train /= 255\n",
"x_test /= 255\n",
"\n",
"y_train = keras.utils.to_categorical(y_train, num_classes)\n",
"y_test = keras.utils.to_categorical(y_test, num_classes)\n",
"\n",
    "model3 = keras.Sequential()\n",
"model3.add(Dense(2500, activation='tanh', input_shape=(784,)))\n",
"model3.add(Dense(2000, activation='tanh'))\n",
"model3.add(Dense(1500, activation='tanh'))\n",
"model3.add(Dense(1000, activation='tanh'))\n",
"model3.add(Dense(500, activation='tanh'))\n",
"model3.add(Dense(num_classes, activation='softmax'))\n",
"model3.summary()\n",
"\n",
"model3.compile(loss='categorical_crossentropy',\n",
" optimizer=keras.optimizers.RMSprop(),\n",
" metrics=['accuracy'])\n",
"\n",
"model3.fit(x_train, y_train,\n",
" batch_size=128,\n",
" epochs=10,\n",
" verbose=1,\n",
" validation_data=(x_test, y_test))"
]
},
{
"cell_type": "code",
   "execution_count": 64,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Test loss: 0.10945799201726913\n",
"Test accuracy: 0.9697999954223633\n"
]
}
],
"source": [
"# Więcej warstw, inna funkcja aktywacji\n",
"\n",
"score = model3.evaluate(x_test, y_test, verbose=0)\n",
"\n",
"print('Test loss: {}'.format(score[0]))\n",
"print('Test accuracy: {}'.format(score[1]))"
]
},
{
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"### Przykład: 4-pikselowy aparat fotograficzny\n",
"\n",
"https://www.youtube.com/watch?v=ILsA4nyG7I0"
]
},
{
"cell_type": "code",
   "execution_count": 65,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [],
"source": [
"def generate_example(description):\n",
" variant = random.choice([1, -1])\n",
" if description == 's': # solid\n",
" return (np.array([[ 1.0, 1.0], [ 1.0, 1.0]]) if variant == 1 else\n",
" np.array([[-1.0, -1.0], [-1.0, -1.0]]))\n",
" elif description == 'v': # vertical\n",
" return (np.array([[ 1.0, -1.0], [ 1.0, -1.0]]) if variant == 1 else\n",
" np.array([[-1.0, 1.0], [-1.0, 1.0]]))\n",
" elif description == 'd': # diagonal\n",
" return (np.array([[ 1.0, -1.0], [-1.0, 1.0]]) if variant == 1 else\n",
" np.array([[-1.0, 1.0], [ 1.0, -1.0]]))\n",
" elif description == 'h': # horizontal\n",
" return (np.array([[ 1.0, 1.0], [-1.0, -1.0]]) if variant == 1 else\n",
" np.array([[-1.0, -1.0], [ 1.0, 1.0]]))\n",
" else:\n",
" return np.array([[random.uniform(-1, 1), random.uniform(-1, 1)],\n",
" [random.uniform(-1, 1), random.uniform(-1, 1)]])"
]
},
{
"cell_type": "code",
   "execution_count": 67,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [],
"source": [
    "import random\n",
    "\n",
"num_classes = 4\n",
"\n",
"trainset_size = 4000\n",
"testset_size = 1000\n",
"\n",
"y4_train = np.array([random.choice(['s', 'v', 'd', 'h']) for i in range(trainset_size)])\n",
"x4_train = np.array([generate_example(desc) for desc in y4_train])\n",
"\n",
"y4_test = np.array([random.choice(['s', 'v', 'd', 'h']) for i in range(testset_size)])\n",
"x4_test = np.array([generate_example(desc) for desc in y4_test])"
]
},
{
"cell_type": "code",
   "execution_count": 68,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA6oAAACQCAYAAAABdZZIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAPGUlEQVR4nO3dX4ild33H8c+3uwYhVKTN+ie7WZOLZSUVWswkVXqTXliSYJNeaIm9UKSwKOSyFwsF7WUveiUVQy5C4o22eGGXNq1NhRALymZXVJK2SRdRs0nARkvaVTFm+fVizrMzpLOzc/Y858xvnvN6wZA5c56c5zkn7/09893zzKRaawEAAIBe/Np+HwAAAABsZ1AFAACgKwZVAAAAumJQBQAAoCsGVQAAALpiUAUAAKArhxf5l6vqN5L8TZJbk/wgyR+31v57h+1+kOR/k1xO8kZrbWOR/dIfLTDQAokO2KIFBlog0QF7t+g7qqeTfL21diLJ12e3r+b3W2u/I7LJ0gIDLZDogC1aYKAFEh2wR4sOqg8keXz2+eNJ/mjBx+Pg0gIDLZDogC1aYKAFEh2wR4sOqu9srb2SJLN/vuMq27Uk/1xV56vq1IL7pE9aYKAFEh2wRQsMtECiA/bomj+jWlX/kuRdO9z153Ps5/daay9X1TuSPFlV/9Fae/oq+zuVZIjxjjn2wT6rqtZaq2tstqcW1qGDO+44uE/rhRdeyK9+9av/9/WjR4/m0KFDqar/aq0ducbDzN3CjTfeeMd73/veMZ4CI7hWBxsbG+38+fOvXqMF54c1sKzzw1TXhPPnz+/3ISzNss4PmeiacFC/V1jl+UEHB9tuHVRr7bofuKqeT3J3a+2Vqnp3kqdaayev8e/8RZJLrbW/2sPjX//BsS9aazV2C1PtYJE/ez07efJkXnjhhe+11n577BY2NjbauXPnRjxaluXkyZN56qmncvPNN59P8odxflh7yzg/THVNqLrWTH+gLeX8MNU1YYrfKyzz/KCDg6eqzl/tZ5AXvfT3TJJPzD7/RJK/22HnN1bVrw+fJ/mDJM8uuF/6poU1dv/99yfJb85uamFN3X///Xn88eFHkHTAFVrA+WHNOT+wV4sOqn+Z5ENV9Z9JPjS7naq6uaqemG3zziT/WlXfTXI2yT+01v5pwf3SKS1w+vTpJHmbFtbb6dOn8+STTybJ+6ID4vzAFc4Pa875gb1a6NLfZZvq2/dTtoefQZrbVDvo+c/eona7jGMRU73Mb8qW1cJU14UpW8b5YaprwsQv/bUmzMH3CnM/5iRfsHXtYNF3VAEAAGBUBlUAAAC6YlAFAACgKwZVAAAAumJQBQAAoCsGVQAAALpiUAUAAKArBlUAAAC6YlAFAACgKwZVAAAAumJQBQAAoCsGVQAAALpiUAUAAKArBlUAAAC6YlAFAACgKwZVAAAAumJQBQAAoCsGVQAAALoyyqBaVfdU1fNVdaGqTu9wf1XV52b3f6+q3j/GfumPFhhogZm36YDEmsAV1gQGWmBXCw+qVXUoyeeT3Jvk9iQfq6rb37TZvUlOzD5OJfnCovulW1pgoIU1d/ny5SQ5Hh2wyZpAYk0gzg/szRjvqN6V5EJr7futtdeTfDnJA2/a5oEkX2ybvpXk7VX17hH2TX+0QJLcGC2svbNnzybJL3XAjDWBxJpAnB/YmzEG1aNJXtx2++Lsa/NuwzRogSS5IVpYey+99FKSvL7tSzpYb9YEEmsCcX5gbw6P8Bi1w9fadWyzuWHVqWy+vc80XFcLOpikhVs4fvz4Eg6LZWltx2Xe+YGBNYHEmrCWxjw/6GC6xnhH9WKSW7bdPpbk5evYJknSWnuktbbRWtsY4dhYvVFa0MGB93qW0MKRI0dGP1CW59ixY8nmu+tXvhTnh3VmTSCxJpBxzw86mK4xBtVnkpyoqtuq6oYkDyY586ZtziT5+Oy3d30gyWuttVdG2Df90QJJ8rNoYe3deeedSfJWHTBjTSCxJhDnB/Zm4Ut/W2tvVNVDSb6W5FCSR1trz1XVp2b3P5zkiST3JbmQ5OdJPrnofumWFhhoYc0dPnw4SX4UHbDJmkBiTSDOD+xNXeUa8S5UVb8Hx45aazv9PMFCptpBz3/2FlVV55dxCc7GxkY7d+7c2A/LEi2rhamuC1O2jPPDVNeEqtFfqp5YE+bge4W5H3OSL9i6djDGpb8AAAAwGoMqAAAAXTGoAgAA0BWDKgAAAF0xqAIAANAVgyoAAABdMagCAADQFYMqAAAAXTGoAgAA0BWDKgAAAF0xqAIAANAVgyoAAABdMagCAADQFYMqAAAAXTGoAgAA0BWDKgAAAF0xqAIAANAVgyoAAABdMagCAADQlVEG1aq6p6qer6oLVXV6h/vvrqrXquo7s4/PjLFf+qMFBlpg5m06ILEmcIU1gYEW2NXhRR+gqg4l+XySDyW5mOSZqjrTWvu3N236jdbahxfdH93TAgMtrLnLly8nyfEkt0cHWBPYZE3A+YE9GeMd1buSXGitfb+19nqSLyd5YITH5WDSAklyY7Sw9s6ePZskv9QBM9YEEmsCcX5gbxZ+RzXJ0SQvbrt9Mcnv7rDdB6vqu0leTvJnrbXndnqwqjqV5NQIx8X+GKWF7R0cP348P/zhD5dxrPuqqvb7EJbphiyhhdntkQ+VJXt92+fOD+vNmrBHrbX9PoSl+MpXvpKPfvSj1gTy0ksvJSOdH3QwXWMMqjudId68wn47yXtaa5eq6r4kX01yYqcHa609kuSRJKmqaa7U6+W6WtjewcbGhg6mYeEWrAmT4PzAwJqwZq4ygFsT1tCYLehgusa49Pdiklu23T6Wzb/1uKK19j+ttUuzz59I8paqummEfdMfLZBs/i2pFkg2310f6GC9WRPW3LFjxxJrAtECezPGoPpMkhNVdVtV3ZDkwSRntm9QVe+q2bU5VXXXbL8/GWHf9EcLJMnPogU2vVUHzFgT1tydd96ZWBOIFtibhS/9ba29UVUPJflakkNJHm2tPVdVn5rd/3CSjyT5dFW9keQXSR5sU/0BDLTAQAskyY+iAzZZE9bc4cOHE2sC0QJ7Uz3/93ad+cHTWhv9t1psbGy0c+fOjf2w+26KvwBkm/OttY2xH9SacCBpgSTLOT9MtYOevzdbVFVZE+aghbkfc5Iv2Lp2MMalvwAAADAagyoAAABdMagCAADQFYMqAAAAXTGoAgAA0BWDKgAAAF0xqAIAANAVgyoAAABdMagCAADQFYMqAAAAXTGoAgAA0BWDKgAAAF0xqAIAANAVgyoAAABdMagCAADQFYMqAAAAXTGoAgAA0BWDKgAAAF0ZZVCtqker6sdV9exV7q+q+lxVXaiq71XV+8fYL/3RATO3aoEZLZDE+YErrAkMtMCuxnpH9bEk9+xy/71JTsw+TiX5wkj7pT86IElejRbYpAUGOiCxJrBFC+xqlEG1tfZ0kp/usskDSb7YNn0rydur6t1j7Jvu6IAkuRQtsEkLDHRAYk1gixbY1ap+RvVokhe33b44+xrrRQcMtMBACyQ6Y
IsWGGhhzR1e0X5qh6+1HTesOpXNt/eZnuvq4Pjx48s8JvaHNYGBFkh0wBYtMNhTCzqYrlW9o3oxyS3bbh9L8vJOG7bWHmmtbbTWNlZyZKzSdXVw5MiRlRwcK2VNYKAFEh2wRQsM9tSCDqZrVYPqmSQfn/32rg8kea219sqK9k0/dMBACwy0QKIDtmiBgRbW3CiX/lbVl5LcneSmqrqY5LNJ3pIkrbWHkzyR5L4kF5L8PMknx9gvXfpmdEByW7TAJi0w0AGJNYEtWmBX1dqOl/13oar6PTh21Frb6ecJFrKxsdHOnTs39sPuu6rRX6qenF/GJTjWhANJCyRZzvlhqh30/L3ZoqrKmjAHLcz9mJN8wda1g1Vd+gsAAAB7YlAFAACgKwZVAAAAumJQBQAAoCsGVQAAALpiUAUAAKArBlUAAAC6YlAFAACgKwZVAAAAumJQBQAAoCsGVQAAALpiUAUAAKArBlUAAAC6YlAFAACgKwZVAAAAumJQBQAAoCsGVQAAALpiUAUAAKArowyqVfV
"text/plain": [
       "<Figure size 1152x288 with 7 Axes>"
]
},
     "metadata": {
      "needs_background": "light"
     },
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" s d h s d v v\n"
]
}
],
"source": [
"draw_examples(x4_train[:7], captions=y4_train)"
]
},
{
"cell_type": "code",
   "execution_count": 69,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [],
"source": [
"x4_train = x4_train.reshape(trainset_size, 4)\n",
"x4_test = x4_test.reshape(testset_size, 4)\n",
"x4_train = x4_train.astype('float32')\n",
"x4_test = x4_test.astype('float32')\n",
"\n",
"y4_train = np.array([{'s': 0, 'v': 1, 'd': 2, 'h': 3}[desc] for desc in y4_train])\n",
"y4_test = np.array([{'s': 0, 'v': 1, 'd': 2, 'h': 3}[desc] for desc in y4_test])\n",
"\n",
"y4_train = keras.utils.to_categorical(y4_train, num_classes)\n",
"y4_test = keras.utils.to_categorical(y4_test, num_classes)"
]
},
{
"cell_type": "code",
   "execution_count": 70,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
      "Model: \"sequential_24\"\n",
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"dense_71 (Dense) (None, 4) 20 \n",
"_________________________________________________________________\n",
"dense_72 (Dense) (None, 4) 20 \n",
"_________________________________________________________________\n",
"dense_73 (Dense) (None, 8) 40 \n",
"_________________________________________________________________\n",
"dense_74 (Dense) (None, 4) 36 \n",
"=================================================================\n",
"Total params: 116\n",
"Trainable params: 116\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
    "model4 = keras.Sequential()\n",
"model4.add(Dense(4, activation='tanh', input_shape=(4,)))\n",
"model4.add(Dense(4, activation='tanh'))\n",
"model4.add(Dense(8, activation='relu'))\n",
"model4.add(Dense(num_classes, activation='softmax'))\n",
"model4.summary()"
]
},
{
"cell_type": "code",
   "execution_count": 71,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [],
"source": [
"model4.layers[0].set_weights(\n",
" [np.array([[ 1.0, 0.0, 1.0, 0.0],\n",
" [ 0.0, 1.0, 0.0, 1.0],\n",
" [ 1.0, 0.0, -1.0, 0.0],\n",
" [ 0.0, 1.0, 0.0, -1.0]],\n",
" dtype=np.float32), np.array([0., 0., 0., 0.], dtype=np.float32)])\n",
"model4.layers[1].set_weights(\n",
" [np.array([[ 1.0, -1.0, 0.0, 0.0],\n",
" [ 1.0, 1.0, 0.0, 0.0],\n",
" [ 0.0, 0.0, 1.0, -1.0],\n",
" [ 0.0, 0.0, -1.0, -1.0]],\n",
" dtype=np.float32), np.array([0., 0., 0., 0.], dtype=np.float32)])\n",
"model4.layers[2].set_weights(\n",
" [np.array([[ 1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n",
" [ 0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0],\n",
" [ 0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0],\n",
" [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0]],\n",
" dtype=np.float32), np.array([0., 0., 0., 0., 0., 0., 0., 0.], dtype=np.float32)])"
]
},
{
"cell_type": "code",
   "execution_count": 73,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [],
"source": [
"model4.layers[3].set_weights(\n",
" [np.array([[ 1.0, 0.0, 0.0, 0.0],\n",
" [ 1.0, 0.0, 0.0, 0.0],\n",
" [ 0.0, 1.0, 0.0, 0.0],\n",
" [ 0.0, 1.0, 0.0, 0.0],\n",
" [ 0.0, 0.0, 1.0, 0.0],\n",
" [ 0.0, 0.0, 1.0, 0.0],\n",
" [ 0.0, 0.0, 0.0, 1.0],\n",
" [ 0.0, 0.0, 0.0, 1.0]],\n",
" dtype=np.float32), np.array([0., 0., 0., 0.], dtype=np.float32)])\n",
"\n",
"model4.compile(loss='categorical_crossentropy',\n",
" optimizer=keras.optimizers.Adagrad(),\n",
" metrics=['accuracy'])"
]
},
{
"cell_type": "code",
   "execution_count": 74,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[array([[ 1., 0., 1., 0.],\n",
" [ 0., 1., 0., 1.],\n",
" [ 1., 0., -1., 0.],\n",
" [ 0., 1., 0., -1.]], dtype=float32), array([0., 0., 0., 0.], dtype=float32)]\n",
"[array([[ 1., -1., 0., 0.],\n",
" [ 1., 1., 0., 0.],\n",
" [ 0., 0., 1., -1.],\n",
" [ 0., 0., -1., -1.]], dtype=float32), array([0., 0., 0., 0.], dtype=float32)]\n",
"[array([[ 1., -1., 0., 0., 0., 0., 0., 0.],\n",
" [ 0., 0., 1., -1., 0., 0., 0., 0.],\n",
" [ 0., 0., 0., 0., 1., -1., 0., 0.],\n",
" [ 0., 0., 0., 0., 0., 0., 1., -1.]], dtype=float32), array([0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)]\n",
"[array([[1., 0., 0., 0.],\n",
" [1., 0., 0., 0.],\n",
" [0., 1., 0., 0.],\n",
" [0., 1., 0., 0.],\n",
" [0., 0., 1., 0.],\n",
" [0., 0., 1., 0.],\n",
" [0., 0., 0., 1.],\n",
" [0., 0., 0., 1.]], dtype=float32), array([0., 0., 0., 0.], dtype=float32)]\n"
]
}
],
"source": [
"for layer in model4.layers:\n",
" print(layer.get_weights())"
]
},
{
"cell_type": "code",
2021-05-19 18:55:21 +02:00
"execution_count": 75,
2021-05-05 15:23:24 +02:00
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"data": {
"text/plain": [
2021-05-19 18:55:21 +02:00
"array([[0.17831734, 0.17831734, 0.17831734, 0.465048 ]], dtype=float32)"
2021-05-05 15:23:24 +02:00
]
},
2021-05-19 18:55:21 +02:00
"execution_count": 75,
2021-05-05 15:23:24 +02:00
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model4.predict([np.array([[1.0, 1.0], [-1.0, -1.0]]).reshape(1, 4)])"
]
},
{
"cell_type": "code",
2021-05-19 18:55:21 +02:00
"execution_count": 76,
2021-05-05 15:23:24 +02:00
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Test loss: 0.7656148672103882\n",
"Test accuracy: 1.0\n"
]
}
],
"source": [
"score = model4.evaluate(x4_test, y4_test, verbose=0)\n",
"\n",
"print('Test loss: {}'.format(score[0]))\n",
"print('Test accuracy: {}'.format(score[1]))"
]
},
{
"cell_type": "code",
   "execution_count": 77,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
      "Model: \"sequential_25\"\n",
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"dense_75 (Dense) (None, 4) 20 \n",
"_________________________________________________________________\n",
"dense_76 (Dense) (None, 4) 20 \n",
"_________________________________________________________________\n",
"dense_77 (Dense) (None, 8) 40 \n",
"_________________________________________________________________\n",
"dense_78 (Dense) (None, 4) 36 \n",
"=================================================================\n",
"Total params: 116\n",
"Trainable params: 116\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
    "model5 = keras.Sequential()\n",
"model5.add(Dense(4, activation='tanh', input_shape=(4,)))\n",
"model5.add(Dense(4, activation='tanh'))\n",
"model5.add(Dense(8, activation='relu'))\n",
"model5.add(Dense(num_classes, activation='softmax'))\n",
"model5.compile(loss='categorical_crossentropy',\n",
" optimizer=keras.optimizers.RMSprop(),\n",
" metrics=['accuracy'])\n",
"model5.summary()"
]
},
{
"cell_type": "code",
   "execution_count": 78,
"metadata": {
"scrolled": true,
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/8\n",
"125/125 [==============================] - 0s 3ms/step - loss: 1.3126 - accuracy: 0.3840 - val_loss: 1.1926 - val_accuracy: 0.6110\n",
"Epoch 2/8\n",
"125/125 [==============================] - 0s 2ms/step - loss: 1.0978 - accuracy: 0.5980 - val_loss: 1.0085 - val_accuracy: 0.6150\n",
"Epoch 3/8\n",
"125/125 [==============================] - 0s 2ms/step - loss: 0.9243 - accuracy: 0.7035 - val_loss: 0.8416 - val_accuracy: 0.7380\n",
"Epoch 4/8\n",
"125/125 [==============================] - 0s 2ms/step - loss: 0.7522 - accuracy: 0.8740 - val_loss: 0.6738 - val_accuracy: 1.0000\n",
"Epoch 5/8\n",
"125/125 [==============================] - 0s 2ms/step - loss: 0.5811 - accuracy: 1.0000 - val_loss: 0.5030 - val_accuracy: 1.0000\n",
"Epoch 6/8\n",
"125/125 [==============================] - 0s 2ms/step - loss: 0.4134 - accuracy: 1.0000 - val_loss: 0.3428 - val_accuracy: 1.0000\n",
"Epoch 7/8\n",
"125/125 [==============================] - 0s 2ms/step - loss: 0.2713 - accuracy: 1.0000 - val_loss: 0.2161 - val_accuracy: 1.0000\n",
"Epoch 8/8\n",
"125/125 [==============================] - 0s 1ms/step - loss: 0.1621 - accuracy: 1.0000 - val_loss: 0.1225 - val_accuracy: 1.0000\n"
]
},
{
"data": {
"text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0x1ed00809700>"
      ]
     },
     "execution_count": 78,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model5.fit(x4_train, y4_train, epochs=8, validation_data=(x4_test, y4_test))"
]
},
{
"cell_type": "code",
   "execution_count": 79,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"data": {
"text/plain": [
"array([[3.2040708e-02, 1.0065207e-03, 4.9596769e-04, 9.6645677e-01]],\n",
" dtype=float32)"
]
},
     "execution_count": 79,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model5.predict([np.array([[1.0, 1.0], [-1.0, -1.0]]).reshape(1, 4)])"
]
},
{
"cell_type": "code",
   "execution_count": 80,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Test loss: 0.1224619448184967\n",
"Test accuracy: 1.0\n"
]
}
],
"source": [
"score = model5.evaluate(x4_test, y4_test, verbose=0)\n",
"\n",
"print('Test loss: {}'.format(score[0]))\n",
"print('Test accuracy: {}'.format(score[1]))"
]
},
{
"cell_type": "code",
   "execution_count": 81,
"metadata": {
"slideshow": {
"slide_type": "notes"
}
},
"outputs": [],
"source": [
"import contextlib\n",
"\n",
"@contextlib.contextmanager\n",
"def printoptions(*args, **kwargs):\n",
" original = np.get_printoptions()\n",
" np.set_printoptions(*args, **kwargs)\n",
" try:\n",
" yield\n",
" finally: \n",
" np.set_printoptions(**original)"
]
},
{
"cell_type": "code",
   "execution_count": 82,
"metadata": {
"slideshow": {
"slide_type": "subslide"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[array([[ 0.7, 0.2, -0.7, 0.7],\n",
" [-0.5, 0.9, 0.6, 0.6],\n",
" [ 1.1, 0.2, 0.1, 0.2],\n",
" [ 0.7, 0.1, 0.3, -0.7]], dtype=float32), array([ 0. , 0.1, -0.1, -0.2], dtype=float32)]\n",
"[array([[ 0.7, 0.5, -1.1, -1.2],\n",
" [ 0.7, 0.9, -0.6, 0.3],\n",
" [ 0.1, 1.4, -0.6, 0.8],\n",
" [ 1.5, 0.1, -0.1, 0.9]], dtype=float32), array([-0.4, 0.2, -0. , 0.2], dtype=float32)]\n",
"[array([[-1. , 1. , -0.7, -0.3, 0.2, 1.3, -0.7, 0.9],\n",
" [-0.9, 0.5, 0.8, -1.3, -1.2, 1.3, 0.4, -1. ],\n",
" [ 0.9, 0.2, 0.3, 0.4, 1.3, -0.9, -0.1, -0.2],\n",
" [-0.4, 0.5, 1.1, -0.6, 1.1, 0.1, -1.5, -1. ]], dtype=float32), array([-0.1, 0.1, 0.1, 0.1, 0.2, -0. , 0.1, 0.2], dtype=float32)]\n",
"[array([[ 0.7, -0.5, 0.8, -0.5],\n",
" [-0.3, -1.6, -0.2, 0.1],\n",
" [-1.5, 0.9, 0.1, -0.5],\n",
" [ 0.6, 0.7, 1. , -1.4],\n",
" [ 0.7, -1.2, -1.6, 1.2],\n",
" [ 1. , -1.2, 0.3, -1.5],\n",
" [-0.2, 0. , 0.6, 1.3],\n",
" [-0.8, 0.2, -0.6, -1. ]], dtype=float32), array([-0.6, 0.5, -0.3, 0.4], dtype=float32)]\n"
]
}
],
"source": [
"with printoptions(precision=1, suppress=True):\n",
" for layer in model5.layers:\n",
" print(layer.get_weights())"
]
}
],
"metadata": {
"celltoolbar": "Slideshow",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
   "version": "3.8.3"
},
"livereveal": {
"start_slideshow_at": "selected",
"theme": "white"
}
},
"nbformat": 4,
"nbformat_minor": 4
}