{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "spread-happiness",
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
"import numpy as np\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"import seaborn as sns\n",
"import matplotlib.ticker as ticker\n",
"from IPython.display import Markdown, display, HTML\n",
"\n",
"import torch\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"\n",
"# Fix the dying kernel problem (only a problem in some installations - you can remove it, if it works without it)\n",
"import os\n",
"os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'"
]
},
{
"cell_type": "markdown",
"id": "approximate-classic",
"metadata": {},
"source": [
"# PyTorch\n",
"\n",
"Here's your best friend when working with PyTorch: https://pytorch.org/docs/stable/index.html.\n",
"\n",
"The beginning of this notebook shows that PyTorch tensors can be used exactly like numpy arrays. Later in the notebook additional features of tensors will be presented."
]
},
{
"cell_type": "markdown",
"id": "renewable-chase",
"metadata": {},
"source": [
"## Creating PyTorch tensors"
]
},
{
"cell_type": "markdown",
"id": "afraid-consortium",
"metadata": {},
"source": [
"### Directly"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "textile-mainland",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[1. 2. 3.]\n",
" [4. 5. 6.]\n",
" [7. 8. 9.]]\n",
"\n",
"tensor([[1., 2., 3.],\n",
" [4., 5., 6.],\n",
" [7., 8., 9.]])\n"
]
}
],
"source": [
"a = np.array(\n",
" [[1.0, 2.0, 3.0], \n",
" [4.0, 5.0, 6.0], \n",
" [7.0, 8.0, 9.0]]\n",
")\n",
"\n",
"print(a)\n",
"print()\n",
"\n",
"t = torch.tensor(\n",
" [[1.0, 2.0, 3.0], \n",
" [4.0, 5.0, 6.0], \n",
" [7.0, 8.0, 9.0]]\n",
")\n",
"\n",
"print(t)"
]
},
{
"cell_type": "markdown",
"id": "floating-junior",
"metadata": {},
"source": [
"### From a list"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "reasonable-mistress",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]\n",
"\n",
"[[1. 2. 3.]\n",
" [4. 5. 6.]\n",
" [7. 8. 9.]]\n",
"\n",
"tensor([[1., 2., 3.],\n",
" [4., 5., 6.],\n",
" [7., 8., 9.]])\n"
]
}
],
"source": [
"l = [[1.0, 2.0, 3.0], \n",
" [4.0, 5.0, 6.0], \n",
" [7.0, 8.0, 9.0]]\n",
"\n",
"print(l)\n",
"print()\n",
"\n",
"a = np.array(l)\n",
"print(a)\n",
"print()\n",
"\n",
"t = torch.tensor(l)\n",
"print(t)"
]
},
{
"cell_type": "markdown",
"id": "incorrect-practitioner",
"metadata": {},
"source": [
"### From a list comprehension"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "straight-cooling",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]\n",
"\n",
"[ 0 1 4 9 16 25 36 49 64 81]\n",
"\n",
"tensor([ 0, 1, 4, 9, 16, 25, 36, 49, 64, 81])\n"
]
}
],
"source": [
"a = [i**2 for i in range(10)]\n",
"\n",
"print(a)\n",
"print()\n",
"print(np.array(a))\n",
"print()\n",
"print(torch.tensor(a))"
]
},
{
"cell_type": "markdown",
"id": "enormous-drink",
"metadata": {},
"source": [
"### From a numpy array"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "parental-judges",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[1., 2., 3.],\n",
" [4., 5., 6.],\n",
" [7., 8., 9.]], dtype=torch.float64)\n"
]
}
],
"source": [
"a = np.array(\n",
" [[1.0, 2.0, 3.0], \n",
" [4.0, 5.0, 6.0], \n",
" [7.0, 8.0, 9.0]]\n",
")\n",
"\n",
"t = torch.tensor(a)\n",
"\n",
"print(t)"
]
},
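{
"cell_type": "markdown",
"id": "from-numpy-note",
"metadata": {},
"source": [
"Beyond `torch.tensor`, a numpy array can also be wrapped with `torch.from_numpy`, which shares memory with the array instead of copying it. The unexecuted sketch below illustrates the difference and the conversion back to numpy with `.numpy()`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "from-numpy-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Sketch (not part of the original run): torch.from_numpy shares memory, torch.tensor copies\n",
"a = np.array([[1.0, 2.0], [3.0, 4.0]])\n",
"\n",
"t_copy = torch.tensor(a)        # independent copy of the data\n",
"t_shared = torch.from_numpy(a)  # shares memory with the numpy array\n",
"\n",
"a[0, 0] = 100.0\n",
"print(t_copy)    # unchanged\n",
"print(t_shared)  # reflects the change\n",
"\n",
"print(t_shared.numpy())  # back to a numpy array (also shared memory on CPU)"
]
},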
{
"cell_type": "markdown",
"id": "suffering-myanmar",
"metadata": {},
"source": [
"### Ready-made functions in PyTorch"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "expensive-bowling",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"All zeros\n",
"tensor([[0., 0., 0., 0.],\n",
" [0., 0., 0., 0.],\n",
" [0., 0., 0., 0.]])\n",
"\n",
"All chosen value (variant 1)\n",
"tensor([[7., 7., 7., 7.],\n",
" [7., 7., 7., 7.],\n",
" [7., 7., 7., 7.]])\n",
"\n",
"All chosen value (variant 2)\n",
"tensor([[7., 7., 7., 7.],\n",
" [7., 7., 7., 7.],\n",
" [7., 7., 7., 7.]])\n",
"\n",
"Random integers\n",
"[[6 6]\n",
" [8 9]\n",
" [1 0]]\n",
"\n",
"tensor([[9, 5],\n",
" [9, 3],\n",
" [3, 8]])\n",
"\n",
"Random values from the normal distribution\n",
"[[ -5.34346728 0.97207777]\n",
" [ -7.26648922 -12.2890286 ]\n",
" [ -2.68082928 10.95819034]]\n",
"\n",
"tensor([[ 1.1231, -5.9980],\n",
" [20.4600, -6.4359],\n",
" [-6.6826, -0.4491]])\n"
]
}
],
"source": [
"# All zeros\n",
"a = torch.zeros((3, 4))\n",
"print(\"All zeros\")\n",
"print(a)\n",
"print()\n",
"\n",
"# All a chosen value\n",
"a = torch.full((3, 4), 7.0)\n",
"print(\"All chosen value (variant 1)\")\n",
"print(a)\n",
"print()\n",
"\n",
"# or\n",
"\n",
"a = torch.zeros((3, 4))\n",
"a[:] = 7.0\n",
"print(\"All chosen value (variant 2)\")\n",
"print(a)\n",
"print()\n",
"\n",
"# Random integers\n",
"\n",
"print(\"Random integers\")\n",
"a = np.random.randint(low=0, high=10, size=(3, 2))\n",
"print(a)\n",
"print()\n",
"a = torch.randint(low=0, high=10, size=(3, 2))\n",
"print(a)\n",
"print()\n",
"\n",
"# Random values from the normal distribution (Gaussian)\n",
"\n",
"print(\"Random values from the normal distribution\")\n",
"a = np.random.normal(loc=0, scale=10, size=(3, 2))\n",
"print(a)\n",
"print()\n",
"a = torch.normal(mean=0, std=10, size=(3, 2))\n",
"print(a)"
]
},
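{
"cell_type": "markdown",
"id": "more-constructors-note",
"metadata": {},
"source": [
"A few more ready-made constructors mirror their numpy counterparts. The cell below is an additional, unexecuted sketch."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "more-constructors-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Additional ready-made constructors (unexecuted sketch)\n",
"print(torch.ones((2, 3)))           # all ones\n",
"print(torch.eye(3))                 # identity matrix\n",
"print(torch.arange(0, 10, 2))       # like np.arange\n",
"print(torch.linspace(0.0, 1.0, 5))  # evenly spaced values\n",
"print(torch.rand((2, 2)))           # uniform random values from [0, 1)"
]
},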
{
"cell_type": "markdown",
"id": "aggressive-titanium",
"metadata": {},
"source": [
"## Slicing PyTorch tensors"
]
},
{
"cell_type": "markdown",
"id": "former-richardson",
"metadata": {},
"source": [
"### Slicing in 1D\n",
"\n",
"To obtain only specific values from a PyTorch tensor one can use so called slicing. It has the form\n",
"\n",
"**arr[low:high:step]**\n",
"\n",
"where low is the lowest index to be retrieved, high is the lowest index not to be retrieved and step indicates that every step element will be taken."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "desirable-documentary",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Original: tensor([ 0, 1, 4, 9, 16, 25, 36, 49, 64, 81])\n",
"First 5 elements: tensor([ 0, 1, 4, 9, 16])\n",
"Elements from index 3 to index 5: tensor([ 9, 16, 25])\n",
"Last 3 elements (negative indexing): tensor([49, 64, 81])\n",
"Every second element: tensor([ 0, 4, 16, 36, 64])\n",
"Negative step a[::-1] to obtain reverse order does not work for tensors\n"
]
}
],
"source": [
"a = torch.tensor([i**2 for i in range(10)])\n",
"\n",
"print(\"Original: \", a)\n",
"print(\"First 5 elements:\", a[:5])\n",
"print(\"Elements from index 3 to index 5:\", a[3:6])\n",
"print(\"Last 3 elements (negative indexing):\", a[-3:])\n",
"print(\"Every second element:\", a[::2])\n",
"\n",
"print(\"Negative step a[::-1] to obtain reverse order does not work for tensors\")"
]
},
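{
"cell_type": "markdown",
"id": "flip-note",
"metadata": {},
"source": [
"Although a negative step is not supported, a tensor can still be reversed with `torch.flip`, as in the unexecuted sketch below."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "flip-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Reversing a tensor: a[::-1] raises an error, torch.flip works instead\n",
"a = torch.tensor([i**2 for i in range(10)])\n",
"print(torch.flip(a, dims=[0]))  # elements in reverse order"
]
},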
{
"cell_type": "markdown",
"id": "micro-explosion",
"metadata": {},
"source": [
"### Slicing in 2D\n",
"\n",
"In two dimensions it works similarly, just the slicing is separate for every dimension."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "disciplinary-think",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Original: \n",
"tensor([[ 0, 1, 2, 3, 4],\n",
" [ 5, 6, 7, 8, 9],\n",
" [10, 11, 12, 13, 14],\n",
" [15, 16, 17, 18, 19],\n",
" [20, 21, 22, 23, 24]])\n",
"\n",
"First 2 elements of the first 3 row:\n",
"tensor([[ 0, 1],\n",
" [ 5, 6],\n",
" [10, 11]])\n",
"\n",
"Middle 3 elements from the middle 3 rows:\n",
"tensor([[ 6, 7, 8],\n",
" [11, 12, 13],\n",
" [16, 17, 18]])\n",
"\n",
"Bottom-right 3 by 3 submatrix (negative indexing):\n",
"tensor([[12, 13, 14],\n",
" [17, 18, 19],\n",
" [22, 23, 24]])\n"
]
}
],
"source": [
"a = torch.tensor([i for i in range(25)]).reshape(5, 5)\n",
"\n",
"print(\"Original: \")\n",
"print(a)\n",
"print()\n",
"print(\"First 2 elements of the first 3 row:\")\n",
"print(a[:3, :2])\n",
"print()\n",
"print(\"Middle 3 elements from the middle 3 rows:\")\n",
"print(a[1:4, 1:4])\n",
"print()\n",
"print(\"Bottom-right 3 by 3 submatrix (negative indexing):\")\n",
"print(a[-3:, -3:])"
]
},
{
"cell_type": "markdown",
"id": "removable-canyon",
"metadata": {},
"source": [
"### Setting PyTorch tensor field values"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "senior-serbia",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Original: \n",
"tensor([[ 0, 1, 2, 3, 4],\n",
" [ 5, 6, 7, 8, 9],\n",
" [10, 11, 12, 13, 14],\n",
" [15, 16, 17, 18, 19],\n",
" [20, 21, 22, 23, 24]])\n",
"\n",
"Middle values changed to 5\n",
"tensor([[ 0, 1, 2, 3, 4],\n",
" [ 5, 5, 5, 5, 9],\n",
" [10, 5, 5, 5, 14],\n",
" [15, 5, 5, 5, 19],\n",
" [20, 21, 22, 23, 24]])\n",
"\n",
"Second matrix\n",
"tensor([[ 0, 0, 2],\n",
" [ 6, 12, 20],\n",
" [30, 42, 56]])\n",
"\n",
"Second matrix substituted into the middle of the first matrix\n",
"tensor([[ 0, 1, 2, 3, 4],\n",
" [ 5, 0, 0, 2, 9],\n",
" [10, 6, 12, 20, 14],\n",
" [15, 30, 42, 56, 19],\n",
" [20, 21, 22, 23, 24]])\n"
]
}
],
"source": [
"a = torch.tensor([i for i in range(25)]).reshape(5, 5)\n",
"\n",
"print(\"Original: \")\n",
"print(a)\n",
"print()\n",
"\n",
"a[1:4, 1:4] = 5.0\n",
"\n",
"print(\"Middle values changed to 5\")\n",
"print(a)\n",
"print()\n",
"\n",
"b = torch.tensor([i**2 - i for i in range(9)]).reshape(3, 3)\n",
"\n",
"print(\"Second matrix\")\n",
"print(b)\n",
"print()\n",
"\n",
"a[1:4, 1:4] = b\n",
"\n",
"print(\"Second matrix substituted into the middle of the first matrix\")\n",
"print(a)"
]
},
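{
"cell_type": "markdown",
"id": "mask-assignment-note",
"metadata": {},
"source": [
"Values can also be set through a boolean mask, just as in numpy. The sketch below was not part of the original run."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "mask-assignment-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Setting values through a boolean mask (works like in numpy)\n",
"a = torch.tensor([i for i in range(25)]).reshape(5, 5)\n",
"a[a % 2 == 1] = -1  # overwrite all odd entries\n",
"print(a)"
]
},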
{
"cell_type": "markdown",
"id": "federal-wayne",
"metadata": {},
"source": [
"## Operations on PyTorch tensors\n",
"\n",
"It is important to remember that arithmetic operations on PyTorch tensors are always element-wise."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "southwest-biotechnology",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[ 0, 1, 4],\n",
" [ 9, 16, 25],\n",
" [36, 49, 64]])\n",
"\n",
"tensor([[0.0000, 1.0000, 1.4142],\n",
" [1.7321, 2.0000, 2.2361],\n",
" [2.4495, 2.6458, 2.8284]])\n",
"\n"
]
}
],
"source": [
"a = torch.tensor([i**2 for i in range(9)]).reshape((3, 3))\n",
"print(a)\n",
"print()\n",
"\n",
"b = torch.tensor([i**0.5 for i in range(9)]).reshape((3, 3))\n",
"print(b)\n",
"print()"
]
},
{
"cell_type": "markdown",
"id": "intensive-gates",
"metadata": {},
"source": [
"### Element-wise sum"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "behavioral-safety",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[ 0.0000, 2.0000, 5.4142],\n",
" [10.7321, 18.0000, 27.2361],\n",
" [38.4495, 51.6458, 66.8284]])\n"
]
}
],
"source": [
"print(a + b)"
]
},
{
"cell_type": "markdown",
"id": "occupied-trial",
"metadata": {},
"source": [
"### Element-wise multiplication"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "charming-pleasure",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[ 0.0000, 1.0000, 5.6569],\n",
" [ 15.5885, 32.0000, 55.9017],\n",
" [ 88.1816, 129.6418, 181.0193]])\n"
]
}
],
"source": [
"print(a * b)"
]
},
{
"cell_type": "markdown",
"id": "efficient-league",
"metadata": {},
"source": [
"### Matrix multiplication"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "changing-community",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[ 11.5300, 12.5830, 13.5498],\n",
" [ 88.9501, 107.1438, 119.2157],\n",
" [241.6378, 303.3281, 341.4984]], dtype=torch.float64)\n",
"\n",
"tensor([[ 0., 1., 4.],\n",
" [ 9., 16., 25.],\n",
" [36., 49., 64.]])\n"
]
}
],
"source": [
"print(np.matmul(a, b))\n",
"print()\n",
"\n",
"# Multiplication by the identity matrix (to check it works as expected)\n",
"id_matrix = torch.tensor(\n",
" [[1.0, 0.0, 0.0], \n",
" [0.0, 1.0, 0.0], \n",
" [0.0, 0.0, 1.0]]\n",
")\n",
"\n",
"# Tensor a contained integers (type Long by default) and must be changed to the float type\n",
"a = a.type(torch.FloatTensor)\n",
"\n",
"print(torch.matmul(id_matrix, a))"
]
},
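{
"cell_type": "markdown",
"id": "matmul-operator-note",
"metadata": {},
"source": [
"For 2D tensors the `@` operator and `torch.mm` are equivalent ways of writing the same matrix product. A brief unexecuted sketch:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "matmul-operator-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Equivalent ways to write a 2D matrix product\n",
"m1 = torch.tensor([[1.0, 2.0], [3.0, 4.0]])\n",
"m2 = torch.tensor([[0.0, 1.0], [1.0, 0.0]])\n",
"\n",
"print(torch.matmul(m1, m2))\n",
"print(m1 @ m2)           # the @ operator calls matmul\n",
"print(torch.mm(m1, m2))  # mm is restricted to 2D matrices"
]
},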
{
"cell_type": "markdown",
"id": "assisted-communications",
"metadata": {},
"source": [
"### Calculating the mean"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "defensive-wrong",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([3, 8, 7, 2, 6])\n",
"\n",
"Mean: tensor(5.2000)\n",
"\n",
"Mean: 5.199999809265137\n"
]
}
],
"source": [
"a = torch.randint(low=0, high=10, size=(5,))\n",
"\n",
"print(a)\n",
"print()\n",
"\n",
"print(\"Mean: \", torch.sum(a) / len(a))\n",
"print()\n",
"\n",
"# To get a single value use tensor.item()\n",
"\n",
"print(\"Mean: \", (torch.sum(a) / len(a)).item())"
]
},
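{
"cell_type": "markdown",
"id": "torch-mean-note",
"metadata": {},
"source": [
"The manual sum divided by length can also be written with `torch.mean`. Note that it requires a floating-point tensor, so the integer tensor has to be cast first. The cell below is an unexecuted sketch."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "torch-mean-sketch",
"metadata": {},
"outputs": [],
"source": [
"# torch.mean requires a floating-point dtype, so cast the integer tensor first\n",
"a = torch.randint(low=0, high=10, size=(5,))\n",
"print(torch.mean(a.float()))\n",
"print(torch.mean(a.float()).item())"
]
},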
{
"cell_type": "markdown",
"id": "complex-karma",
"metadata": {},
"source": [
"### Calculating the mean of every row"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "correct-dietary",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[1, 6, 8],\n",
" [6, 4, 8],\n",
" [1, 5, 8],\n",
" [2, 5, 7],\n",
" [1, 0, 4]])\n",
"\n",
"Mean: tensor([5.0000, 6.0000, 4.6667, 4.6667, 1.6667])\n",
"Mean in the original matrix form:\n",
"tensor([[5.0000],\n",
" [6.0000],\n",
" [4.6667],\n",
" [4.6667],\n",
" [1.6667]])\n"
]
}
],
"source": [
"a = torch.randint(low=0, high=10, size=(5, 3))\n",
"\n",
"print(a)\n",
"print()\n",
"\n",
"print(\"Mean:\", torch.sum(a, axis=1) / a.shape[1])\n",
"\n",
"print(\"Mean in the original matrix form:\")\n",
"print((torch.sum(a, axis=1) / a.shape[1]).reshape(-1, 1)) # -1 calculates the right size to use all elements"
]
},
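{
"cell_type": "markdown",
"id": "row-mean-note",
"metadata": {},
"source": [
"The same per-row mean can be computed directly with `torch.mean`; the `keepdim` argument preserves the original matrix form. An unexecuted sketch:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "row-mean-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Per-row mean with torch.mean; keepdim=True keeps the column shape\n",
"a = torch.randint(low=0, high=10, size=(5, 3)).float()\n",
"print(torch.mean(a, dim=1))\n",
"print(torch.mean(a, dim=1, keepdim=True))"
]
},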
{
"cell_type": "markdown",
"id": "indian-orlando",
"metadata": {},
"source": [
"### More complex operations\n",
"\n",
"Note that more complex tensor operations can only be performed on tensors. Numpy operations can be performed on numpy arrays but also directly on lists."
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "presidential-cologne",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Vector to power 2 (element-wise)\n",
"tensor([1., 4., 9.])\n",
"\n",
"Euler number to the power a (element-wise)\n",
"tensor([ 2.7183, 7.3891, 20.0855])\n",
"\n",
"An even more complex expression\n",
"tensor([0.6197, 1.8982, 4.8476])\n"
]
}
],
"source": [
"a = torch.tensor([1.0, 2.0, 3.0])\n",
"\n",
"print(\"Vector to power 2 (element-wise)\")\n",
"print(torch.pow(a, 2))\n",
"print()\n",
"print(\"Euler number to the power a (element-wise)\")\n",
"print(torch.exp(a))\n",
"print()\n",
"print(\"An even more complex expression\")\n",
"print((torch.pow(a, 2) + torch.exp(a)) / torch.sum(a))"
]
},
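{
"cell_type": "markdown",
"id": "list-vs-tensor-note",
"metadata": {},
"source": [
"To illustrate the note above: numpy functions accept plain lists, while the corresponding torch functions expect tensors. An unexecuted sketch:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "list-vs-tensor-sketch",
"metadata": {},
"outputs": [],
"source": [
"# np.exp accepts a plain list, torch.exp does not\n",
"print(np.exp([1.0, 2.0, 3.0]))  # works, returns a numpy array\n",
"\n",
"try:\n",
"    torch.exp([1.0, 2.0, 3.0])  # expects a tensor\n",
"except TypeError as e:\n",
"    print(\"torch.exp on a list failed:\", e)\n",
"\n",
"print(torch.exp(torch.tensor([1.0, 2.0, 3.0])))  # works on a tensor"
]
},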
{
"cell_type": "markdown",
"id": "hearing-street",
"metadata": {},
"source": [
"## PyTorch basic operations tasks"
]
},
{
"cell_type": "markdown",
"id": "regular-niger",
"metadata": {},
"source": [
"**Task 1.** Calculate the sigmoid (logistic) function on every element of the following array [0.3, 1.2, -1.4, 0.2, -0.1, 0.1, 0.8, -0.25] and print the last 5 elements. Use only tensor operations."
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "agreed-single",
"metadata": {},
"outputs": [],
"source": [
"# Write your code here"
]
},
{
"cell_type": "markdown",
"id": "another-catch",
"metadata": {},
"source": [
"**Task 2.** Calculate the dot product of the following two vectors:
\n",
"$x = [3, 1, 4, 2, 6, 1, 4, 8]$
\n",
"$y = [5, 2, 3, 12, 2, 4, 17, 9]$
\n",
"a) by using element-wise mutliplication and torch.sum,
\n",
"b) by using torch.dot,
\n",
"b) by using torch.matmul and transposition (x.T)."
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "forbidden-journalism",
"metadata": {},
"outputs": [],
"source": [
"# Write your code here"
]
},
{
"cell_type": "markdown",
"id": "acute-amber",
"metadata": {},
"source": [
"**Task 3.** Calculate the following expression
\n",
"$$\\frac{1}{1 + e^{-x_0 \\theta_0 - \\ldots - x_9 \\theta_9 - \\theta_{10}}}$$\n",
"for
\n",
"$x = [1.2, 2.3, 3.4, -0.7, 4.2, 2.7, -0.5, 1.4, -3.3, 0.2]$
\n",
"$\\theta = [1.7, 0.33, -2.12, -1.73, 2.9, -5.8, -0.9, 12.11, 3.43, -0.5, -1.65]$
\n",
"and print the result. Use only tensor operations."
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "falling-holder",
"metadata": {},
"outputs": [],
"source": [
"# Write your code here"
]
},
{
"cell_type": "markdown",
"id": "latter-vector",
"metadata": {},
"source": [
"# Tensor gradients\n",
"\n",
"Tensors are designed to be used in neural networks. Their most important functionality is automatic gradient and backward propagation calculation."
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "guided-interface",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"out=35.0\n",
"\n",
"gradient\n",
"tensor([[12., 3.],\n",
" [27., 3.]])\n"
]
}
],
"source": [
"x = torch.tensor([[2., -1.], [3., 1.]], requires_grad=True)\n",
"out = x.pow(3).sum() # the actual derivative is 3*x^2\n",
"print(\"out={}\".format(out))\n",
"print()\n",
"\n",
"out.backward()\n",
"print(\"gradient\")\n",
"print(x.grad)"
]
},
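{
"cell_type": "markdown",
"id": "no-grad-note",
"metadata": {},
"source": [
"When a computation should not be tracked for gradients, it can be wrapped in `torch.no_grad()` or the tensor can be detached. The following sketch was not executed in the original notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "no-grad-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Disabling gradient tracking\n",
"x = torch.tensor([[2., -1.], [3., 1.]], requires_grad=True)\n",
"\n",
"with torch.no_grad():\n",
"    y = x.pow(3).sum()\n",
"print(y.requires_grad)  # False - nothing was recorded\n",
"\n",
"z = x.detach()          # same data, no gradient history\n",
"print(z.requires_grad)  # False"
]
},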
{
"cell_type": "code",
"execution_count": 21,
"id": "nuclear-gothic",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[ 4., 2., -1.]])\n",
"tensor([[ 2., -1., 3.]])\n",
"tensor([[ 0.1807, 0.0904, -0.0452]])\n",
"tensor([[ 0.0904, -0.0452, 0.1355]])\n"
]
}
],
"source": [
"x = torch.tensor([[2., -1., 3.]], requires_grad=True)\n",
"y = torch.tensor([[4., 2., -1.]], requires_grad=True)\n",
"\n",
"z = torch.sum(x * y)\n",
"\n",
"z.backward()\n",
"print(x.grad)\n",
"print(y.grad)\n",
"\n",
"x.grad.data.zero_()\n",
"y.grad.data.zero_()\n",
"\n",
"z = torch.sigmoid(torch.sum(x * y))\n",
"\n",
"z.backward()\n",
"print(x.grad)\n",
"print(y.grad)"
]
},
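{
"cell_type": "markdown",
"id": "grad-accumulation-note",
"metadata": {},
"source": [
"The calls to `grad.data.zero_()` above are needed because gradients accumulate across `backward()` calls, as the unexecuted sketch below demonstrates."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "grad-accumulation-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Gradients accumulate across backward() calls unless they are zeroed\n",
"x = torch.tensor([1.0, 2.0], requires_grad=True)\n",
"\n",
"torch.sum(x * x).backward()\n",
"print(x.grad)  # 2*x -> tensor([2., 4.])\n",
"\n",
"torch.sum(x * x).backward()\n",
"print(x.grad)  # accumulated -> tensor([4., 8.])\n",
"\n",
"x.grad.zero_()\n",
"torch.sum(x * x).backward()\n",
"print(x.grad)  # back to tensor([2., 4.])"
]
},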
{
"cell_type": "markdown",
"id": "innovative-provider",
"metadata": {},
"source": [
"# Backpropagation\n",
"\n",
"In this section we train weights $w$ of a simple model $y = \\text{sigmoid}(w * x)$ to obtain $y = 0.65$ on $x = [2.0, -1.0, 3.0]$."
]
},
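{
"cell_type": "markdown",
"id": "training-loop-note",
"metadata": {},
"source": [
"Before the full training cell below, here is a minimal sketch of such an update loop, assuming a squared-error loss and plain gradient-descent steps with a hypothetical learning rate of 0.01; the actual cell that follows may differ in its details."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "training-loop-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Minimal gradient-descent sketch for y = sigmoid(w * x) with target 0.65\n",
"# (assumes a squared-error loss and a hypothetical learning rate)\n",
"x = torch.tensor([2.0, -1.0, 3.0])\n",
"w = torch.tensor([4.0, 2.0, -1.0], requires_grad=True)\n",
"target = 0.65\n",
"lr = 0.01\n",
"\n",
"for step in range(1000):\n",
"    y = torch.sigmoid(torch.sum(w * x))\n",
"    loss = (y - target) ** 2\n",
"    loss.backward()\n",
"    with torch.no_grad():\n",
"        w -= lr * w.grad  # gradient-descent update\n",
"    w.grad.zero_()\n",
"\n",
"print(y.item())\n",
"print(w)"
]
},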
{
"cell_type": "code",
"execution_count": 22,
"id": "supposed-sellers",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"x\n",
"tensor([ 2., -1., 3.])\n",
"x.grad\n",
"None\n",
"w\n",
"tensor([ 4., 2., -1.], requires_grad=True)\n",
"w.grad\n",
"None\n",
"\n",
"\n",
"w\n",
"tensor([ 3.9945, 2.0027, -1.0082], requires_grad=True)\n",
"w.grad\n",
"tensor([ 0.0547, -0.0273, 0.0820])\n",
"y\n",
"tensor(0.9526, grad_fn=)\n",
"loss\n",
"tensor(0.0916, grad_fn=)\n",
"\n",
"\n",
"w\n",
"tensor([ 3.9889, 2.0055, -1.0166], requires_grad=True)\n",
"w.grad\n",
"tensor([ 0.0563, -0.0281, 0.0844])\n",
"y\n",
"tensor(0.9508, grad_fn=)\n",
"loss\n",
"tensor(0.0905, grad_fn=)\n",
"\n",
"\n",
"w\n",
"tensor([ 3.9831, 2.0084, -1.0253], requires_grad=True)\n",
"w.grad\n",
"tensor([ 0.0579, -0.0290, 0.0869])\n",
"y\n",
"tensor(0.9489, grad_fn=)\n",
"loss\n",
"tensor(0.0894, grad_fn=)\n",
"\n",
"\n",
"w\n",
"tensor([ 3.6599, 2.1701, -1.5102], requires_grad=True)\n",
"w.grad\n",
"tensor([ 6.1291e-06, -3.0645e-06, 9.1936e-06])\n",
"y\n",
"tensor(0.6500, grad_fn=)\n",
"loss\n",
"tensor(4.5365e-11, grad_fn=)\n",
"\n",
"\n",
"w\n",
"tensor([ 3.6599, 2.1701, -1.5102], requires_grad=True)\n",
"w.grad\n",
"tensor([ 5.0985e-06, -2.5493e-06, 7.6478e-06])\n",
"y\n",
"tensor(0.6500, grad_fn=)\n",
"loss\n",
"tensor(3.1392e-11, grad_fn=)\n",
"\n",
"\n",
"w\n",
"tensor([ 3.6599, 2.1701, -1.5102], requires_grad=True)\n",
"w.grad\n",
"tensor([ 4.4477e-06, -2.2238e-06, 6.6715e-06])\n",
"y\n",
"tensor(0.6500, grad_fn=)\n",
"loss\n",
"tensor(2.3888e-11, grad_fn=)\n",
"\n",
"\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAkGklEQVR4nO3deXhV5bn+8e+TGQiEKUwZCKOIgIoBxFr1J9aCtVILKuBArRbbU22t9LTYns7n9KenA7Z1ONpSiyNYrRa1dZ4HhgCiDIIRZJ6nAIGEJM/5Yy88Md1IgOysPdyf6+JiDe/e+1nXgtxZ77vXu8zdERERaSgt7AJERCQ+KSBERCQqBYSIiESlgBARkagUECIiEpUCQkREolJAiByGmf3TzCY2ddujrOEcM1vX1O8r0hgZYRcg0pTMbG+91ZZAFVAbrF/n7g829r3cfVQs2ookCgWEJBV3zz20bGYfAde6+wsN25lZhrvXNGdtIolGXUySEg511ZjZ981sE3CvmbUzs6fMbKuZ7QyWC+u95hUzuzZY/oqZvWFmvw7arjKzUcfYtoeZvWZme8zsBTO7w8weaORxnBh81i4zW2JmF9Xbd4GZLQ3ed72ZfTfY3jE4tl1mtsPMXjcz/d+XI9I/EkklXYD2QHdgEpF///cG68XAfuD2T3n9MGA50BH4b2CamdkxtH0ImAt0AH4KXNmY4s0sE3gSeA7oBNwAPGhmJwRNphHpRmsNDABeCrZPBtYB+UBn4AeA5tiRI1JASCqpA37i7lXuvt/dt7v7Y+5e6e57gP8Czv6U16929z+6ey0wHehK5Aduo9uaWTEwBPixu1e7+xvArEbWfzqQC9wSvPYl4ClgfLD/INDfzNq4+053X1Bve1egu7sfdPfXXZOwSSMoICSVbHX3A4dWzKylmd1tZqvNrAJ4DWhrZumHef2mQwvuXhks5h5l227AjnrbANY2sv5uwFp3r6u3bTVQECyPAS4AVpvZq2Y2PNj+K6AceM7MVprZlEZ+nqQ4BYSkkoa/NU8GTgCGuXsb4Kxg++G6jZrCRqC9mbWst62oka/dABQ1GD8oBtYDuPs8dx9NpPvpCeCRYPsed5/s7j2Bi4CbzGzE8R2GpAIFhKSy1kTGHXaZWXvgJ7H+QHdfDZQBPzWzrOC3/C828uVzgErge2aWaWbnBK+dEbzX5WaW5+4HgQoiXWqY2YVm1jsYA9lN5Gu/dVE/QaQeBYSkstuAFsA2YDbwTDN97uXAcGA78J/ATCL3a3wqd68mEgijiNR8J3CVu78fNLkS+CjoLvt68DkAfYAXgL3A28Cd7v5ykx2NJC3TWJVIuMxsJvC+u8f8CkbkaOgKQqSZmdkQM+tlZmlmNhIYTWTMQCSu6E5qkebXBfgbkfsg1gHfcPeF4ZYk8q/UxSQiIlGpi0lERKJKmi6mjh07eklJSdhliIgklPnz529z9/xo+5ImIEpKSigrKwu7DBGRhGJmqw+3T11MIiISlQJCRESiUkCIiEhUCggREYlKASEiIlEpIEREJCoFhIiIRJXyAXHgYC0/nbWEbXuPONuyiEhKSfmAWLR2Fw/NXcMFv3ud2Su3h12OiEjcSPmAGNazA0/822fIzc5gwh9n8/sXP6C2ThMYioikfEAA9O/Whlk3nMlFJ3fjt8+v4Kt/mceuyuqwyxIRCZUCIpCbncHUy07hlxcP5K0Pt3HR7W+ydENF2GWJiIRGAVGPmTFhWDEzrxtOdU0dX77rTZ5ctCHsskREQqGAiGJwcTuevOFMBhbkccPDC5n6/Ar0YCURSTUKiMPIb53NA9cOY+xphfzuxQ+4/uGF7K+uDbssEZFmkzTPg4iF7Ix0fjV2EH065XLLM++zYdd+pk0cQvtWWWGXJiISc7qCOAIz47qze3HX5aexdEMFY+96i7U7KsMuS0Qk5hQQjTRyQBcevHYY2/dVc/Gdb7F4/e6wSxIRiSkFxFEoLWnPY984g+yMNMbfM5u5q3aEXZKISMwoII5S7065PPqN4eS3yeaqP8/h1RVbwy5JRCQmFBDHoGteCx65bjg9O+Zy7fR5PLN4U9gliYg0OQXEMeqYm83Dk05nYEEe1z+0QCEhIklHAXEc8lpkMv2rQxlUqJAQkeSjgDhOrXMiITEwCIlnlygkRCQ5KCCawKGQGFCQxw0PLeT1DzRwLSKJTwHRRNrkZDL96qH0zG/FpPvmM3/1zrBLEhE5LgqIJpTXMpP7rxlG5zbZXH3vXE0XLiIJTQHRxA5N8tcqO4Or/jxX03KISMJSQMRAYbuW3H/NUKpravnKvXP1dDoRSUgKiBjp3ak1f7yqlLU79vO1+8o4cFBThYtIYolpQJjZSDNbbmblZjYlyv5sM5sZ7J9jZiXB9kwzm25m75nZMjO7OZZ1xsqwnh349aUnM++jnUz+6yLq6vTQIRFJHDELCDNLB+4ARgH9gfFm1r9Bs2uAne7eG5gK3BpsvwTIdveBwGnAdYfCI9FcdHI3pozqx9PvbuT3L30QdjkiIo0WyyuIoUC5u69092pgBjC6QZvRwPRg+VFghJkZ4EArM8sAWgDVQMJ+Jei6s3oyZnAht73wAU+/uzHsckREGiWWAVEArK23vi7YFrWNu9cAu4EORMJiH7ARWAP82t3/ZW5tM5tkZmVmVrZ1a/zenGZm/PLLAxhc3JbJf31Hz5IQkYQQr4PUQ4FaoBvQA5hsZj0bNnL3e9y91N1L8/Pzm7vGo5Kdkc7dV5bSvmUWX7uvjO17q8IuSUTkU8UyINYDRfXWC4NtUdsE3Ul5wHZgAvCMux909y3Am0BpDGttFvmts7nnqlJ27KvmWzMWUqtBaxGJY7EMiHlAHzPrYWZZwDhgVoM2s4CJwfJY4CV3dyLdSucCmFkr4HTg/RjW2mwGFOTxiy8N4M3y7fzmueVhlyMiclgxC4hgTOF64FlgGfCIuy8xs5+b2UVBs2lABzMrB24CDn0V9g4g18yWEAmae9393VjV2twuLS1i/NAi7nzlQ57T7K8iEqcs8gt74istLfWysrKwy2i0AwdrueR/3uaj7fv4x7c+S1H7lmGXJCIpyMzmu3vULvx4HaROejmZ6dx5+WBw+NaMhRysrQu7JBGRT1BAhKiofUv+/5iBLFyzi6nPrwi7HBGRT1BAhOzCQd0YN6SIu179kDfLt4VdjojIxxQQceDHX+xPz46t+M7Md9ixTzO/ikh8UEDEgZZZGfxh/GB2VlbzH0+8R7J8cUBEEpsCIk7079aG73yuL/94bxOzFm0IuxwREQVEPLnurF4MLm7Lj55YzKbdB8IuR0RSnAIijqSnGb+59BQO1jrfe+xddTWJSKgUEHGmR8dW3HxBP15bsZW/lq0LuxwRSWEKiDh0xbDuDO3Rnv98eilbKtTVJCLhUEDEobQ045YvD+RATR0/mbUk7HJEJEUpIOJUz/xcvj2iD/9cvIlnFmtCPxFpfgqIODbprJ6c2LUNP/77YnbvPxh2OSKSYhQQcSwzPY1bxwxk294qfqtnR4hIM1NAxLlBhW254vTu3D97t
Z5lLSLNSgGRACaffwLtWmbxo78vpk6PKRWRZqKASAB5LTK5+YITWbhmF3+dvzbsckQkRSggEsSYwQUMKWnHLf98n52a8VVEmoECIkGYGb/40gB27z/IbS/o4UIiEnsKiATSr0sbJgwr5oE5a/hg856wyxGRJKeASDDfOa8vLbPS+c+nl4VdiogkOQVEgumQm823R/Th1RVbeXn5lrDLEZEkpoBIQFcNL6GkQ0v+6+llHKytC7scEUlSCogElJWRxg+/0J/yLXt5eO6asMsRkSSlgEhQ553YiaE92vP7Fz9gX1VN2OWISBJSQCQoM2PKqH5s21vNtDdWhV2OiCQhBUQCG1zcjs+f1Jm7X/2Q7Xurwi5HRJKMAiLB/fvnT2D/wVpuf7k87FJEJMkoIBJc706tubS0iAdmr2btjsqwyxGRJKKASAI3nteXNDNue+GDsEsRkSSigEgCXfJyuOL07jy+cB2rtu0LuxwRSRIKiCTx9bN7kZWRxu9f1FWEiDQNBUSSyG+dzcThJfz9nfWUb9FEfiJy/BQQSWTSWT3JyUzndy/qG00icvxiGhBmNtLMlptZuZlNibI/28xmBvvnmFlJvX2DzOxtM1tiZu+ZWU4sa00GHXKz+coZJTz17gZWaDpwETlOMQsIM0sH7gBGAf2B8WbWv0Gza4Cd7t4bmArcGrw2A3gA+Lq7nwScAxyMVa3J5Guf7UmrrAyNRYjIcYvlFcRQoNzdV7p7NTADGN2gzWhgerD8KDDCzAw4H3jX3RcBuPt2d6+NYa1Jo12rLK4c3p2n39vIyq17wy5HRBJYLAOiAFhbb31dsC1qG3evAXYDHYC+gJvZs2a2wMy+F+0DzGySmZWZWdnWrVub/AAS1TVn9iA7I427Xvkw7FJEJIHF6yB1BnAmcHnw98VmNqJhI3e/x91L3b00Pz+/uWuMWx1zsxk3pJjHF65n3U7dXS0ixyaWAbEeKKq3Xhhsi9omGHfIA7YTudp4zd23uXsl8A9gcAxrTTrXnd0TM7j71ZVhlyIiCSqWATEP6GNmPcwsCxgHzGrQZhYwMVgeC7zk7g48Cww0s5ZBcJwNLI1hrUmna14LxgwuZGbZWrZUHAi7HBFJQDELiGBM4XoiP+yXAY+4+xIz+7mZXRQ0mwZ0MLNy4CZgSvDancBviYTMO8ACd386VrUmq6+f3Yua2jr+pOdFiMgxsMgv7ImvtLTUy8rKwi4j7tzw8EJefn8Lb918Lm1yMsMuR0TijJnNd/fSaPvidZBamsh1Z/Vkb1UND8/Rs6tF5OgoIJLcgII8zujVgXvf/IjqmrqwyxGRBKKASAGTzurJpooDzFq0IexSRCSBKCBSwNl98+nXpTV/fG0lyTLmJCKxp4BIAWbGpLN6snzzHl5ZoTvORaRxFBAp4osnd6NrXg736MY5EWkkBUSKyExPY+IZJby9cjvLNlaEXY6IJAAFRAoZN6SIFpnp3PumbpwTkSNTQKSQti2zGHNaAU+8s4Fte6vCLkdE4pwCIsV85YweVNfU8ZBunBORI1BApJjenXI5u28+989eTVWNnsEkIoengEhBXz2zB1v3VPH0uxvDLkVE4pgCIgWd1acjvTvlcu+bH+nGORE5LAVECjIzJg7vznvrd/PO2l1hlyMicUoBkaIuHlxIbnYG9729OuxSRCROKSBSVG52BmMGF/D0uxv1lVcRiUoBkcKuHF5CdW0dM+etDbsUEYlDCogU1rtTLmf27sgDs1dTU6tnRYjIJykgUtyVw7uzcfcBXli2JexSRCTONCogzOzbZtbGIqaZ2QIzOz/WxUnsjejXiYK2Lbjv7Y/CLkVE4kxjryC+6u4VwPlAO+BK4JaYVSXNJiM9jQnDinnrw+18uHVv2OWISBxpbEBY8PcFwP3uvqTeNklwl5YWkZFmmp9JRD6hsQEx38yeIxIQz5pZa0Cjmkkiv3U2nx/QhUfnr+PAQc3PJCIRjQ2Ia4ApwBB3rwQygatjVpU0u8uHFbN7/0HNzyQiH2tsQAwHlrv7LjO7AvgPYHfsypLmNrxnB3rmt+KBObqzWkQiGhsQdwGVZnYyMBn4ELgvZlVJszMzLh/WnYVrdrFkg7JfRBofEDUemfZzNHC7u98BtI5dWRKGMYMLyM5I02C1iACND4g9ZnYzka+3Pm1maUTGISSJtG2ZxYWDuvHEwvXsq6oJuxwRCVljA+IyoIrI/RCbgELgVzGrSkIzYVgx+6preXLRhrBLEZGQNSogglB4EMgzswuBA+6uMYgkNLi4LX075/LwXHUziaS6xk61cSkwF7gEuBSYY2ZjY1mYhMPMGD+0mEXrdrN4vQarRVJZY7uYfkjkHoiJ7n4VMBT4UezKkjBdfGpksHrGPF1FiKSyxgZEmrvXn+5z+1G8VhJM25ZZXDCwK39fuIHKag1Wi6Sqxv6Qf8bMnjWzr5jZV4CngX/EriwJ2/ihxeypquEp3VktkrIaO0j978A9wKDgzz3u/v0jvc7MRprZcjMrN7MpUfZnm9nMYP8cMytpsL/YzPaa2XcbdTTSZIaUtKNXfisNVouksEZ3E7n7Y+5+U/Dn8SO1N7N04A5gFNAfGG9m/Rs0uwbY6e69ganArQ32/xb4Z2NrlKZzaLB64ZpdLN+0J+xyRCQEnxoQZrbHzCqi/NljZhVHeO+hQLm7r3T3amAGkTux6xsNTA+WHwVGmJkFn/0lYBWw5CiPSZrIxacWkJluGqwWSVGfGhDu3trd20T509rd2xzhvQuAtfXW1wXborZx9xoiEwB2MLNc4PvAz47mYKRpdcjN5vyTuvD4wvWaBlwkBcXrN5F+Ckx19099xJmZTTKzMjMr27p1a/NUlmLGDSliV+VBnlu6OexSRKSZxTIg1gNF9dYLg21R25hZBpBH5Cu0w4D/NrOPgBuBH5jZ9Q0/wN3vcfdSdy/Nz89v8gMQ+EyvjhS0bcFMdTOJpJxYBsQ8oI+Z9TCzLGAcMKtBm1nAxGB5LPCSR3zW3UvcvQS4Dfilu98ew1rlMNLSjMuGFPFm+XbWbK8MuxwRaUYxC4hgTOF64FlgGfCIuy8xs5+b2UVBs2lExhzKgZuIPLVO4swlpYWkGcws01WESCqxyGMeEl9paamXlZWFXUbS+upf5rF4/W7emnIuGenxOnQlIkfLzOa7e2m0ffqfLo1y2ZAituyp4pXl+jKASKpQQEijnNuvEx1zs5kxb+2RG4tIUlBASKNkpqcx9rRCXl6+hS0VB8IuR0SagQJCGu2yIUXU1jmPLlgXdiki0gwUENJoPTq2YmiP9syct5Zk+XKDiByeAkKOyrghRazeXsnslTvCLkVEYkwBIUdl1ICutM7J0J3VIilAASFHpUVWOl86pYB/LN7E7sqDYZcjIjGkgJCjdtmQIqpr6nh8oQarRZKZAkKO2oCCPAYUtGGGBqtFkpoCQo7JuCHFvL9pD4vW7Q67FBGJEQWEHJPRp3SjRWY6M/TMapGkpYCQY9I6J5MLB3Vl1qIN7K2qCbscEYkBBYQcs3FDi6msruXJRRvCLkVEYkABIcdscHFb+nbO1QR+
IklKASHHzMy4bEgxi9buYumGirDLEZEmpoCQ4zJmcAFZGWk8rMFqkaSjgJDj0rZlFl8Y2JUnFq6nslqD1SLJRAEhx23CsGL2VNVosFokySgg5LiVdm9H3865PDRH3UwiyUQBIcfNzJgwtJhF63azeL3urBZJFgoIaRIXDy4kJzONB3UVIZI0FBDSJPJaZHLhoG7Meme97qwWSRIKCGkyE4YVs6+6lscXrg+7FBFpAgoIaTKnFrVlQEEb7n/7I00DLpIEFBDSZMyMq04vYcXmvcxZpWdWiyQ6BYQ0qS+e3I28Fpnc//bqsEsRkeOkgJAm1SIrnUtLC3l2ySY2VxwIuxwROQ4KCGlyV5zenVp33TgnkuAUENLkundoxTl983lo7hqqa+rCLkdEjpECQmLiquElbN1TxTNLNoVdiogcIwWExMTZffPp0bEV095Ypa+8iiQoBYTERFqacfVnSli0dhcL1uwMuxwROQYKCImZMYMLaZOTwbQ3VoVdiogcAwWExEyr7AwmDOvOM4s3sXZHZdjliMhRimlAmNlIM1tuZuVmNiXK/mwzmxnsn2NmJcH2z5nZfDN7L/j73FjWKbEz8YzupJkx/a2Pwi5FRI5SzALCzNKBO4BRQH9gvJn1b9DsGmCnu/cGpgK3Btu3AV9094HAROD+WNUpsdU1rwUXDOzKjHlr2XPgYNjliMhRiOUVxFCg3N1Xuns1MAMY3aDNaGB6sPwoMMLMzN0Xuvuh51cuAVqYWXYMa5UYuvazPdhbVcPMeWvDLkVEjkIsA6IAqP8TYV2wLWobd68BdgMdGrQZAyxw96qGH2Bmk8yszMzKtm7d2mSFS9MaVNiW03u254+vr6SqpjbsckSkkeJ6kNrMTiLS7XRdtP3ufo+7l7p7aX5+fvMWJ0flm/+vN5srqnh8gZ4VIZIoYhkQ64GieuuFwbaobcwsA8gDtgfrhcDjwFXu/mEM65RmcGbvjgwqzOOuVz+kplbTb4gkglgGxDygj5n1MLMsYBwwq0GbWUQGoQHGAi+5u5tZW+BpYIq7vxnDGqWZmBn/dk5vVm+v5B+LNf2GSCKIWUAEYwrXA88Cy4BH3H2Jmf3czC4Kmk0DOphZOXATcOirsNcDvYEfm9k7wZ9OsapVmsf5/TvTu1Mud75cruk3RBKAJct/1NLSUi8rKwu7DDmCx+avY/JfF/Gnq0o5r3/nsMsRSXlmNt/dS6Pti+tBakk+F53SjaL2LbjtxRW6ihCJcwoIaVaZ6Wl8e0RfFq+v4BmNRYjENQWENLuLTy2gd6dcfvP8CmrrdBUhEq8UENLs0tOMyZ/rS/mWvTyxUPdFiMQrBYSEYuSALgwoaMPUF1bosaQicUoBIaEwM757/gms27mfGfPWhF2OiEShgJDQnN03n2E92jP1+RXsrtRMryLxRgEhoTEzfvLFk9i9/yBTX1gRdjki0oACQkLVv1sbxg8t5v7Zq1mxeU/Y5YhIPQoICd3k80+gVVY6P3tyiW6eE4kjCggJXftWWdz0ub68Wb6dZ5dsDrscEQkoICQuXHF6d/p1ac3PnlxChR5NKhIXFBASFzLS07h1zCA2Vxzgl08vC7scEUEBIXHk5KK2TDqrFzPmreW1FXqErEjYFBASV248rw+98lsx5bF32aOuJpFQKSAkruRkpvOrS05mU8UBfvHU0rDLEUlpCgiJO4OL2/H1s3vxSNk6Hpu/LuxyRFKWAkLi0k2f68uwHu354RPv8f6mirDLEUlJCgiJSxnpafxhwqm0zsnkGw8s0HiESAgUEBK3OrXO4fbxp7JmRyWTH1mkhwuJNDMFhMS1YT078MMLTuS5pZs1FYdIM8sIuwCRI/nqmT3YXHGAu19bSX5uNjeM6BN2SSIpQQEhCeH7I/uxdW8Vv3l+BR1ys5kwrDjskkSSngJCEkJamnHrmEHs3FfNDx5/j6qaWq7+TI+wyxJJahqDkISRmZ7GXVecxudP6szPnlzKb55brjEJkRhSQEhCyclM544Jg7mstIg/vFTODx5/j+qaurDLEklK6mKShJORnsYtYwbSITeLO1/5kKUb93DHhFMpbNcy7NJEkoquICQhmRnfG9mPuy4fzMote/nC79/g+aV62JBIU1JASEIbNbArT33rTArbteBr95Vx/UML2FxxIOyyRJKCAkISXvcOrXjsG2dw43l9eG7pZkb85lWmvbGKqprasEsTSWgKCEkKOZnp3HheX5678SwGd2/HL55aytn//Qr3vrmKAwcVFCLHwpLla4KlpaVeVlYWdhkSB9ydN8q38YeXypm7agcdc7MYe1oRl5QW0is/N+zyROKKmc1399Ko+xQQkszmrNzOH19fxcvLt1Bb55R2b8fIAV0478TOlHRsFXZ5IqFTQEjK21JxgL8tXM/jC9azfPMeAHrlt2J4rw6Udm/Pad3bUdiuBWYWcqUizSu0gDCzkcDvgHTgT+5+S4P92cB9wGnAduAyd/8o2HczcA1QC3zL3Z/9tM9SQEhjrdleyYvvb+bl5VtZsHone6tqAGjbMpN+XVrTr0sbeua3orh9S7p3aEXXvBxyMtNDrlokNkIJCDNLB1YAnwPWAfOA8e6+tF6bfwMGufvXzWwccLG7X2Zm/YGHgaFAN+AFoK+7H3a0UQEhx6K2znl/UwXzV+9k2cYK3t+0h+Wb9lBZ/cl/au1aZtK5TQ4dc7Np1yqL9i0zyWuRSeucTFrnZJCbk0HLrHRaZGbQIiudnMw0sjPSyc5IIzM9jayMNDLTjYy0NDLSjLQ0XalIfPi0gIjlndRDgXJ3XxkUMQMYDdR/Ev1o4KfB8qPA7Ra5xh8NzHD3KmCVmZUH7/d2DOuVFJSeZpzULY+TuuV9vM3d2bqnitU7Klm9vZJNu/ezqeIAm3ZXsWNfFet37Wf73ir2VNVwrL9fpVnks9PMSE8z0i0SGmkGaWaYRZbt0Dp83P1lwXYAw+ot84kusn+JIIu6+MkmMe5iUyzGxjkn5PPDL/Rv8veNZUAUAGvrra8Dhh2ujbvXmNluoEOwfXaD1xY0/AAzmwRMAigu1vTP0jTMjE5tcujUJochJe0P266uztlbXcOeAzXsq6qhsrqWyuoa9lfXUl1TR1VNHVU1tVTXOtU1dRysraO2zjlYW0dNrVPrTl2dU1sXWXaPXNHUuePB+7uD49Q5Hy8ThJLDx5MVRpb/r7aGuVW/p+CwmRbj4UiP9QeksM5tcmLyvgk9F5O73wPcA5EuppDLkRSTlma0ycmkTU5m2KWIxEQsb5RbDxTVWy8MtkVtY2YZQB6RwerGvFZERGIolgExD+hjZj3MLAsYB8xq0GYWMDFYHgu85JFr4VnAODPLNrMeQB9gbgxrFRGRBmLWxRSMKVwPPEvka65/dvclZvZzoMzdZwHTgPuDQegdREKEoN0jRAa0a4Bvfto3mEREpOnpRjkRkRT2aV9z1WR9IiISlQJCRESiUkCIiEhUCggREYkqaQapzWwrsPo43qIjsK2JykkUqXjMkJrHrWNOHUd73N3dPT/ajqQJiONlZmWHG8lPVql4zJCax61jTh1
NedzqYhIRkagUECIiEpUC4v/cE3YBIUjFY4bUPG4dc+posuPWGISIiESlKwgREYlKASEiIlGlfECY2UgzW25m5WY2Jex6YsHMiszsZTNbamZLzOzbwfb2Zva8mX0Q/N0u7FpjwczSzWyhmT0VrPcwsznBOZ8ZTEefNMysrZk9ambvm9kyMxueCufazL4T/PtebGYPm1lOMp5rM/uzmW0xs8X1tkU9vxbx++D43zWzwUfzWSkdEGaWDtwBjAL6A+PNrOkf7Bq+GmCyu/cHTge+GRznFOBFd+8DvBisJ6NvA8vqrd8KTHX33sBO4JpQqoqd3wHPuHs/4GQix57U59rMCoBvAaXuPoDIIwbGkZzn+i/AyAbbDnd+RxF5nk4fIo9nvutoPiilAwIYCpS7+0p3rwZmAKNDrqnJuftGd18QLO8h8gOjgMixTg+aTQe+FEqBMWRmhcAXgD8F6wacCzwaNEmq4zazPOAsIs9awd2r3X0XKXCuiTzfpkXwdMqWwEaS8Fy7+2tEnp9T3+HO72jgPo+YDbQ1s66N/axUD4gCYG299XXBtqRlZiXAqcAcoLO7bwx2bQI6h1VXDN0GfA+oC9Y7ALvcvSZYT7Zz3gPYCtwbdKv9ycxakeTn2t3XA78G1hAJht3AfJL7XNd3uPN7XD/jUj0gUoqZ5QKPATe6e0X9fcGjXpPqO89mdiGwxd3nh11LM8oABgN3ufupwD4adCcl6bluR+S35R5AN6AV/9oNkxKa8vymekCsB4rqrRcG25KOmWUSCYcH3f1vwebNhy43g7+3hFVfjHwGuMjMPiLSfXgukf75tkE3BCTfOV8HrHP3OcH6o0QCI9nP9XnAKnff6u4Hgb8ROf/JfK7rO9z5Pa6fcakeEPOAPsE3HbKIDGrNCrmmJhf0u08Dlrn7b+vtmgVMDJYnAn9v7tpiyd1vdvdCdy8hcm5fcvfLgZeBsUGzpDpud98ErDWzE4JNI4g82z2pzzWRrqXTzaxl8O/90HEn7blu4HDndxZwVfBtptOB3fW6oo4o5e+kNrMLiPRTpwN/dvf/CreipmdmZwKvA+/xf33xPyAyDvEIUExkqvRL3b3h4FdSMLNzgO+6+4Vm1pPIFUV7YCFwhbtXhVhekzKzU4gMymcBK4GrifwymNTn2sx+BlxG5Ft7C4FrifS3J9W5NrOHgXOITOu9GfgJ8ARRzm8QlrcT6W6rBK5297JGf1aqB4SIiESX6l1MIiJyGAoIERGJSgEhIiJRKSBERCQqBYSIiESlgBCJA2Z2zqHZZkXihQJCRESiUkCIHAUzu8LM5prZO2Z2d/Csib1mNjV4FsGLZpYftD3FzGYH8/A/Xm+O/t5m9oKZLTKzBWbWK3j73HrPcXgwuMlJJDQKCJFGMrMTidyp+xl3PwWoBS4nMjFcmbufBLxK5M5WgPuA77v7ICJ3sR/a/iBwh7ufDJxBZPZRiMyyeyORZ5P0JDKXkEhoMo7cREQCI4DTgHnBL/ctiEyKVgfMDNo8APwteC5DW3d/Ndg+HfirmbUGCtz9cQB3PwAQvN9cd18XrL8DlABvxPyoRA5DASHSeAZMd/ebP7HR7EcN2h3r/DX15wiqRf8/JWTqYhJpvBeBsWbWCT5+DnB3Iv+PDs0YOgF4w913AzvN7LPB9iuBV4Mn+q0zsy8F75FtZi2b8yBEGku/oYg0krsvNbP/AJ4zszTgIPBNIg/lGRrs20JknAIi0y7/TxAAh2ZVhUhY3G1mPw/e45JmPAyRRtNsriLHycz2untu2HWINDV1MYmISFS6ghARkah0BSEiIlEpIEREJCoFhIiIRKWAEBGRqBQQIiIS1f8CaBmvcsgnclYAAAAASUVORK5CYII=\n",
"text/plain": [
"