diff --git a/cw/08_regresja_logistyczna.ipynb b/cw/08_regresja_logistyczna.ipynb
new file mode 100644
index 0000000..efcec1b
--- /dev/null
+++ b/cw/08_regresja_logistyczna.ipynb
@@ -0,0 +1,1050 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Regresja logistyczna"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## import bibliotek"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/media/kuba/ssdsam/anaconda3/lib/python3.8/site-packages/gensim/similarities/__init__.py:15: UserWarning: The gensim.similarities.levenshtein submodule is disabled, because the optional Levenshtein package is unavailable. Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning.\n",
+ " warnings.warn(msg)\n"
+ ]
+ }
+ ],
+ "source": [
+ "import numpy as np\n",
+ "import gensim\n",
+ "import torch\n",
+ "import pandas as pd\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "\n",
+ "from sklearn.datasets import fetch_20newsgroups\n",
+ "# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html\n",
+ "\n",
+ "from sklearn.feature_extraction.text import TfidfVectorizer\n",
+ "from sklearn.metrics import accuracy_score"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "CATEGORIES = ['soc.religion.christian', 'alt.atheism']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "newsgroups_train_dev = fetch_20newsgroups(subset = 'train', categories=CATEGORIES)\n",
+ "newsgroups_test = fetch_20newsgroups(subset = 'test', categories=CATEGORIES)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "newsgroups_train_dev_text = newsgroups_train_dev['data']\n",
+ "newsgroups_test_text = newsgroups_test['data']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Y_train_dev = newsgroups_train_dev['target']\n",
+ "Y_test = newsgroups_test['target']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "newsgroups_train_text, newsgroups_dev_text, Y_train, Y_dev = train_test_split(newsgroups_train_dev_text, Y_train_dev, random_state=42)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Y_names = newsgroups_train_dev['target_names']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['alt.atheism', 'soc.religion.christian']"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "Y_names"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## baseline"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## zadanie (5 minut)\n",
+ "\n",
+ "- stworzyć baseline "
+ ]
+ },
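+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A minimal sketch of one possible solution: a majority-class baseline that always predicts the most frequent training label (assuming the Y variables defined above)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# majority-class baseline: always predict the most frequent label from the training set\n",
+ "majority_label = pd.value_counts(Y_train).idxmax()\n",
+ "print(accuracy_score(Y_train, np.full_like(Y_train, majority_label)))\n",
+ "print(accuracy_score(Y_dev, np.full_like(Y_dev, majority_label)))\n",
+ "print(accuracy_score(Y_test, np.full_like(Y_test, majority_label)))"
+ ]
+ },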
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### PYTANIE: co jest nie tak z regresją liniową?"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Regresja logistyczna"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### wektoryzacja"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## zadanie (5 minut)\n",
+ "\n",
+ "- na podstawie newsgroups_train_text stworzyć tfidf wektoryzer ze słownikiem max 10_000\n",
+ "- wygenerować wektory: X_train, X_dev, X_test"
+ ]
+ },
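+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A sketch of one possible solution: fit the vectorizer on the training texts only, then reuse it for dev and test. FEATURES is defined here as well, since the model below needs the input dimension."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "FEATURES = 10_000\n",
+ "\n",
+ "# the vocabulary is learned from the training part only\n",
+ "vectorizer = TfidfVectorizer(max_features=FEATURES)\n",
+ "X_train = vectorizer.fit_transform(newsgroups_train_text)\n",
+ "X_dev = vectorizer.transform(newsgroups_dev_text)\n",
+ "X_test = vectorizer.transform(newsgroups_test_text)"
+ ]
+ },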
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### model - inicjalizacja "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class LogisticRegressionModel(torch.nn.Module):\n",
+ "\n",
+ " def __init__(self):\n",
+ " super(LogisticRegressionModel, self).__init__()\n",
+ " self.fc = torch.nn.Linear(FEAUTERES,1)\n",
+ "\n",
+ " def forward(self, x):\n",
+ " x = self.fc(x)\n",
+ " x = torch.sigmoid(x)\n",
+ " return x"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lr_model = LogisticRegressionModel()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "tensor([[0.4978],\n",
+ " [0.5009],\n",
+ " [0.4998],\n",
+ " [0.4990],\n",
+ " [0.5018]], grad_fn=)"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "lr_model(torch.Tensor(X_train[0:5].astype(np.float32).todense()))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "LogisticRegressionModel(\n",
+ " (fc): Linear(in_features=10000, out_features=1, bias=True)\n",
+ ")"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "lr_model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Parameter containing:\n",
+ " tensor([[-0.0059, 0.0035, 0.0021, ..., -0.0042, -0.0057, -0.0049]],\n",
+ " requires_grad=True),\n",
+ " Parameter containing:\n",
+ " tensor([-0.0023], requires_grad=True)]"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "list(lr_model.parameters())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## model - trenowanie"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "BATCH_SIZE = 5"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "criterion = torch.nn.BCELoss()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "optimizer = torch.optim.SGD(lr_model.parameters(), lr = 0.1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "809"
+ ]
+ },
+ "execution_count": 26,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "Y_train.shape[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "loss_score = 0\n",
+ "acc_score = 0\n",
+ "items_total = 0\n",
+ "lr_model.train()\n",
+ "for i in range(0, Y_train.shape[0], BATCH_SIZE):\n",
+ " X = X_train[i:i+BATCH_SIZE]\n",
+ " X = torch.tensor(X.astype(np.float32).todense())\n",
+ " Y = Y_train[i:i+BATCH_SIZE]\n",
+ " Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)\n",
+ " Y_predictions = lr_model(X)\n",
+ " acc_score += torch.sum((Y_predictions > 0.5) == Y).item()\n",
+ " items_total += Y.shape[0] \n",
+ " \n",
+ " optimizer.zero_grad()\n",
+ " loss = criterion(Y_predictions, Y)\n",
+ " loss.backward()\n",
+ " optimizer.step()\n",
+ " \n",
+ "\n",
+ " loss_score += loss.item() * Y.shape[0] "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "tensor([[0.5667],\n",
+ " [0.5802],\n",
+ " [0.5757],\n",
+ " [0.5670]], grad_fn=)"
+ ]
+ },
+ "execution_count": 28,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "Y_predictions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "tensor([[0.],\n",
+ " [1.],\n",
+ " [1.],\n",
+ " [0.]])"
+ ]
+ },
+ "execution_count": 29,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "Y"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "452"
+ ]
+ },
+ "execution_count": 30,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "acc_score"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "809"
+ ]
+ },
+ "execution_count": 31,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "items_total"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "accuracy: 0.5587144622991347\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(f'accuracy: {acc_score / items_total}')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "BCE loss: 0.6745463597170355\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(f'BCE loss: {loss_score / items_total}')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### model - ewaluacja"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_loss_acc(model, X_dataset, Y_dataset):\n",
+ " loss_score = 0\n",
+ " acc_score = 0\n",
+ " items_total = 0\n",
+ " model.eval()\n",
+ " for i in range(0, Y_dataset.shape[0], BATCH_SIZE):\n",
+ " X = X_dataset[i:i+BATCH_SIZE]\n",
+ " X = torch.tensor(X.astype(np.float32).todense())\n",
+ " Y = Y_dataset[i:i+BATCH_SIZE]\n",
+ " Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)\n",
+ " Y_predictions = model(X)\n",
+ " acc_score += torch.sum((Y_predictions > 0.5) == Y).item()\n",
+ " items_total += Y.shape[0] \n",
+ "\n",
+ " optimizer.zero_grad()\n",
+ " loss = criterion(Y_predictions, Y)\n",
+ " loss.backward()\n",
+ " optimizer.step()\n",
+ "\n",
+ "\n",
+ " loss_score += loss.item() * Y.shape[0] \n",
+ " return (loss_score / items_total), (acc_score / items_total)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.6443227143826974, 0.622991347342398)"
+ ]
+ },
+ "execution_count": 35,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "get_loss_acc(lr_model, X_train, Y_train)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.6369243131743537, 0.6037037037037037)"
+ ]
+ },
+ "execution_count": 36,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "get_loss_acc(lr_model, X_dev, Y_dev)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.6323775731785694, 0.6499302649930265)"
+ ]
+ },
+ "execution_count": 37,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "get_loss_acc(lr_model, X_test, Y_test)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### wagi modelu"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Parameter containing:\n",
+ " tensor([[ 0.0314, -0.0375, 0.0131, ..., -0.0057, -0.0008, -0.0089]],\n",
+ " requires_grad=True),\n",
+ " Parameter containing:\n",
+ " tensor([0.0563], requires_grad=True)]"
+ ]
+ },
+ "execution_count": 38,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "list(lr_model.parameters())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "tensor([ 0.0314, -0.0375, 0.0131, ..., -0.0057, -0.0008, -0.0089],\n",
+ " grad_fn=)"
+ ]
+ },
+ "execution_count": 39,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "list(lr_model.parameters())[0][0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "torch.return_types.topk(\n",
+ "values=tensor([0.3753, 0.2305, 0.2007, 0.2006, 0.1993, 0.1952, 0.1930, 0.1898, 0.1831,\n",
+ " 0.1731, 0.1649, 0.1647, 0.1543, 0.1320, 0.1314, 0.1303, 0.1296, 0.1261,\n",
+ " 0.1245, 0.1243], grad_fn=),\n",
+ "indices=tensor([8942, 6336, 1852, 9056, 1865, 4039, 7820, 5002, 8208, 1857, 9709, 803,\n",
+ " 1046, 130, 4306, 6481, 4370, 4259, 4285, 1855]))"
+ ]
+ },
+ "execution_count": 40,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "torch.topk(list(lr_model.parameters())[0][0], 20)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "the\n",
+ "of\n",
+ "christ\n",
+ "to\n",
+ "church\n",
+ "god\n",
+ "rutgers\n",
+ "jesus\n",
+ "sin\n",
+ "christians\n",
+ "we\n",
+ "and\n",
+ "athos\n",
+ "1993\n",
+ "hell\n",
+ "our\n",
+ "his\n",
+ "he\n",
+ "heaven\n",
+ "christian\n"
+ ]
+ }
+ ],
+ "source": [
+ "for i in torch.topk(list(lr_model.parameters())[0][0], 20)[1]:\n",
+ " print(vectorizer.get_feature_names()[i])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "torch.return_types.topk(\n",
+ "values=tensor([-0.3478, -0.2578, -0.2455, -0.2347, -0.2330, -0.2265, -0.2205, -0.2050,\n",
+ " -0.2044, -0.1979, -0.1876, -0.1790, -0.1747, -0.1745, -0.1734, -0.1647,\n",
+ " -0.1639, -0.1617, -0.1601, -0.1592], grad_fn=),\n",
+ "indices=tensor([5119, 8096, 5420, 4436, 6194, 1627, 6901, 5946, 9970, 3116, 1036, 9906,\n",
+ " 5654, 8329, 7869, 1039, 1991, 4926, 5035, 4925]))"
+ ]
+ },
+ "execution_count": 42,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "torch.topk(list(lr_model.parameters())[0][0], 20, largest = False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "keith\n",
+ "sgi\n",
+ "livesey\n",
+ "host\n",
+ "nntp\n",
+ "caltech\n",
+ "posting\n",
+ "morality\n",
+ "you\n",
+ "edu\n",
+ "atheism\n",
+ "wpd\n",
+ "mathew\n",
+ "solntze\n",
+ "sandvik\n",
+ "atheists\n",
+ "com\n",
+ "islamic\n",
+ "jon\n",
+ "islam\n"
+ ]
+ }
+ ],
+ "source": [
+ "for i in torch.topk(list(lr_model.parameters())[0][0], 20, largest = False)[1]:\n",
+ " print(vectorizer.get_feature_names()[i])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### sieć neuronowa"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class NeuralNetworkModel(torch.nn.Module):\n",
+ "\n",
+ " def __init__(self):\n",
+ " super(NeuralNetworkModel, self).__init__()\n",
+ " self.fc1 = torch.nn.Linear(FEAUTERES,500)\n",
+ " self.fc2 = torch.nn.Linear(500,1)\n",
+ "\n",
+ " def forward(self, x):\n",
+ " x = self.fc1(x)\n",
+ " x = self.fc2(x)\n",
+ " x = torch.sigmoid(x)\n",
+ " return x"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "nn_model = NeuralNetworkModel()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "BATCH_SIZE = 5"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "criterion = torch.nn.BCELoss()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "optimizer = torch.optim.SGD(nn_model.parameters(), lr = 0.1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 49,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.6605833534551934, 0.5908529048207664)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.6379233609747004, 0.6481481481481481)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "1"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.4341224195120214, 0.896168108776267)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.3649017943276299, 0.9074074074074074)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "2"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.18619558424660096, 0.9765142150803461)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.16293201995668588, 0.9888888888888889)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "3"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.09108264647580784, 0.9962917181705809)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.08985773311858927, 0.9962962962962963)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "4"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.053487053708540566, 0.9987639060568603)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.05794332528279887, 1.0)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "for epoch in range(5):\n",
+ " loss_score = 0\n",
+ " acc_score = 0\n",
+ " items_total = 0\n",
+ " nn_model.train()\n",
+ " for i in range(0, Y_train.shape[0], BATCH_SIZE):\n",
+ " X = X_train[i:i+BATCH_SIZE]\n",
+ " X = torch.tensor(X.astype(np.float32).todense())\n",
+ " Y = Y_train[i:i+BATCH_SIZE]\n",
+ " Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)\n",
+ " Y_predictions = nn_model(X)\n",
+ " acc_score += torch.sum((Y_predictions > 0.5) == Y).item()\n",
+ " items_total += Y.shape[0] \n",
+ "\n",
+ " optimizer.zero_grad()\n",
+ " loss = criterion(Y_predictions, Y)\n",
+ " loss.backward()\n",
+ " optimizer.step()\n",
+ "\n",
+ "\n",
+ " loss_score += loss.item() * Y.shape[0] \n",
+ "\n",
+ " display(epoch)\n",
+ " display(get_loss_acc(nn_model, X_train, Y_train))\n",
+ " display(get_loss_acc(nn_model, X_dev, Y_dev))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.16834938257537793, 0.9428172942817294)"
+ ]
+ },
+ "execution_count": 50,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "get_loss_acc(nn_model, X_test, Y_test)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Zadanie domowe\n",
+ "\n",
+ "- wybrać jedno z poniższych repozytoriów i je sforkować:\n",
+ " - https://git.wmi.amu.edu.pl/kubapok/paranormal-or-skeptic-ISI-public\n",
+ " - https://git.wmi.amu.edu.pl/kubapok/sport-text-classification-ball-ISI-public\n",
+ "- stworzyć klasyfikator bazujący na prostej sieci neuronowej feed forward w pytorchu (można bazować na tym jupyterze). Zamiast tfidf proszę skorzystać z jakieś reprezentacji gęstej (np. word2vec).\n",
+ "- stworzyć predykcje w plikach dev-0/out.tsv oraz test-A/out.tsv\n",
+ "- wynik accuracy sprawdzony za pomocą narzędzia geval (patrz poprzednie zadanie) powinien wynosić conajmniej 0.67\n",
+ "- proszę umieścić predykcję oraz skrypty generujące (w postaci tekstowej a nie jupyter) w repo, a w MS TEAMS umieścić link do swojego repo\n",
+ "termin 25.05, 70 punktów\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/cw/08_regresja_logistyczna_ODPOWIEDZI.ipynb b/cw/08_regresja_logistyczna_ODPOWIEDZI.ipynb
new file mode 100644
index 0000000..dba395f
--- /dev/null
+++ b/cw/08_regresja_logistyczna_ODPOWIEDZI.ipynb
@@ -0,0 +1,1242 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Regresja logistyczna"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## import bibliotek"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/media/kuba/ssdsam/anaconda3/lib/python3.8/site-packages/gensim/similarities/__init__.py:15: UserWarning: The gensim.similarities.levenshtein submodule is disabled, because the optional Levenshtein package is unavailable. Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning.\n",
+ " warnings.warn(msg)\n"
+ ]
+ }
+ ],
+ "source": [
+ "import numpy as np\n",
+ "import gensim\n",
+ "import torch\n",
+ "import pandas as pd\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "\n",
+ "from sklearn.datasets import fetch_20newsgroups\n",
+ "# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html\n",
+ "\n",
+ "from sklearn.feature_extraction.text import TfidfVectorizer\n",
+ "from sklearn.metrics import accuracy_score"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "CATEGORIES = ['soc.religion.christian', 'alt.atheism']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "newsgroups_train_dev = fetch_20newsgroups(subset = 'train', categories=CATEGORIES)\n",
+ "newsgroups_test = fetch_20newsgroups(subset = 'test', categories=CATEGORIES)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "newsgroups_train_dev_text = newsgroups_train_dev['data']\n",
+ "newsgroups_test_text = newsgroups_test['data']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Y_train_dev = newsgroups_train_dev['target']\n",
+ "Y_test = newsgroups_test['target']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "newsgroups_train_text, newsgroups_dev_text, Y_train, Y_dev = train_test_split(newsgroups_train_dev_text, Y_train_dev, random_state=42)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Y_names = newsgroups_train_dev['target_names']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['alt.atheism', 'soc.religion.christian']"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "Y_names"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## baseline"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1,\n",
+ " 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0,\n",
+ " 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0,\n",
+ " 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1,\n",
+ " 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,\n",
+ " 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0,\n",
+ " 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0,\n",
+ " 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n",
+ " 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1,\n",
+ " 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0,\n",
+ " 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0,\n",
+ " 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0,\n",
+ " 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0,\n",
+ " 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0,\n",
+ " 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1,\n",
+ " 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
+ " 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1,\n",
+ " 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0,\n",
+ " 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0,\n",
+ " 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1,\n",
+ " 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1,\n",
+ " 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1,\n",
+ " 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1,\n",
+ " 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0,\n",
+ " 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0,\n",
+ " 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0,\n",
+ " 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1,\n",
+ " 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,\n",
+ " 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0,\n",
+ " 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0,\n",
+ " 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0,\n",
+ " 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1,\n",
+ " 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
+ " 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1,\n",
+ " 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,\n",
+ " 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1,\n",
+ " 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0])"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "Y_train"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "1 450\n",
+ "0 359\n",
+ "dtype: int64"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "pd.value_counts(Y_train)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### train"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.5562422744128553"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "accuracy_score(np.ones_like(Y_train) * 1, Y_train)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### dev"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.5518518518518518"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "accuracy_score(np.ones_like(Y_dev) * 1, Y_dev)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### test"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0.5550906555090656"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "accuracy_score(np.ones_like(Y_test) * 1, Y_test)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### PYTANIE: co jest nie tak z regresją liniową?"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Regresja logistyczna"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### wektoryzacja"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "FEAUTERES = 10_000"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "vectorizer = TfidfVectorizer(max_features=10_000)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "X_train = vectorizer.fit_transform(newsgroups_train_text)\n",
+ "X_dev = vectorizer.transform(newsgroups_dev_text)\n",
+ "X_test = vectorizer.transform(newsgroups_test_text)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "<717x10000 sparse matrix of type ''\n",
+ "\twith 120739 stored elements in Compressed Sparse Row format>"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "X_test"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### model - inicjalizacja "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class LogisticRegressionModel(torch.nn.Module):\n",
+ "\n",
+ " def __init__(self):\n",
+ " super(LogisticRegressionModel, self).__init__()\n",
+ " self.fc = torch.nn.Linear(FEAUTERES,1)\n",
+ "\n",
+ " def forward(self, x):\n",
+ " x = self.fc(x)\n",
+ " x = torch.sigmoid(x)\n",
+ " return x"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lr_model = LogisticRegressionModel()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "tensor([[0.4978],\n",
+ " [0.5009],\n",
+ " [0.4998],\n",
+ " [0.4990],\n",
+ " [0.5018]], grad_fn=)"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "lr_model(torch.Tensor(X_train[0:5].astype(np.float32).todense()))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "LogisticRegressionModel(\n",
+ " (fc): Linear(in_features=10000, out_features=1, bias=True)\n",
+ ")"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "lr_model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Parameter containing:\n",
+ " tensor([[-0.0059, 0.0035, 0.0021, ..., -0.0042, -0.0057, -0.0049]],\n",
+ " requires_grad=True),\n",
+ " Parameter containing:\n",
+ " tensor([-0.0023], requires_grad=True)]"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "list(lr_model.parameters())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## model - trenowanie"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "BATCH_SIZE = 5"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "criterion = torch.nn.BCELoss()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "optimizer = torch.optim.SGD(lr_model.parameters(), lr = 0.1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "809"
+ ]
+ },
+ "execution_count": 26,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "Y_train.shape[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "loss_score = 0\n",
+ "acc_score = 0\n",
+ "items_total = 0\n",
+ "lr_model.train()\n",
+ "for i in range(0, Y_train.shape[0], BATCH_SIZE):\n",
+ " X = X_train[i:i+BATCH_SIZE]\n",
+ " X = torch.tensor(X.astype(np.float32).todense())\n",
+ " Y = Y_train[i:i+BATCH_SIZE]\n",
+ " Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)\n",
+ " Y_predictions = lr_model(X)\n",
+ " acc_score += torch.sum((Y_predictions > 0.5) == Y).item()\n",
+ " items_total += Y.shape[0] \n",
+ " \n",
+ " optimizer.zero_grad()\n",
+ " loss = criterion(Y_predictions, Y)\n",
+ " loss.backward()\n",
+ " optimizer.step()\n",
+ " \n",
+ "\n",
+ " loss_score += loss.item() * Y.shape[0] "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "tensor([[0.5667],\n",
+ " [0.5802],\n",
+ " [0.5757],\n",
+ " [0.5670]], grad_fn=)"
+ ]
+ },
+ "execution_count": 28,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "Y_predictions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "tensor([[0.],\n",
+ " [1.],\n",
+ " [1.],\n",
+ " [0.]])"
+ ]
+ },
+ "execution_count": 29,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "Y"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "452"
+ ]
+ },
+ "execution_count": 30,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "acc_score"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "809"
+ ]
+ },
+ "execution_count": 31,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "items_total"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "accuracy: 0.5587144622991347\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(f'accuracy: {acc_score / items_total}')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "BCE loss: 0.6745463597170355\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(f'BCE loss: {loss_score / items_total}')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### model - ewaluacja"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_loss_acc(model, X_dataset, Y_dataset):\n",
+ " loss_score = 0\n",
+ " acc_score = 0\n",
+ " items_total = 0\n",
+ " model.eval()\n",
+ " for i in range(0, Y_dataset.shape[0], BATCH_SIZE):\n",
+ " X = X_dataset[i:i+BATCH_SIZE]\n",
+ " X = torch.tensor(X.astype(np.float32).todense())\n",
+ " Y = Y_dataset[i:i+BATCH_SIZE]\n",
+ " Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)\n",
+ " Y_predictions = model(X)\n",
+ " acc_score += torch.sum((Y_predictions > 0.5) == Y).item()\n",
+ " items_total += Y.shape[0] \n",
+ "\n",
+ " optimizer.zero_grad()\n",
+ " loss = criterion(Y_predictions, Y)\n",
+ " loss.backward()\n",
+ " optimizer.step()\n",
+ "\n",
+ "\n",
+ " loss_score += loss.item() * Y.shape[0] \n",
+ " return (loss_score / items_total), (acc_score / items_total)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.6443227143826974, 0.622991347342398)"
+ ]
+ },
+ "execution_count": 35,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "get_loss_acc(lr_model, X_train, Y_train)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.6369243131743537, 0.6037037037037037)"
+ ]
+ },
+ "execution_count": 36,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "get_loss_acc(lr_model, X_dev, Y_dev)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.6323775731785694, 0.6499302649930265)"
+ ]
+ },
+ "execution_count": 37,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "get_loss_acc(lr_model, X_test, Y_test)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### wagi modelu"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Parameter containing:\n",
+ " tensor([[ 0.0314, -0.0375, 0.0131, ..., -0.0057, -0.0008, -0.0089]],\n",
+ " requires_grad=True),\n",
+ " Parameter containing:\n",
+ " tensor([0.0563], requires_grad=True)]"
+ ]
+ },
+ "execution_count": 38,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "list(lr_model.parameters())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "tensor([ 0.0314, -0.0375, 0.0131, ..., -0.0057, -0.0008, -0.0089],\n",
+ " grad_fn=)"
+ ]
+ },
+ "execution_count": 39,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "list(lr_model.parameters())[0][0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "torch.return_types.topk(\n",
+ "values=tensor([0.3753, 0.2305, 0.2007, 0.2006, 0.1993, 0.1952, 0.1930, 0.1898, 0.1831,\n",
+ " 0.1731, 0.1649, 0.1647, 0.1543, 0.1320, 0.1314, 0.1303, 0.1296, 0.1261,\n",
+ " 0.1245, 0.1243], grad_fn=),\n",
+ "indices=tensor([8942, 6336, 1852, 9056, 1865, 4039, 7820, 5002, 8208, 1857, 9709, 803,\n",
+ " 1046, 130, 4306, 6481, 4370, 4259, 4285, 1855]))"
+ ]
+ },
+ "execution_count": 40,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "torch.topk(list(lr_model.parameters())[0][0], 20)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "the\n",
+ "of\n",
+ "christ\n",
+ "to\n",
+ "church\n",
+ "god\n",
+ "rutgers\n",
+ "jesus\n",
+ "sin\n",
+ "christians\n",
+ "we\n",
+ "and\n",
+ "athos\n",
+ "1993\n",
+ "hell\n",
+ "our\n",
+ "his\n",
+ "he\n",
+ "heaven\n",
+ "christian\n"
+ ]
+ }
+ ],
+ "source": [
+ "for i in torch.topk(list(lr_model.parameters())[0][0], 20)[1]:\n",
+ " print(vectorizer.get_feature_names()[i])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "torch.return_types.topk(\n",
+ "values=tensor([-0.3478, -0.2578, -0.2455, -0.2347, -0.2330, -0.2265, -0.2205, -0.2050,\n",
+ " -0.2044, -0.1979, -0.1876, -0.1790, -0.1747, -0.1745, -0.1734, -0.1647,\n",
+ " -0.1639, -0.1617, -0.1601, -0.1592], grad_fn=),\n",
+ "indices=tensor([5119, 8096, 5420, 4436, 6194, 1627, 6901, 5946, 9970, 3116, 1036, 9906,\n",
+ " 5654, 8329, 7869, 1039, 1991, 4926, 5035, 4925]))"
+ ]
+ },
+ "execution_count": 42,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "torch.topk(list(lr_model.parameters())[0][0], 20, largest = False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "keith\n",
+ "sgi\n",
+ "livesey\n",
+ "host\n",
+ "nntp\n",
+ "caltech\n",
+ "posting\n",
+ "morality\n",
+ "you\n",
+ "edu\n",
+ "atheism\n",
+ "wpd\n",
+ "mathew\n",
+ "solntze\n",
+ "sandvik\n",
+ "atheists\n",
+ "com\n",
+ "islamic\n",
+ "jon\n",
+ "islam\n"
+ ]
+ }
+ ],
+ "source": [
+ "for i in torch.topk(list(lr_model.parameters())[0][0], 20, largest = False)[1]:\n",
+ " print(vectorizer.get_feature_names()[i])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### sieć neuronowa"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class NeuralNetworkModel(torch.nn.Module):\n",
+ "\n",
+ " def __init__(self):\n",
+ " super(NeuralNetworkModel, self).__init__()\n",
+ " self.fc1 = torch.nn.Linear(FEAUTERES,500)\n",
+ " self.fc2 = torch.nn.Linear(500,1)\n",
+ "\n",
+ " def forward(self, x):\n",
+ " x = self.fc1(x)\n",
+ " x = self.fc2(x)\n",
+ " x = torch.sigmoid(x)\n",
+ " return x"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "nn_model = NeuralNetworkModel()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "BATCH_SIZE = 5"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "criterion = torch.nn.BCELoss()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "optimizer = torch.optim.SGD(nn_model.parameters(), lr = 0.1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 49,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.6605833534551934, 0.5908529048207664)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.6379233609747004, 0.6481481481481481)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "1"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.4341224195120214, 0.896168108776267)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.3649017943276299, 0.9074074074074074)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "2"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.18619558424660096, 0.9765142150803461)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.16293201995668588, 0.9888888888888889)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "3"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.09108264647580784, 0.9962917181705809)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.08985773311858927, 0.9962962962962963)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "4"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.053487053708540566, 0.9987639060568603)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.05794332528279887, 1.0)"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "for epoch in range(5):\n",
+ " loss_score = 0\n",
+ " acc_score = 0\n",
+ " items_total = 0\n",
+ " nn_model.train()\n",
+ " for i in range(0, Y_train.shape[0], BATCH_SIZE):\n",
+ " X = X_train[i:i+BATCH_SIZE]\n",
+ " X = torch.tensor(X.astype(np.float32).todense())\n",
+ " Y = Y_train[i:i+BATCH_SIZE]\n",
+ " Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)\n",
+ " Y_predictions = nn_model(X)\n",
+ " acc_score += torch.sum((Y_predictions > 0.5) == Y).item()\n",
+ " items_total += Y.shape[0] \n",
+ "\n",
+ " optimizer.zero_grad()\n",
+ " loss = criterion(Y_predictions, Y)\n",
+ " loss.backward()\n",
+ " optimizer.step()\n",
+ "\n",
+ "\n",
+ " loss_score += loss.item() * Y.shape[0] \n",
+ "\n",
+ " display(epoch)\n",
+ " display(get_loss_acc(nn_model, X_train, Y_train))\n",
+ " display(get_loss_acc(nn_model, X_dev, Y_dev))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(0.16834938257537793, 0.9428172942817294)"
+ ]
+ },
+ "execution_count": 50,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "get_loss_acc(nn_model, X_test, Y_test)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Zadanie domowe\n",
+ "\n",
+ "- wybrać jedno z poniższych repozytoriów i je sforkować:\n",
+ " - https://git.wmi.amu.edu.pl/kubapok/paranormal-or-skeptic-ISI-public\n",
+ " - https://git.wmi.amu.edu.pl/kubapok/sport-text-classification-ball-ISI-public\n",
+ "- stworzyć klasyfikator bazujący na prostej sieci neuronowej feed forward w pytorchu (można bazować na tym jupyterze). Zamiast tfidf proszę skorzystać z jakieś reprezentacji gęstej (np. word2vec).\n",
+ "- stworzyć predykcje w plikach dev-0/out.tsv oraz test-A/out.tsv\n",
+ "- wynik accuracy sprawdzony za pomocą narzędzia geval (patrz poprzednie zadanie) powinien wynosić conajmniej 0.67\n",
+ "- proszę umieścić predykcję oraz skrypty generujące (w postaci tekstowej a nie jupyter) w repo, a w MS TEAMS umieścić link do swojego repo\n",
+ "termin 25.05, 70 punktów\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/wyk/07_Naiwny_klasyfikator_bayesowski.ipynb b/wyk/07_Naiwny_klasyfikator_bayesowski.ipynb
index 8208481..5468e43 100644
--- a/wyk/07_Naiwny_klasyfikator_bayesowski.ipynb
+++ b/wyk/07_Naiwny_klasyfikator_bayesowski.ipynb
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "markdown",
- "id": "damaged-senator",
+ "id": "moderate-array",
"metadata": {},
"source": [
"# Klasyfikacja binarna dla tekstu\n",
@@ -14,7 +14,7 @@
},
{
"cell_type": "markdown",
- "id": "explicit-gathering",
+ "id": "correct-victory",
"metadata": {},
"source": [
"**Pytanie**: Czy można wyobrazić sobie zadanie klasyfikacji mejli, niebędące zadaniem klasyfikacji binarnej?"
@@ -22,7 +22,7 @@
},
{
"cell_type": "markdown",
- "id": "material-watch",
+ "id": "spiritual-diploma",
"metadata": {},
"source": [
"Zakładamy paradygmat uczenia nadzorowanego, tzn. dysponujemy zbiorem uczącym.\n",
@@ -32,7 +32,7 @@
},
{
"cell_type": "markdown",
- "id": "referenced-hello",
+ "id": "secure-performance",
"metadata": {},
"source": [
"## Klasyfikacja regułowa\n",
@@ -49,14 +49,14 @@
"body __FRAUD_XJR\t/(?:who was a|as a|an? honest|you being a|to any) foreigner/i\n",
"```\n",
"\n",
- "Jakie są wady i zalety regułowych filtrów antyspamowych?\n",
+ "**Pytanie:** Jakie są wady i zalety regułowych filtrów antyspamowych?\n",
"\n",
"Współcześnie zdecydowanie dominuje użycie metod statystycznych (opartych na nadzorowanym uczeniu maszynowym). Do popularności tych metod przyczynił się artykuł [Plan for spam](http://www.paulgraham.com/spam.html) autorstwa Paula Grahama."
]
},
{
"cell_type": "markdown",
- "id": "cathedral-uganda",
+ "id": "indoor-ending",
"metadata": {},
"source": [
"## Podejście generatywne i dyskryminatywne\n",
@@ -72,7 +72,7 @@
},
{
"cell_type": "markdown",
- "id": "powerful-engineer",
+ "id": "pleased-clinic",
"metadata": {},
"source": [
"## Nasz \"dyżurny\" przykład\n",
@@ -94,11 +94,31 @@
"Zakładamy, że dokumenty podlegają lematyzacji i sprowadzeniu do mały liter, więc ostatecznie będziemy mieli następujące ciąg termów:\n",
"\n",
"* $x_1=(\\mathit{kupić}, \\mathit{pan}, \\mathit{viagra})$\n",
- "* $x_2=(\\mathit{tani}, \\mathit{miejsce}, \\mathit{dla}, \\mathit{pana})$\n",
+ "* $x_2=(\\mathit{tani}, \\mathit{miejsce}, \\mathit{dla}, \\mathit{pan})$\n",
"* $x_3=(\\mathit{viagra}, \\mathit{viagra}, \\mathit{viagra})$\n",
"* $x_4=(\\mathit{kupić}, \\mathit{tani}, \\mathit{cartridge})$\n",
"\n",
- "Uczymy na tym zbiorze klasyfikator, który będziemy testować na dokumencie $d=\\mathit{tania tania viagra dla pana}$, tj. po normalizacji\n",
+ "$P(tani|c) = (1+1)/(9+7) = 2/16 = 0.125$\n",
+ "$P(viagra|c) = \\frac{4+1}{9 + 7} = 5/16 = 0.3125 $\n",
+ "$P(dla|c) = \\frac{0+1}{9+7} = 1/16 = 0.0625$\n",
+ "$P(pan|c) = (1+1)/(9+7) = 2/16 = 0.125 $\n",
+ "$P(c) = 0.75$\n",
+ "\n",
+ "w wersji wielomianowej: $P(c)P(tani|c)P(tani|c)P(viagra|c)P(dla|c)P(pan|c) = 0.75 * 0.125 * 0.125 * 0.3125 * 0.0625 * 0.125= 0.0002861$\n",
+ "\n",
+ "w werjis Bernoulliego: $P(c)P(U_{dla}=1|c)P(U_{cartridge}=0|c)P(U_{viagra}=1|c)P(U_{pan}=1|c)P(U_{tani}=1|c)P(U_{miejsce}=0|c)P(U_{kup}=0|c)$\n",
+ "\n",
+ "$P(tani|\\bar{c}) = (1+1)/(4+7) = 2/11 =0.182 $\n",
+ "$P(viagra|\\bar{c}) = 1/11 = 0.091 $\n",
+ "$P(dla|\\bar{c}) = 2/11 = 0.182 $\n",
+ "$P(pan|\\bar{c}) = 2/11 = 0.182 $\n",
+ "$P(\\bar{c}) = 0.25$\n",
+ "\n",
+ "$P(\\bar{c})P(tani|\\bar{c})P(tani|\\bar{c})P(dla|\\bar{c})P(pan|\\bar{c}) = 0.25 * 0.182 * 0.182 * 0.091 * 0.182 * 0.182 = 0.00002496$\n",
+ "\n",
+ "\n",
+ "\n",
+ "Uczymy na tym zbiorze klasyfikator, który będziemy testować na dokumencie $d=\\mathit{tania\\ tania\\ viagra\\ dla\\ pana}$, tj. po normalizacji\n",
"$d=(\\mathit{tani}, \\mathit{tani}, \\mathit{viagra}, \\mathit{dla}, \\mathit{pan})$.\n",
"\n",
"**Uwaga:** Przykład jest oczywiście nierealistyczny i trudno będzie nam ocenić sensowność odpowiedzi. Za to będziemy w stanie policzyć ręcznie wynik.\n"
@@ -106,7 +126,7 @@
},
{
"cell_type": "markdown",
- "id": "controversial-rotation",
+ "id": "partial-military",
"metadata": {},
"source": [
"## Naiwny klasyfikator bayesowski\n",
@@ -127,7 +147,7 @@
},
{
"cell_type": "markdown",
- "id": "spatial-citizenship",
+ "id": "colonial-creature",
"metadata": {},
"source": [
"Mamy dokument $d$ i dwie klasy $c$ i $\\bar{c}$. Policzymy prawdopodobieństwa $P(c|d)$ (mamy dokument $d$, jakie jest prawdopodobieństwo, że to klasa $c$) i $P(\\bar{c}|d)$. A właściwie będziemy te prawdopodobieństwa porównywać.\n",
@@ -139,25 +159,25 @@
},
{
"cell_type": "markdown",
- "id": "united-recognition",
+ "id": "governing-fiction",
"metadata": {},
"source": [
"Zastosujmy najpierw wzór Bayesa.\n",
"\n",
- "$P(c|d) = \\frac{P(d|c) P(c)}{P(d)} \\propto P(d|c) P(c)$"
+ "$P(c|d) = \\frac{P(d|c) P(c)}{P(d)}$"
]
},
{
"cell_type": "markdown",
- "id": "present-draft",
+ "id": "northern-spine",
"metadata": {},
"source": [
- "$P(\\bar{c}|d) = \\frac{P(d|\\bar{c}) P(\\bar{c})}{P(d)} \\propto P(d|\\bar{c}) P(\\bar{c}) $"
+ "$P(\\bar{c}|d) = \\frac{P(d|\\bar{c}) P(\\bar{c})}{P(d)}$"
]
},
{
"cell_type": "markdown",
- "id": "accepting-tamil",
+ "id": "utility-induction",
"metadata": {},
"source": [
"(Oczywiście skądinąd $P(\\bar{c}|d) = 1 - P(c|d)$, ale nie będziemy teraz tego wykorzystywali.)"
@@ -165,7 +185,7 @@
},
{
"cell_type": "markdown",
- "id": "equipped-outreach",
+ "id": "timely-force",
"metadata": {},
"source": [
"Co możemy pominąć, jeśli tylko porównujemy $P(c|d)$ i $P(\\bar{c}|d)$?\n",
@@ -181,7 +201,7 @@
},
{
"cell_type": "markdown",
- "id": "active-motor",
+ "id": "embedded-involvement",
"metadata": {},
"source": [
"#### Prawdopodobieństwo _a priori_\n",
@@ -193,12 +213,16 @@
"gdzie\n",
"\n",
"* N - liczba wszystkich dokumentów w zbiorze uczącym\n",
- "* N_c - liczba dokumentow w zbiorze uczącym z klasą $c$\n"
+ "* N_c - liczba dokumentow w zbiorze uczącym z klasą $c$\n",
+ "\n",
+ "$\\hat{P}(c) = 0,75$\n",
+ "\n",
+ "$\\hat{P}(\\bar{c}) = 0,25$\n"
]
},
{
"cell_type": "markdown",
- "id": "trying-indonesian",
+ "id": "virgin-premiere",
"metadata": {},
"source": [
"#### Prawdopodobieństwo _a posteriori_\n",
@@ -212,7 +236,7 @@
},
{
"cell_type": "markdown",
- "id": "median-nomination",
+ "id": "acting-zimbabwe",
"metadata": {},
"source": [
"$P(d|c) = P(t_1\\dots t_n|c)$\n",
@@ -228,7 +252,7 @@
},
{
"cell_type": "markdown",
- "id": "romantic-verse",
+ "id": "adjustable-disney",
"metadata": {},
"source": [
"Jak oszacować $\\hat{P}(t|c)$?\n",
@@ -238,7 +262,7 @@
},
{
"cell_type": "markdown",
- "id": "interracial-today",
+ "id": "associate-variance",
"metadata": {},
"source": [
"### Wygładzanie\n",
@@ -260,12 +284,17 @@
"4. $f(m, 0, 0) = \\frac{1}{m}$\n",
"5. $\\lim_{T \\to \\inf} f(m, k, T) = \\frac{k}{T}$\n",
"\n",
+ "\n",
+ "m=2, k1=2, k2=4, T=6, 2/6 => f(2, 2, 6) > 0.333, f(2, 4, 6) < 0.666 \n",
+ "\n",
"Jaka funkcja spełnia te aksjomaty?\n",
"\n",
"$$f(m, k, T) = \\frac{k+1}{T+m}$$\n",
"\n",
"Jest to wygładzanie +1, albo wygładzanie Laplace'a.\n",
"\n",
+ "**Pytanie:** Wymyślić jakiś inny przykład funkcji, która będzie spełniała aksjomaty.\n",
+ "\n",
"\n",
"\n",
"\n",
@@ -275,7 +304,7 @@
},
{
"cell_type": "markdown",
- "id": "accepting-stockholm",
+ "id": "complimentary-airplane",
"metadata": {},
"source": [
"Po zastosowaniu do naszego naiwnego klasyfikatora otrzymamy:\n",
@@ -283,10 +312,35 @@
"$$\\hat{P}(t|c) = \\frac{\\#(t,c) + 1}{\\sum_i^{|V|} \\#(t_i,c) + |V|}$$"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "comprehensive-junior",
+ "metadata": {},
+ "source": [
+ "### Metoda Bernoulliego"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "vocational-spanish",
+ "metadata": {},
+ "source": [
+ "$$P(𝑑|𝑐) \\approx P(U=u_1|c)\\dots P(U=u_{|v|})$$, gdzie $u_i = 1$, $t_i$ pojawił się w dokumencie $d$, 0 - w przeciwnym razie\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "enabling-manitoba",
+ "metadata": {},
+ "source": [
+ "$\\hat{P}(U_{viagra}=1|c) = \\frac{\\#(viagra,N_c) + 1}{N_c + 2}$"
+ ]
+ },
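+ {
+ "cell_type": "markdown",
+ "id": "worked-example-note",
+ "metadata": {},
+ "source": [
+ "A quick numeric check of the multinomial computation above (a sketch; class $c$ consists of $x_1, x_3, x_4$, class $\\bar{c}$ of $x_2$, with add-one smoothing over the 7-term vocabulary):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "worked-example-check",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "spam = ['kupić pan viagra', 'viagra viagra viagra', 'kupić tani cartridge']\n",
+ "ham = ['tani miejsce dla pan']\n",
+ "d = ['tani', 'tani', 'viagra', 'dla', 'pan']  # the normalized test document\n",
+ "\n",
+ "vocab = {t for doc in spam + ham for t in doc.split()}\n",
+ "\n",
+ "def p_term(t, docs):\n",
+ "    tokens = [u for doc in docs for u in doc.split()]\n",
+ "    return (tokens.count(t) + 1) / (len(tokens) + len(vocab))  # add-one smoothing\n",
+ "\n",
+ "score_c, score_not_c = 3/4, 1/4  # the a priori probabilities\n",
+ "for t in d:\n",
+ "    score_c *= p_term(t, spam)\n",
+ "    score_not_c *= p_term(t, ham)\n",
+ "# ~2.86e-05 vs ~2.48e-05 -> d is classified as spam (matches the hand computation up to rounding)\n",
+ "print(score_c, score_not_c)"
+ ]
+ },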
{
"cell_type": "code",
"execution_count": null,
- "id": "moral-ceremony",
+ "id": "bearing-execution",
"metadata": {},
"outputs": [],
"source": []
diff --git a/wyk/08_Regresja_liniowa.ipynb b/wyk/08_Regresja_liniowa.ipynb
new file mode 100644
index 0000000..4e01af8
--- /dev/null
+++ b/wyk/08_Regresja_liniowa.ipynb
@@ -0,0 +1,214 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "continent-intermediate",
+ "metadata": {},
+ "source": [
+ "# Regresja liniowa\n",
+ "\n",
+ "Regresja liniowa jest prosta...\n",
+ "\n",
+ "![Ceny mieszkań](./08_files/linregr1.png)\n",
+ "\n",
+ "... dosłownie — dopasuj prostą $y = ax + b$ do punktów\n",
+ "\n",
+ "Należy odgadnąć $a$ i $b$ tak, aby zminimalizować błąd\n",
+ "kwadratowy, tj. wartość:\n",
+ "\n",
+ "$$\\sum_{i=1}^n (y_i - (ax_i + b))^2$$\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "original-speed",
+ "metadata": {},
+ "source": [
+ "Regresje liniowa (jednej zmiennej) jest łatwa do rozwiązania — wystarczy podstawić do wzoru!\n",
+ "\n",
+ "$$\\hat{b} = \\frac{ \\sum_{i=1}^{n}{x_i y_i} - \\frac{1}{n} \\sum_{i=1}^n x_i\n",
+ " \\sum_{j=1}^n y_j}{ \\sum_{i=1}^n {x_i^2} - \\frac{1}{n} (\\sum_{i=1}^n\n",
+ " x_i)^2 }$$\n",
+ "\n",
+ "$$\\hat{a} = \\bar{y} - \\hat{b}\\,\\bar{x}$$\n",
+ "\n",
+ "\n",
+ "Na przykład dla mieszkań: $b =$ -30809.203 zł, $a =$ 5733.693 zł/m$^2$.\n",
+ "\n",
+ "![Ceny mieszkań](./08_files/linregr2.png)\n",
+ "\n"
+ ]
+ },
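+ {
+ "cell_type": "markdown",
+ "id": "closed-form-note",
+ "metadata": {},
+ "source": [
+ "A minimal numpy sketch of the closed-form solution (assuming the mieszkania.tsv file from 08_files/, with the columns powierzchnia and cena):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "closed-form-sketch",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "\n",
+ "prices = pd.read_csv('08_files/mieszkania.tsv', sep='\\t')\n",
+ "x = prices['powierzchnia'].to_numpy()\n",
+ "y = prices['cena'].to_numpy()\n",
+ "\n",
+ "n = len(x)\n",
+ "# slope and intercept of y = ax + b, straight from the formulas above\n",
+ "a = (np.sum(x * y) - np.sum(x) * np.sum(y) / n) / (np.sum(x**2) - np.sum(x)**2 / n)\n",
+ "b = np.mean(y) - a * np.mean(x)\n",
+ "print(a, b)  # a in zł/m², b in zł"
+ ]
+ },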
+ {
+ "cell_type": "markdown",
+ "id": "significant-relaxation",
+ "metadata": {},
+ "source": [
+ "## Regresja wielu zmiennych\n",
+ "\n",
+ "W praktyce mamy do czynienia z **wielowymiarową** regresją\n",
+ "liniową.\n",
+ "\n",
+ "Cena mieszkań może być prognozowana na podstawie:\n",
+ "\n",
+ "* powierzchni ($x_1 = 32.3$) \n",
+ "\n",
+ "* liczby pokoi ($x_2 = 3$)\n",
+ " \n",
+ "* piętra ($x_3 = 4$)\n",
+ "\n",
+ "* wieku ($x_4 = 13$)\n",
+ "\n",
+ "* odległości od Dworca Centralnego w Warszawie ($x_5 = 371.3$)\n",
+ "\n",
+ "* cech zerojedynkowych:\n",
+ "\n",
+ " * czy wielka płyta? ($x_6 = 0$)\n",
+ "\n",
+ " * czy jest jacuzzi? ($x_7 = 1$)\n",
+ "\n",
+ " * czy jest grzyb? ($x_8 = 0$)\n",
+ "\n",
+ " * czy to Kielce? ($x_9 = 1$)\n",
+ "\n",
+ "* ...\n",
+ "\n",
+ "... więc uogólniamy na wiele ($k$) wymiarów:\n",
+ "\n",
+ "$$ y = w_0 + w_1x_1 + \\ldots + w_kx_k = w_0 + \\sum_{j=1}^{k} w_jx_j $$\n",
+ "\n",
+ "gdzie:\n",
+ "\n",
+ "* $x_1,\\dots,x_k$ -- zmienne, na podstawie których zgadujemy\n",
+ "\n",
+ "* $w_0, w_1,\\dots,w_k$ -- wagi modelu (do wymyślenia na\n",
+ " podstawie przykładów)\n",
+ "\n",
+ "* $y$ -- odgadywana wartość\n",
+ "\n",
+ "Też istnieje wzór ładny wzór na wyliczenie wektora wag!\n",
+ "\n",
+ "$$\\mathbf{w} = (\\mathbf{X}^{\\rm T}\\mathbf{X})^{-1} \\mathbf{X}^{\\rm T}\\mathbf{y}$$\n",
+ "\n",
+ "... niestety odwracanie macierzy nie jest tanie :("
+ ]
+ },
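+ {
+ "cell_type": "markdown",
+ "id": "normal-equations-note",
+ "metadata": {},
+ "source": [
+ "A sketch of the closed-form formula on synthetic data; np.linalg.solve solves $(\\mathbf{X}^{\\rm T}\\mathbf{X})\\mathbf{w} = \\mathbf{X}^{\\rm T}\\mathbf{y}$ directly, which is cheaper and numerically safer than an explicit inverse:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "normal-equations-sketch",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "\n",
+ "rng = np.random.default_rng(42)\n",
+ "X = np.hstack([np.ones((100, 1)), rng.normal(size=(100, 3))])  # column of ones for w_0\n",
+ "true_w = np.array([2.0, 1.0, -3.0, 0.5])\n",
+ "y = X @ true_w + rng.normal(scale=0.1, size=100)\n",
+ "\n",
+ "w = np.linalg.solve(X.T @ X, X.T @ y)\n",
+ "print(w)  # close to true_w"
+ ]
+ },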
+ {
+ "cell_type": "markdown",
+ "id": "ordinary-appendix",
+ "metadata": {},
+ "source": [
+ "## Kilka sporzeżeń\n",
+ "\n",
+ "Regresja liniowa to najprostszy możliwy model:\n",
+ "\n",
+ "* im czegoś więcej na wejściu, tym proporcjonalnie (troszkę) więcej/mniej na wyjściu\n",
+ "\n",
+ "* nic prostszego nie da się wymyślić (funkcja stała??)\n",
+ "\n",
+ "* niestety model liniowy czasami kompletnie nie ma sensu (np. wzrost człowieka w\n",
+ " stosunku do wieku)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "egyptian-austria",
+ "metadata": {},
+ "source": [
+ "## Uczenie\n",
+ "\n",
+ "A jak nauczyć się wag z przykładów?\n",
+ "\n",
+ "* wzór (z odwracaniem macierzy) — problematyczny\n",
+ "\n",
+ "### Metoda gradientu prostego\n",
+ "\n",
+ "![Morskie Oko - Krzysztof Dudzik](08_files/morskieoko.jpg)\n",
+ "\n",
+ "Schodź wzdłuż lokalnego spadku funkcji błędu.\n",
+ "\n",
+ "Tak więc w praktyce zamiast podstawiać do wzoru lepiej się uczyć iteracyjnie -\n",
+ " metodą **gradientu prostego** (ang. _gradient descent_).\n",
+ "\n",
+ "1. Zacznij od byle jakich wag $w_i$ (np. wylosuj)\n",
+ "2. Weź losowy przykład uczący $x_1,\\dots,x_n$, $y$.\n",
+ "3. Oblicz wyjście $\\hat{y}$ na podstawie $x_1,\\dots,x_n$.\n",
+ "4. Oblicz funkcję błędu między $y$ a $\\hat{y}$.\n",
+ "5. Zmodyfikuj lekko wagi $(w_i)$ w kierunku spadku funkcji błędu.\n",
+ "6. Jeśli błąd jest duży, idź do 2.\n",
+ "\n",
+ "Modyfikacja wag:\n",
+ "\n",
+ "$$w_i := w_i - x_i (\\hat{y} - y) \\eta$$\n",
+ "\n",
+ "gdzie $\\eta$ to **współczynnik uczenia** _learning rate_.\n"
+ ]
+ },
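+ {
+ "cell_type": "markdown",
+ "id": "sgd-note",
+ "metadata": {},
+ "source": [
+ "The recipe above as a didactic numpy sketch (one variable plus a bias, synthetic data, and a fixed learning rate and step count instead of a convergence test):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "sgd-sketch",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "\n",
+ "rng = np.random.default_rng(42)\n",
+ "x = rng.uniform(0, 1, size=200)\n",
+ "y = 3 * x + 1 + rng.normal(scale=0.1, size=200)  # true weights: w_1 = 3, w_0 = 1\n",
+ "\n",
+ "w0, w1 = 0.0, 0.0  # 1. start with arbitrary weights\n",
+ "eta = 0.05         # the learning rate\n",
+ "for _ in range(20000):\n",
+ "    i = rng.integers(len(x))  # 2. take a random training example\n",
+ "    y_hat = w0 + w1 * x[i]    # 3. compute the output\n",
+ "    # 5. w_i := w_i - x_i (y_hat - y) eta, with x_0 = 1 for the bias\n",
+ "    w0 -= (y_hat - y[i]) * eta\n",
+ "    w1 -= x[i] * (y_hat - y[i]) * eta\n",
+ "print(w0, w1)  # close to (1, 3)"
+ ]
+ },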
+ {
+ "cell_type": "markdown",
+ "id": "exact-train",
+ "metadata": {},
+ "source": [
+ "## Ewaluacja regresji\n",
+ "\n",
+ "To miary błędu (im mniej, tym lepiej!)}\n",
+ "\n",
+ "### Błąd bezwzględny (Mean Absolute Error, MAE)\n",
+ "\n",
+ "$$\\frac{1}{n}\\sum_{i=1}^n |\\hat{y}_i - y_i| $$\n",
+ "\n",
+ "### Mean Squared Error (MSE)\n",
+ "\n",
+ "$$\\frac{1}{n}\\sum_{i=1}^n (\\hat{y}_i - y_i)^2$$\n",
+ "\n",
+ "### Root Mean Squared Error (RMSE)\n",
+ "\n",
+ "$$\\sqrt{\\frac{1}{n}\\sum_{i=1}^n (\\hat{y}_i - y_i)^2}$$\n",
+ "\n"
+ ]
+ },
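+ {
+ "cell_type": "markdown",
+ "id": "metrics-note",
+ "metadata": {},
+ "source": [
+ "The three metrics in a few lines of numpy:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "metrics-sketch",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "\n",
+ "def mae(y_hat, y):\n",
+ "    return np.mean(np.abs(y_hat - y))\n",
+ "\n",
+ "def mse(y_hat, y):\n",
+ "    return np.mean((y_hat - y) ** 2)\n",
+ "\n",
+ "def rmse(y_hat, y):\n",
+ "    return np.sqrt(mse(y_hat, y))"
+ ]
+ },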
+ {
+ "cell_type": "markdown",
+ "id": "selective-agriculture",
+ "metadata": {},
+ "source": [
+ "## Regresja liniowa dla tekstu\n",
+ "\n",
+ "Czym jest wektor $\\vec{x} = (x_1,\\dots,x_n)$? Wiemy, np. reprezentacja tf-idf (być z trikiem z haszowaniem, Word2vec etc.).\n",
+ "\n",
+ "![schemat regresji liniowej](08_files/regresja-liniowa-tekst.png)\n"
+ ]
+ },
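+ {
+ "cell_type": "markdown",
+ "id": "text-regression-note",
+ "metadata": {},
+ "source": [
+ "A sketch of the whole pipeline with scikit-learn: tf-idf features feeding a linear regression. The toy texts and prices are made up purely for illustration."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "text-regression-sketch",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from sklearn.feature_extraction.text import TfidfVectorizer\n",
+ "from sklearn.linear_model import LinearRegression\n",
+ "\n",
+ "# toy data, made up for illustration\n",
+ "texts = ['tanie mieszkanie do remontu', 'luksusowy apartament z jacuzzi', 'kawalerka blisko dworca']\n",
+ "prices = [150_000, 800_000, 250_000]\n",
+ "\n",
+ "X = TfidfVectorizer().fit_transform(texts)  # each text becomes a tf-idf vector\n",
+ "model = LinearRegression().fit(X, prices)\n",
+ "print(model.predict(X))"
+ ]
+ },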
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "numerous-limitation",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/wyk/08_files/linregr1.pdf b/wyk/08_files/linregr1.pdf
new file mode 100644
index 0000000..9773332
Binary files /dev/null and b/wyk/08_files/linregr1.pdf differ
diff --git a/wyk/08_files/linregr1.png b/wyk/08_files/linregr1.png
new file mode 100644
index 0000000..e51a1b9
Binary files /dev/null and b/wyk/08_files/linregr1.png differ
diff --git a/wyk/08_files/linregr1.r b/wyk/08_files/linregr1.r
new file mode 100644
index 0000000..b8fb7e0
--- /dev/null
+++ b/wyk/08_files/linregr1.r
@@ -0,0 +1,8 @@
+library(ggplot2)
+
+prices = read.csv("mieszkania.tsv", sep="\t", header=TRUE)
+prices$area = prices$powierzchnia
+prices$price = prices$cena
+pdf("linregr1.pdf")
+ggplot(prices, aes(x=area, y=price)) + geom_point()
+dev.off()
diff --git a/wyk/08_files/linregr2.pdf b/wyk/08_files/linregr2.pdf
new file mode 100644
index 0000000..f912906
Binary files /dev/null and b/wyk/08_files/linregr2.pdf differ
diff --git a/wyk/08_files/linregr2.png b/wyk/08_files/linregr2.png
new file mode 100644
index 0000000..4e65624
Binary files /dev/null and b/wyk/08_files/linregr2.png differ
diff --git a/wyk/08_files/linregr2.r b/wyk/08_files/linregr2.r
new file mode 100644
index 0000000..4ec030c
--- /dev/null
+++ b/wyk/08_files/linregr2.r
@@ -0,0 +1,8 @@
+library(ggplot2)
+
+prices = read.csv("mieszkania.tsv", sep="\t", header=TRUE)
+prices$area = prices$powierzchnia
+prices$price = prices$cena
+pdf("linregr2.pdf")
+ggplot(prices, aes(x=area, y=price)) + geom_point() + stat_smooth(method=lm, se=FALSE)
+dev.off()
diff --git a/wyk/08_files/mieszkania.tsv b/wyk/08_files/mieszkania.tsv
new file mode 100644
index 0000000..e893fea
--- /dev/null
+++ b/wyk/08_files/mieszkania.tsv
@@ -0,0 +1,121 @@
+powierzchnia cena
+53 215000
+60.01 219990
+54 285000
+60 330000
+63 212000
+39 219000
+76.11 399000
+48 119000
+42.19 260000
+53.41 323000
+65.65 555000
+65 185000
+55 247000
+100 280000
+56 224000
+39 230000
+42.3 179000
+49.65 305000
+68 345000
+37 145000
+103 529000
+62.3 209000
+17.65 42000
+45 500000
+36.15 140000
+45 159000
+50 130000
+48 84000
+36 359000
+39.3 116400
+49.48 136950
+26 85000
+72 469000
+64 239000
+55 435000
+90 175903
+90 175903
+90 175903
+127.88 1710000
+59 649000
+48.7 240000
+73 259000
+32.9 275000
+64 170000
+44.72 174408
+68 275000
+38 323000
+35 110000
+63 165000
+25 69000
+50 290000
+76.312 572325
+65 429000
+52.5 499000
+58 145000
+34 95000
+46 280000
+38 120000
+52 269000
+47 105000
+63 266000
+67.79 275000
+60 550000
+107 1230000
+53 228000
+48.65 148000
+39 140000
+23 170000
+35 195000
+71.19 245000
+75 329000
+53 185000
+51 135000
+42 133000
+38 142000
+45.6 470000
+50 194000
+29 158999
+28.8 199000
+36 199000
+57.43 385621
+57.71 402305
+60.12 395000
+38 210000
+56.28 419000
+60 346800
+41 295000
+28.7 219000
+39 275000
+37 105000
+47 330000
+64 435000
+96 151200
+35.34 87000
+101 489000
+50 129000
+49.5 315000
+14 2000
+31 110000
+50.9 265000
+117 129000
+52.2 250000
+28 140000
+15 5000
+41.7 249000
+56.4 490000
+30.9 161000
+42.3 229000
+53 270000
+72.4 409000
+52.9 370000
+37.77 135000
+82 260000
+32 195000
+59 590000
+62.01 205000
+52.5 543000
+56 170000
+67.61 285000
+51 494000
diff --git a/wyk/08_files/morskieoko.jpg b/wyk/08_files/morskieoko.jpg
new file mode 100644
index 0000000..b9b6997
Binary files /dev/null and b/wyk/08_files/morskieoko.jpg differ
diff --git a/wyk/08_files/regresja-liniowa-tekst.png b/wyk/08_files/regresja-liniowa-tekst.png
new file mode 100644
index 0000000..a6fed62
Binary files /dev/null and b/wyk/08_files/regresja-liniowa-tekst.png differ