aitech-eks-pub/cw/08_regresja_logistyczna_ODPOWIEDZI.ipynb


{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 8. <i>Regresja logistyczna</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Regresja logistyczna"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## import bibliotek"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/media/kuba/ssdsam/anaconda3/lib/python3.8/site-packages/gensim/similarities/__init__.py:15: UserWarning: The gensim.similarities.levenshtein submodule is disabled, because the optional Levenshtein package <https://pypi.org/project/python-Levenshtein/> is unavailable. Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning.\n",
" warnings.warn(msg)\n"
]
}
],
"source": [
"import numpy as np\n",
"import gensim\n",
"import torch\n",
"import pandas as pd\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"from sklearn.datasets import fetch_20newsgroups\n",
"# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html\n",
"\n",
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
"from sklearn.metrics import accuracy_score"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"CATEGORIES = ['soc.religion.christian', 'alt.atheism']"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"newsgroups_train_dev = fetch_20newsgroups(subset = 'train', categories=CATEGORIES)\n",
"newsgroups_test = fetch_20newsgroups(subset = 'test', categories=CATEGORIES)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"newsgroups_train_dev_text = newsgroups_train_dev['data']\n",
"newsgroups_test_text = newsgroups_test['data']"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"Y_train_dev = newsgroups_train_dev['target']\n",
"Y_test = newsgroups_test['target']"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"newsgroups_train_text, newsgroups_dev_text, Y_train, Y_dev = train_test_split(newsgroups_train_dev_text, Y_train_dev, random_state=42)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"Y_names = newsgroups_train_dev['target_names']"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['alt.atheism', 'soc.religion.christian']"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"Y_names"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## baseline"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1,\n",
" 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0,\n",
" 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0,\n",
" 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1,\n",
" 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,\n",
" 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0,\n",
" 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0,\n",
" 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n",
" 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1,\n",
" 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0,\n",
" 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0,\n",
" 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0,\n",
" 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0,\n",
" 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0,\n",
" 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1,\n",
" 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1,\n",
" 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1,\n",
" 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0,\n",
" 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1,\n",
" 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1,\n",
" 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1,\n",
" 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1,\n",
" 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0,\n",
" 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0,\n",
" 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0,\n",
" 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1,\n",
" 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,\n",
" 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0,\n",
" 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0,\n",
" 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0,\n",
" 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1,\n",
" 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1,\n",
" 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1,\n",
" 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,\n",
" 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1,\n",
" 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0])"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"Y_train"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1 450\n",
"0 359\n",
"dtype: int64"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pd.value_counts(Y_train)"
]
},
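{
"cell_type": "markdown",
"metadata": {},
"source": [
"Predicting the majority class (1 = soc.religion.christian) for every document gives $450 / 809 \\approx 0.556$ on train; the cells below compute this majority-class accuracy on train, dev, and test."
]
},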
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### train"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.5562422744128553"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"accuracy_score(np.ones_like(Y_train) * 1, Y_train)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### dev"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.5518518518518518"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"accuracy_score(np.ones_like(Y_dev) * 1, Y_dev)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### test"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.5550906555090656"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"accuracy_score(np.ones_like(Y_test) * 1, Y_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### PYTANIE: co jest nie tak z regresją liniową?"
]
},
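{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of the point above: the sigmoid squashes any real-valued score into $(0, 1)$, so the output of a linear layer can be read as a probability."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"scores = torch.tensor([-5.0, -1.0, 0.0, 1.0, 5.0])\n",
"torch.sigmoid(scores)  # expected: roughly 0.007, 0.269, 0.500, 0.731, 0.993"
]
},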
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Regresja logistyczna"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### wektoryzacja"
]
},
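{
"cell_type": "markdown",
"metadata": {},
"source": [
"With its default settings, `TfidfVectorizer` multiplies the raw count of term $t$ in document $d$ by a smoothed inverse document frequency and then L2-normalizes each document vector:\n",
"\n",
"$$\\mathrm{tfidf}(t, d) = \\mathrm{tf}(t, d) \\cdot \\left( \\ln \\frac{1 + n}{1 + \\mathrm{df}(t)} + 1 \\right)$$\n",
"\n",
"where $n$ is the number of documents and $\\mathrm{df}(t)$ is the number of documents containing $t$."
]
},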
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"FEAUTERES = 10_000"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"vectorizer = TfidfVectorizer(max_features=10_000)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"X_train = vectorizer.fit_transform(newsgroups_train_text)\n",
"X_dev = vectorizer.transform(newsgroups_dev_text)\n",
"X_test = vectorizer.transform(newsgroups_test_text)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<717x10000 sparse matrix of type '<class 'numpy.float64'>'\n",
"\twith 120739 stored elements in Compressed Sparse Row format>"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"X_test"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### model - inicjalizacja "
]
},
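{
"cell_type": "markdown",
"metadata": {},
"source": [
"The model below computes\n",
"\n",
"$$\\hat{y} = \\sigma(w^T x + b), \\qquad \\sigma(z) = \\frac{1}{1 + e^{-z}},$$\n",
"\n",
"a single linear layer followed by a sigmoid. With the near-zero initial weights, $w^T x + b \\approx 0$, which is why the untrained predictions a few cells down all hover around $\\sigma(0) = 0.5$."
]
},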
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"class LogisticRegressionModel(torch.nn.Module):\n",
"\n",
" def __init__(self):\n",
" super(LogisticRegressionModel, self).__init__()\n",
" self.fc = torch.nn.Linear(FEAUTERES,1)\n",
"\n",
" def forward(self, x):\n",
" x = self.fc(x)\n",
" x = torch.sigmoid(x)\n",
" return x"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"lr_model = LogisticRegressionModel()"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[0.4989],\n",
" [0.4985],\n",
" [0.4970],\n",
" [0.4968],\n",
" [0.5007]], grad_fn=<SigmoidBackward>)"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"lr_model(torch.Tensor(X_train[0:5].astype(np.float32).todense()))"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"LogisticRegressionModel(\n",
" (fc): Linear(in_features=10000, out_features=1, bias=True)\n",
")"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"lr_model"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Parameter containing:\n",
" tensor([[ 0.0006, -0.0076, 0.0002, ..., 0.0051, 0.0034, -0.0004]],\n",
" requires_grad=True),\n",
" Parameter containing:\n",
" tensor([-0.0099], requires_grad=True)]"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"list(lr_model.parameters())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## model - trenowanie"
]
},
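{
"cell_type": "markdown",
"metadata": {},
"source": [
"Training minimizes binary cross-entropy (`torch.nn.BCELoss`) with mini-batch SGD:\n",
"\n",
"$$\\mathrm{BCE} = -\\frac{1}{N} \\sum_{i=1}^{N} \\left[ y_i \\log \\hat{y}_i + (1 - y_i) \\log (1 - \\hat{y}_i) \\right]$$"
]
},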
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"BATCH_SIZE = 5"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
"criterion = torch.nn.BCELoss()"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [],
"source": [
"optimizer = torch.optim.SGD(lr_model.parameters(), lr = 0.1)"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"809"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"Y_train.shape[0]"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"loss_score = 0\n",
"acc_score = 0\n",
"items_total = 0\n",
"lr_model.train()\n",
"for i in range(0, Y_train.shape[0], BATCH_SIZE):\n",
" X = X_train[i:i+BATCH_SIZE]\n",
" X = torch.tensor(X.astype(np.float32).todense())\n",
" Y = Y_train[i:i+BATCH_SIZE]\n",
" Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)\n",
" Y_predictions = lr_model(X)\n",
" acc_score += torch.sum((Y_predictions > 0.5) == Y).item()\n",
" items_total += Y.shape[0] \n",
" \n",
" optimizer.zero_grad()\n",
" loss = criterion(Y_predictions, Y)\n",
" loss.backward()\n",
" optimizer.step()\n",
" \n",
"\n",
" loss_score += loss.item() * Y.shape[0] "
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[0.5657],\n",
" [0.5827],\n",
" [0.5727],\n",
" [0.5672]], grad_fn=<SigmoidBackward>)"
]
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"Y_predictions"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[0.],\n",
" [1.],\n",
" [1.],\n",
" [0.]])"
]
},
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"Y"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/plain": [
"453"
]
},
"execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"acc_score"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"809"
]
},
"execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"items_total"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"accuracy: 0.5599505562422744\n"
]
}
],
"source": [
"print(f'accuracy: {acc_score / items_total}')"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"BCE loss: 0.6745760098965412\n"
]
}
],
"source": [
"print(f'BCE loss: {loss_score / items_total}')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### model - ewaluacja"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [],
"source": [
"def get_loss_acc(model, X_dataset, Y_dataset):\n",
" loss_score = 0\n",
" acc_score = 0\n",
" items_total = 0\n",
" model.eval()\n",
" for i in range(0, Y_dataset.shape[0], BATCH_SIZE):\n",
" X = X_dataset[i:i+BATCH_SIZE]\n",
" X = torch.tensor(X.astype(np.float32).todense())\n",
" Y = Y_dataset[i:i+BATCH_SIZE]\n",
" Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)\n",
" Y_predictions = model(X)\n",
" acc_score += torch.sum((Y_predictions > 0.5) == Y).item()\n",
" items_total += Y.shape[0] \n",
"\n",
" loss = criterion(Y_predictions, Y)\n",
"\n",
" loss_score += loss.item() * Y.shape[0] \n",
" return (loss_score / items_total), (acc_score / items_total)"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(0.6443268107837445, 0.6254635352286774)"
]
},
"execution_count": 35,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"get_loss_acc(lr_model, X_train, Y_train)"
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(0.6371536641209213, 0.6074074074074074)"
]
},
"execution_count": 36,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"get_loss_acc(lr_model, X_dev, Y_dev)"
]
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(0.6322633745447529, 0.6485355648535565)"
]
},
"execution_count": 37,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"get_loss_acc(lr_model, X_test, Y_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### wagi modelu"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Parameter containing:\n",
" tensor([[ 0.0379, -0.0485, 0.0113, ..., 0.0035, 0.0083, -0.0044]],\n",
" requires_grad=True),\n",
" Parameter containing:\n",
" tensor([0.0556], requires_grad=True)]"
]
},
"execution_count": 38,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"list(lr_model.parameters())"
]
},
{
"cell_type": "code",
"execution_count": 39,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([ 0.0379, -0.0485, 0.0113, ..., 0.0035, 0.0083, -0.0044],\n",
" grad_fn=<SelectBackward>)"
]
},
"execution_count": 39,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"list(lr_model.parameters())[0][0]"
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"torch.return_types.topk(\n",
"values=tensor([0.3804, 0.2315, 0.2033, 0.2026, 0.2014, 0.1993, 0.1942, 0.1890, 0.1868,\n",
" 0.1818, 0.1727, 0.1542, 0.1474, 0.1458, 0.1360, 0.1359, 0.1260, 0.1204,\n",
" 0.1184, 0.1174], grad_fn=<TopkBackward>),\n",
"indices=tensor([8942, 6336, 1865, 1852, 8208, 9056, 7820, 4039, 5002, 1857, 9709, 803,\n",
" 130, 1046, 4370, 4259, 4306, 1855, 4285, 6481]))"
]
},
"execution_count": 40,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"torch.topk(list(lr_model.parameters())[0][0], 20)"
]
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"the\n",
"of\n",
"church\n",
"christ\n",
"sin\n",
"to\n",
"rutgers\n",
"god\n",
"jesus\n",
"christians\n",
"we\n",
"and\n",
"1993\n",
"athos\n",
"his\n",
"he\n",
"hell\n",
"christian\n",
"heaven\n",
"our\n"
]
}
],
"source": [
"for i in torch.topk(list(lr_model.parameters())[0][0], 20)[1]:\n",
" print(vectorizer.get_feature_names()[i])"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"torch.return_types.topk(\n",
"values=tensor([-0.3464, -0.2578, -0.2372, -0.2307, -0.2300, -0.2259, -0.2227, -0.2107,\n",
" -0.2054, -0.1949, -0.1919, -0.1767, -0.1767, -0.1749, -0.1747, -0.1739,\n",
" -0.1715, -0.1633, -0.1567, -0.1562], grad_fn=<TopkBackward>),\n",
"indices=tensor([5119, 8096, 5420, 1627, 6194, 6901, 4436, 9970, 5946, 3116, 1036, 9906,\n",
" 7869, 5654, 1991, 8329, 4925, 4926, 6373, 1039]))"
]
},
"execution_count": 42,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"torch.topk(list(lr_model.parameters())[0][0], 20, largest = False)"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"keith\n",
"sgi\n",
"livesey\n",
"caltech\n",
"nntp\n",
"posting\n",
"host\n",
"you\n",
"morality\n",
"edu\n",
"atheism\n",
"wpd\n",
"sandvik\n",
"mathew\n",
"com\n",
"solntze\n",
"islam\n",
"islamic\n",
"okcforum\n",
"atheists\n"
]
}
],
"source": [
"for i in torch.topk(list(lr_model.parameters())[0][0], 20, largest = False)[1]:\n",
" print(vectorizer.get_feature_names()[i])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### sieć neuronowa"
]
},
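{
"cell_type": "markdown",
"metadata": {},
"source": [
"The same pipeline, but with a 500-unit ReLU hidden layer between the input and the sigmoid output:\n",
"\n",
"$$\\hat{y} = \\sigma\\left(W_2 \\, \\mathrm{ReLU}(W_1 x + b_1) + b_2\\right)$$"
]
},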
{
"cell_type": "code",
"execution_count": 44,
"metadata": {},
"outputs": [],
"source": [
"class NeuralNetworkModel(torch.nn.Module):\n",
"\n",
" def __init__(self):\n",
" super(NeuralNetworkModel, self).__init__()\n",
" self.fc1 = torch.nn.Linear(FEAUTERES,500)\n",
" self.fc2 = torch.nn.Linear(500,1)\n",
"\n",
" def forward(self, x):\n",
" x = self.fc1(x)\n",
" x = torch.relu(x)\n",
" x = self.fc2(x)\n",
" x = torch.sigmoid(x)\n",
" return x"
]
},
{
"cell_type": "code",
"execution_count": 45,
"metadata": {},
"outputs": [],
"source": [
"nn_model = NeuralNetworkModel()"
]
},
{
"cell_type": "code",
"execution_count": 46,
"metadata": {},
"outputs": [],
"source": [
"BATCH_SIZE = 5"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {},
"outputs": [],
"source": [
"criterion = torch.nn.BCELoss()"
]
},
{
"cell_type": "code",
"execution_count": 48,
"metadata": {},
"outputs": [],
"source": [
"optimizer = torch.optim.SGD(nn_model.parameters(), lr = 0.1)"
]
},
{
"cell_type": "code",
"execution_count": 49,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/plain": [
"0"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(0.6734723948651398, 0.5636588380716935)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(0.6606645694485417, 0.5777777777777777)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"1"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(0.5035873688342987, 0.8677379480840544)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(0.43131878033832266, 0.8851851851851852)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"2"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(0.22238253315332793, 0.9678615574783683)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(0.18925935278336206, 0.9814814814814815)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"3"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(0.10367853983509158, 0.9913473423980222)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(0.09969225936327819, 0.9962962962962963)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"4"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(0.0588170926504491, 0.9987639060568603)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(0.06267384567332489, 1.0)"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"for epoch in range(5):\n",
" loss_score = 0\n",
" acc_score = 0\n",
" items_total = 0\n",
" nn_model.train()\n",
" for i in range(0, Y_train.shape[0], BATCH_SIZE):\n",
" X = X_train[i:i+BATCH_SIZE]\n",
" X = torch.tensor(X.astype(np.float32).todense())\n",
" Y = Y_train[i:i+BATCH_SIZE]\n",
" Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)\n",
" Y_predictions = nn_model(X)\n",
" acc_score += torch.sum((Y_predictions > 0.5) == Y).item()\n",
" items_total += Y.shape[0] \n",
"\n",
" optimizer.zero_grad()\n",
" loss = criterion(Y_predictions, Y)\n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
"\n",
" loss_score += loss.item() * Y.shape[0] \n",
"\n",
" display(epoch)\n",
" display(get_loss_acc(nn_model, X_train, Y_train))\n",
" display(get_loss_acc(nn_model, X_dev, Y_dev))"
]
},
{
"cell_type": "code",
"execution_count": 50,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(0.17201613383874234, 0.9414225941422594)"
]
},
"execution_count": 50,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"get_loss_acc(nn_model, X_test, Y_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Zadanie domowe\n",
"\n",
"- wybrać jedno z poniższych repozytoriów i je sforkować:\n",
" - https://git.wmi.amu.edu.pl/kubapok/paranormal-or-skeptic-ISI-public\n",
" - https://git.wmi.amu.edu.pl/kubapok/sport-text-classification-ball-ISI-public\n",
"- stworzyć klasyfikator bazujący na prostej sieci neuronowej feed forward w pytorchu (można bazować na tym jupyterze). Zamiast tfidf proszę skorzystać z jakieś reprezentacji gęstej (np. word2vec).\n",
"- stworzyć predykcje w plikach dev-0/out.tsv oraz test-A/out.tsv\n",
"- wynik accuracy sprawdzony za pomocą narzędzia geval (patrz poprzednie zadanie) powinien wynosić conajmniej 0.67\n",
"- proszę umieścić predykcję oraz skrypty generujące (w postaci tekstowej a nie jupyter) w repo, a w MS TEAMS umieścić link do swojego repo\n",
"termin 25.05, 70 punktów\n"
]
}
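,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of one possible dense representation for the homework, assuming a pretrained gensim `KeyedVectors` model; `word2vec-google-news-300` from gensim's downloader is just one illustrative choice:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import gensim.downloader\n",
"\n",
"# illustrative choice of pretrained embeddings; any gensim KeyedVectors model will do\n",
"w2v = gensim.downloader.load('word2vec-google-news-300')\n",
"\n",
"def document_vector(text):\n",
"    # average the vectors of all in-vocabulary tokens; zero vector if none are found\n",
"    tokens = [t for t in text.lower().split() if t in w2v]\n",
"    if not tokens:\n",
"        return np.zeros(w2v.vector_size, dtype=np.float32)\n",
"    return np.mean([w2v[t] for t in tokens], axis=0)"
]
}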
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "en",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "8. Logistic regression [exercises]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}