aitech-eks-pub/cw/08_regresja_logistyczna_ODPOWIEDZI.ipynb


Information Extraction

8. Logistic regression [exercises]

Jakub Pokrywka (2021)


Logistic regression

importing libraries

import numpy as np
import gensim
import torch
import pandas as pd
from sklearn.model_selection import train_test_split

from sklearn.datasets import fetch_20newsgroups
# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
/media/kuba/ssdsam/anaconda3/lib/python3.8/site-packages/gensim/similarities/__init__.py:15: UserWarning: The gensim.similarities.levenshtein submodule is disabled, because the optional Levenshtein package <https://pypi.org/project/python-Levenshtein/> is unavailable. Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning.
  warnings.warn(msg)
CATEGORIES = ['soc.religion.christian', 'alt.atheism']
newsgroups_train_dev = fetch_20newsgroups(subset = 'train', categories=CATEGORIES)
newsgroups_test = fetch_20newsgroups(subset = 'test', categories=CATEGORIES)
newsgroups_train_dev_text = newsgroups_train_dev['data']
newsgroups_test_text = newsgroups_test['data']
Y_train_dev = newsgroups_train_dev['target']
Y_test = newsgroups_test['target']
newsgroups_train_text, newsgroups_dev_text, Y_train, Y_dev = train_test_split(newsgroups_train_dev_text, Y_train_dev, random_state=42)
Y_names = newsgroups_train_dev['target_names']
Y_names
['alt.atheism', 'soc.religion.christian']

baseline

Y_train
array([1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1,
       1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0,
       1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0,
       1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1,
       0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,
       1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0,
       1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0,
       0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
       1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1,
       1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0,
       0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0,
       0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0,
       0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0,
       1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0,
       1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1,
       1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1,
       0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1,
       1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0,
       0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0,
       1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1,
       0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1,
       0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1,
       0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1,
       0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0,
       0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0,
       1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0,
       0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1,
       0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0,
       1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0,
       1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0,
       0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1,
       1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1,
       1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,
       0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1,
       1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0])
pd.value_counts(Y_train)
1    450
0    359
dtype: int64

train

accuracy_score(np.ones_like(Y_train) * 1, Y_train)
0.5562422744128553

dev

accuracy_score(np.ones_like(Y_dev) * 1, Y_dev)
0.5518518518518518

test

accuracy_score(np.ones_like(Y_test) * 1, Y_test)
0.5550906555090656
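
The three scores above are simply the relative frequency of the majority class (label 1, soc.religion.christian). A minimal sketch of the same majority-class baseline using scikit-learn's DummyClassifier (assuming the variables defined above are in scope):

from sklearn.dummy import DummyClassifier

# always predict the most frequent class seen in the training labels;
# the text features are ignored by this strategy
dummy = DummyClassifier(strategy='most_frequent')
dummy.fit(newsgroups_train_text, Y_train)
print(accuracy_score(Y_dev, dummy.predict(newsgroups_dev_text)))
print(accuracy_score(Y_test, dummy.predict(newsgroups_test_text)))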

QUESTION: what is wrong with linear regression?
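
One possible answer: a plain linear model w·x + b can output any real number, while here we want the probability of class 1, i.e. a value in (0, 1). Logistic regression therefore passes the linear score through the sigmoid function. A tiny sketch with made-up scores:

# a linear score can be any real number...
raw_scores = torch.tensor([-5.0, -0.3, 0.0, 0.3, 5.0])
# ...the sigmoid squashes it into (0, 1), so it can be read as P(y=1|x)
torch.sigmoid(raw_scores)   # roughly [0.007, 0.426, 0.500, 0.574, 0.993]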

Logistic regression

vectorization

FEATURES = 10_000
vectorizer = TfidfVectorizer(max_features=FEATURES)
X_train = vectorizer.fit_transform(newsgroups_train_text)
X_dev = vectorizer.transform(newsgroups_dev_text)
X_test = vectorizer.transform(newsgroups_test_text)
X_test
<717x10000 sparse matrix of type '<class 'numpy.float64'>'
	with 120739 stored elements in Compressed Sparse Row format>
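
Each document is now a sparse vector of 10,000 TF-IDF weights. A quick sketch of how one might inspect the strongest terms of a single document, reusing the fitted vectorizer above (get_feature_names() is renamed get_feature_names_out() in newer scikit-learn versions):

# map the non-zero columns of the first training document back to their terms
feature_names = vectorizer.get_feature_names()
row = X_train[0].tocoo()
for idx, weight in sorted(zip(row.col, row.data), key=lambda t: -t[1])[:10]:
    print(feature_names[idx], round(weight, 3))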

model - initialization

class LogisticRegressionModel(torch.nn.Module):

    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        self.fc = torch.nn.Linear(FEATURES, 1)

    def forward(self, x):
        x = self.fc(x)
        x = torch.sigmoid(x)
        return x
lr_model = LogisticRegressionModel()
lr_model(torch.Tensor(X_train[0:5].astype(np.float32).todense()))
tensor([[0.4989],
        [0.4985],
        [0.4970],
        [0.4968],
        [0.5007]], grad_fn=<SigmoidBackward>)
lr_model
LogisticRegressionModel(
  (fc): Linear(in_features=10000, out_features=1, bias=True)
)
list(lr_model.parameters())
[Parameter containing:
 tensor([[ 0.0006, -0.0076,  0.0002,  ...,  0.0051,  0.0034, -0.0004]],
        requires_grad=True),
 Parameter containing:
 tensor([-0.0099], requires_grad=True)]
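
The module above is a single linear layer followed by a sigmoid, so its output should match sigmoid(xWᵀ + b) computed by hand. A small sanity-check sketch on the first few training documents (still with the untrained, randomly initialized weights):

# compute sigmoid(x @ W.T + b) manually and compare with the module's forward pass
W, b = lr_model.parameters()
x = torch.Tensor(X_train[0:5].astype(np.float32).todense())
torch.allclose(torch.sigmoid(x @ W.t() + b), lr_model(x))   # expected: True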

model - training

BATCH_SIZE = 5
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(lr_model.parameters(), lr = 0.1)
Y_train.shape[0]
809
loss_score = 0
acc_score = 0
items_total = 0
lr_model.train()
for i in range(0, Y_train.shape[0], BATCH_SIZE):
    X = X_train[i:i+BATCH_SIZE]
    X = torch.tensor(X.astype(np.float32).todense())
    Y = Y_train[i:i+BATCH_SIZE]
    Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)
    Y_predictions = lr_model(X)
    acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
    items_total += Y.shape[0] 
    
    optimizer.zero_grad()
    loss = criterion(Y_predictions, Y)
    loss.backward()
    optimizer.step()
    

    loss_score += loss.item() * Y.shape[0] 
Y_predictions
tensor([[0.5657],
        [0.5827],
        [0.5727],
        [0.5672]], grad_fn=<SigmoidBackward>)
Y
tensor([[0.],
        [1.],
        [1.],
        [0.]])
acc_score
453
items_total
809
print(f'accuracy: {acc_score / items_total}')
accuracy: 0.5599505562422744
print(f'BCE loss: {loss_score / items_total}')
BCE loss: 0.6745760098965412
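
torch.nn.BCELoss averages -[y·log p + (1 - y)·log(1 - p)] over the batch. A small sketch re-computing it by hand for the last mini-batch still held in Y_predictions and Y:

# binary cross-entropy by hand, averaged over the batch
p = Y_predictions.detach()
manual_bce = -(Y * torch.log(p) + (1 - Y) * torch.log(1 - p)).mean()
manual_bce.item(), criterion(Y_predictions, Y).item()   # the two values should match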

model - evaluation

def get_loss_acc(model, X_dataset, Y_dataset):
    loss_score = 0
    acc_score = 0
    items_total = 0
    model.eval()
    for i in range(0, Y_dataset.shape[0], BATCH_SIZE):
        X = X_dataset[i:i+BATCH_SIZE]
        X = torch.tensor(X.astype(np.float32).todense())
        Y = Y_dataset[i:i+BATCH_SIZE]
        Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)
        Y_predictions = model(X)
        acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
        items_total += Y.shape[0] 

        loss = criterion(Y_predictions, Y)

        loss_score += loss.item() * Y.shape[0] 
    return (loss_score / items_total), (acc_score / items_total)
get_loss_acc(lr_model, X_train, Y_train)
(0.6443268107837445, 0.6254635352286774)
get_loss_acc(lr_model, X_dev, Y_dev)
(0.6371536641209213, 0.6074074074074074)
get_loss_acc(lr_model, X_test, Y_test)
(0.6322633745447529, 0.6485355648535565)

model weights

list(lr_model.parameters())
[Parameter containing:
 tensor([[ 0.0379, -0.0485,  0.0113,  ...,  0.0035,  0.0083, -0.0044]],
        requires_grad=True),
 Parameter containing:
 tensor([0.0556], requires_grad=True)]
list(lr_model.parameters())[0][0]
tensor([ 0.0379, -0.0485,  0.0113,  ...,  0.0035,  0.0083, -0.0044],
       grad_fn=<SelectBackward>)
torch.topk(list(lr_model.parameters())[0][0], 20)
torch.return_types.topk(
values=tensor([0.3804, 0.2315, 0.2033, 0.2026, 0.2014, 0.1993, 0.1942, 0.1890, 0.1868,
        0.1818, 0.1727, 0.1542, 0.1474, 0.1458, 0.1360, 0.1359, 0.1260, 0.1204,
        0.1184, 0.1174], grad_fn=<TopkBackward>),
indices=tensor([8942, 6336, 1865, 1852, 8208, 9056, 7820, 4039, 5002, 1857, 9709,  803,
         130, 1046, 4370, 4259, 4306, 1855, 4285, 6481]))
for i in torch.topk(list(lr_model.parameters())[0][0], 20)[1]:
    print(vectorizer.get_feature_names()[i])
the
of
church
christ
sin
to
rutgers
god
jesus
christians
we
and
1993
athos
his
he
hell
christian
heaven
our
torch.topk(list(lr_model.parameters())[0][0], 20, largest = False)
torch.return_types.topk(
values=tensor([-0.3464, -0.2578, -0.2372, -0.2307, -0.2300, -0.2259, -0.2227, -0.2107,
        -0.2054, -0.1949, -0.1919, -0.1767, -0.1767, -0.1749, -0.1747, -0.1739,
        -0.1715, -0.1633, -0.1567, -0.1562], grad_fn=<TopkBackward>),
indices=tensor([5119, 8096, 5420, 1627, 6194, 6901, 4436, 9970, 5946, 3116, 1036, 9906,
        7869, 5654, 1991, 8329, 4925, 4926, 6373, 1039]))
for i in torch.topk(list(lr_model.parameters())[0][0], 20, largest = False)[1]:
    print(vectorizer.get_feature_names()[i])
keith
sgi
livesey
caltech
nntp
posting
host
you
morality
edu
atheism
wpd
sandvik
mathew
com
solntze
islam
islamic
okcforum
atheists
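
The two inspections above can also be written as one loop that pairs every weight with its term. A sketch (again assuming the fitted vectorizer and the trained lr_model from above):

# print the 20 most positive (class 1) and 20 most negative (class 0) weights with their terms
weights = list(lr_model.parameters())[0][0]
feature_names = vectorizer.get_feature_names()
for label, largest in [('soc.religion.christian (1)', True), ('alt.atheism (0)', False)]:
    print(label)
    top = torch.topk(weights, 20, largest=largest)
    for value, idx in zip(top.values, top.indices):
        print(f'  {feature_names[int(idx)]}: {value.item():.4f}')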

neural network

class NeuralNetworkModel(torch.nn.Module):

    def __init__(self):
        super(NeuralNetworkModel, self).__init__()
        self.fc1 = torch.nn.Linear(FEATURES, 500)
        self.fc2 = torch.nn.Linear(500,1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        x = torch.sigmoid(x)
        return x
nn_model = NeuralNetworkModel()
BATCH_SIZE = 5
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(nn_model.parameters(), lr = 0.1)
for epoch in range(5):
    loss_score = 0
    acc_score = 0
    items_total = 0
    nn_model.train()
    for i in range(0, Y_train.shape[0], BATCH_SIZE):
        X = X_train[i:i+BATCH_SIZE]
        X = torch.tensor(X.astype(np.float32).todense())
        Y = Y_train[i:i+BATCH_SIZE]
        Y = torch.tensor(Y.astype(np.float32)).reshape(-1,1)
        Y_predictions = nn_model(X)
        acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
        items_total += Y.shape[0] 

        optimizer.zero_grad()
        loss = criterion(Y_predictions, Y)
        loss.backward()
        optimizer.step()


        loss_score += loss.item() * Y.shape[0] 

    display(epoch)
    display(get_loss_acc(nn_model, X_train, Y_train))
    display(get_loss_acc(nn_model, X_dev, Y_dev))
0
(0.6734723948651398, 0.5636588380716935)
(0.6606645694485417, 0.5777777777777777)
1
(0.5035873688342987, 0.8677379480840544)
(0.43131878033832266, 0.8851851851851852)
2
(0.22238253315332793, 0.9678615574783683)
(0.18925935278336206, 0.9814814814814815)
3
(0.10367853983509158, 0.9913473423980222)
(0.09969225936327819, 0.9962962962962963)
4
(0.0588170926504491, 0.9987639060568603)
(0.06267384567332489, 1.0)
get_loss_acc(nn_model, X_test, Y_test)
(0.17201613383874234, 0.9414225941422594)

Homework

  • choose one of the repositories below and fork it:
  • build a classifier based on a simple feed-forward neural network in PyTorch (you can base it on this notebook). Instead of TF-IDF, please use a dense representation (e.g. word2vec; see the sketch after this list).
  • create predictions in the files dev-0/out.tsv and test-A/out.tsv
  • the accuracy score, checked with the geval tool (see the previous assignment), should be at least 0.67
  • please put the predictions and the generating scripts (as plain text files, not a Jupyter notebook) in the repo, and post a link to your repo in MS Teams; deadline 25.05, 70 points
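
For the dense representation mentioned above, one simple option is to average pretrained word2vec vectors over the tokens of a document. A minimal sketch, assuming the gensim-data model 'word2vec-google-news-300' and naive whitespace tokenization (both are illustrative choices, not part of the assignment):

import gensim.downloader
import numpy as np

# one possible dense document representation: the mean of pretrained word2vec
# vectors over the document's tokens (out-of-vocabulary tokens are skipped)
word2vec = gensim.downloader.load('word2vec-google-news-300')   # large download (~1.6 GB)

def document_vector(text, model=word2vec, dim=300):
    tokens = text.lower().split()
    vectors = [model[token] for token in tokens if token in model]
    return np.mean(vectors, axis=0) if vectors else np.zeros(dim, dtype=np.float32)

X_train_dense = np.stack([document_vector(text) for text in newsgroups_train_text])
X_train_dense.shape   # (number of documents, 300)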