Information Extraction
8. Logistic regression [lab]
Jakub Pokrywka (2021)
Logistic regression
library imports
import numpy as np
import gensim
import torch
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_20newsgroups
# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
CATEGORIES = ['soc.religion.christian', 'alt.atheism']
newsgroups_train_dev = fetch_20newsgroups(subset = 'train', categories=CATEGORIES)
newsgroups_test = fetch_20newsgroups(subset = 'test', categories=CATEGORIES)
newsgroups_train_dev_text = newsgroups_train_dev['data']
newsgroups_test_text = newsgroups_test['data']
Y_train_dev = newsgroups_train_dev['target']
Y_test = newsgroups_test['target']
newsgroups_train_text, newsgroups_dev_text, Y_train, Y_dev = train_test_split(newsgroups_train_dev_text, Y_train_dev, random_state=42)
Y_names = newsgroups_train_dev['target_names']
Y_names
['alt.atheism', 'soc.religion.christian']
baseline
task (5 minutes)
- create a baseline (a minimal sketch follows below)
QUESTION: what is wrong with linear regression?
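A minimal baseline sketch, assuming a majority-class baseline is what is meant here (DummyClassifier ignores the features and always predicts the most frequent training class):
from sklearn.dummy import DummyClassifier

baseline = DummyClassifier(strategy='most_frequent')
baseline.fit(newsgroups_train_text, Y_train)  # features are ignored by this strategy
print(accuracy_score(Y_dev, baseline.predict(newsgroups_dev_text)))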
Regresja logistyczna
vectorization
task (5 minutes)
- based on newsgroups_train_text, create a tf-idf vectorizer with a vocabulary of at most 10_000 entries
- generate the vectors: X_train, X_dev, X_test (a sketch of one possible solution follows below)
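A sketch of one possible solution; the names vectorizer and FEATURES are assumptions here, but they are reused by the cells further below:
FEATURES = 10_000

vectorizer = TfidfVectorizer(max_features=FEATURES)
X_train = vectorizer.fit_transform(newsgroups_train_text)  # fit the vocabulary on train only
X_dev = vectorizer.transform(newsgroups_dev_text)
X_test = vectorizer.transform(newsgroups_test_text)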
model - initialization
class LogisticRegressionModel(torch.nn.Module):

    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        # a single linear layer: FEATURES inputs (from the vectorization step) -> 1 output
        self.fc = torch.nn.Linear(FEATURES, 1)

    def forward(self, x):
        x = self.fc(x)
        x = torch.sigmoid(x)  # squash the linear output into a probability in (0, 1)
        return x
lr_model = LogisticRegressionModel()
lr_model(torch.Tensor(X_train[0:5].astype(np.float32).todense()))
tensor([[0.4978], [0.5009], [0.4998], [0.4990], [0.5018]], grad_fn=<SigmoidBackward>)
lr_model
LogisticRegressionModel( (fc): Linear(in_features=10000, out_features=1, bias=True) )
list(lr_model.parameters())
[Parameter containing: tensor([[-0.0059, 0.0035, 0.0021, ..., -0.0042, -0.0057, -0.0049]], requires_grad=True), Parameter containing: tensor([-0.0023], requires_grad=True)]
model - training
BATCH_SIZE = 5
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(lr_model.parameters(), lr = 0.1)
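As a quick sanity check: BCELoss averages -(y·log(p) + (1-y)·log(1-p)) over the batch. A small sketch with made-up values:
p = torch.tensor([[0.9], [0.2]])  # predicted probabilities
y = torch.tensor([[1.0], [0.0]])  # true labels
print(criterion(p, y))  # tensor(0.1643)
print(-(y * torch.log(p) + (1 - y) * torch.log(1 - p)).mean())  # same value, computed by hand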
Y_train.shape[0]
809
loss_score = 0
acc_score = 0
items_total = 0
lr_model.train()
for i in range(0, Y_train.shape[0], BATCH_SIZE):
    # take a mini-batch of sparse tf-idf rows and densify it for torch
    X = X_train[i:i+BATCH_SIZE]
    X = torch.tensor(X.astype(np.float32).todense())
    Y = Y_train[i:i+BATCH_SIZE]
    Y = torch.tensor(Y.astype(np.float32)).reshape(-1, 1)
    Y_predictions = lr_model(X)
    acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
    items_total += Y.shape[0]
    # standard SGD step: reset gradients, compute loss, backpropagate, update
    optimizer.zero_grad()
    loss = criterion(Y_predictions, Y)
    loss.backward()
    optimizer.step()
    loss_score += loss.item() * Y.shape[0]
Y_predictions
tensor([[0.5667], [0.5802], [0.5757], [0.5670]], grad_fn=<SigmoidBackward>)
Y
tensor([[0.], [1.], [1.], [0.]])
acc_score
452
items_total
809
print(f'accuracy: {acc_score / items_total}')
accuracy: 0.5587144622991347
print(f'BCE loss: {loss_score / items_total}')
BCE loss: 0.6745463597170355
model - evaluation
def get_loss_acc(model, X_dataset, Y_dataset):
    loss_score = 0
    acc_score = 0
    items_total = 0
    model.eval()
    with torch.no_grad():  # no gradients needed during evaluation
        for i in range(0, Y_dataset.shape[0], BATCH_SIZE):
            X = X_dataset[i:i+BATCH_SIZE]
            X = torch.tensor(X.astype(np.float32).todense())
            Y = Y_dataset[i:i+BATCH_SIZE]
            Y = torch.tensor(Y.astype(np.float32)).reshape(-1, 1)
            Y_predictions = model(X)
            acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
            items_total += Y.shape[0]
            loss = criterion(Y_predictions, Y)
            loss_score += loss.item() * Y.shape[0]
    return (loss_score / items_total), (acc_score / items_total)
get_loss_acc(lr_model, X_train, Y_train)
(0.6443227143826974, 0.622991347342398)
get_loss_acc(lr_model, X_dev, Y_dev)
(0.6369243131743537, 0.6037037037037037)
get_loss_acc(lr_model, X_test, Y_test)
(0.6323775731785694, 0.6499302649930265)
model weights
list(lr_model.parameters())
[Parameter containing: tensor([[ 0.0314, -0.0375, 0.0131, ..., -0.0057, -0.0008, -0.0089]], requires_grad=True), Parameter containing: tensor([0.0563], requires_grad=True)]
list(lr_model.parameters())[0][0]
tensor([ 0.0314, -0.0375, 0.0131, ..., -0.0057, -0.0008, -0.0089], grad_fn=<SelectBackward>)
torch.topk(list(lr_model.parameters())[0][0], 20)
torch.return_types.topk( values=tensor([0.3753, 0.2305, 0.2007, 0.2006, 0.1993, 0.1952, 0.1930, 0.1898, 0.1831, 0.1731, 0.1649, 0.1647, 0.1543, 0.1320, 0.1314, 0.1303, 0.1296, 0.1261, 0.1245, 0.1243], grad_fn=<TopkBackward>), indices=tensor([8942, 6336, 1852, 9056, 1865, 4039, 7820, 5002, 8208, 1857, 9709, 803, 1046, 130, 4306, 6481, 4370, 4259, 4285, 1855]))
for i in torch.topk(list(lr_model.parameters())[0][0], 20)[1]:
    print(vectorizer.get_feature_names()[i])
the of christ to church god rutgers jesus sin christians we and athos 1993 hell our his he heaven christian
torch.topk(list(lr_model.parameters())[0][0], 20, largest = False)
torch.return_types.topk( values=tensor([-0.3478, -0.2578, -0.2455, -0.2347, -0.2330, -0.2265, -0.2205, -0.2050, -0.2044, -0.1979, -0.1876, -0.1790, -0.1747, -0.1745, -0.1734, -0.1647, -0.1639, -0.1617, -0.1601, -0.1592], grad_fn=<TopkBackward>), indices=tensor([5119, 8096, 5420, 4436, 6194, 1627, 6901, 5946, 9970, 3116, 1036, 9906, 5654, 8329, 7869, 1039, 1991, 4926, 5035, 4925]))
for i in torch.topk(list(lr_model.parameters())[0][0], 20, largest = False)[1]:
    print(vectorizer.get_feature_names()[i])
keith sgi livesey host nntp caltech posting morality you edu atheism wpd mathew solntze sandvik atheists com islamic jon islam
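The same inspection can be wrapped in a small helper; a sketch below (note: in scikit-learn >= 1.0, get_feature_names() was replaced by get_feature_names_out()):
def top_features(model, vectorizer, k=20, largest=True):
    # weights of the single linear layer, one weight per vocabulary entry
    weights = list(model.parameters())[0][0]
    _, indices = torch.topk(weights, k, largest=largest)
    names = vectorizer.get_feature_names()  # get_feature_names_out() in newer scikit-learn
    return [names[i] for i in indices]

top_features(lr_model, vectorizer)                 # strongest 'soc.religion.christian' features
top_features(lr_model, vectorizer, largest=False)  # strongest 'alt.atheism' features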
neural network
class NeuralNetworkModel(torch.nn.Module):

    def __init__(self):
        super(NeuralNetworkModel, self).__init__()
        # one hidden layer with 500 units between the tf-idf input and the single output
        self.fc1 = torch.nn.Linear(FEATURES, 500)
        self.fc2 = torch.nn.Linear(500, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)     # non-linearity on the hidden layer
        x = self.fc2(x)
        x = torch.sigmoid(x)  # probability output, as in the logistic regression model
        return x
nn_model = NeuralNetworkModel()
BATCH_SIZE = 5
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(nn_model.parameters(), lr = 0.1)
for epoch in range(5):
    loss_score = 0
    acc_score = 0
    items_total = 0
    nn_model.train()
    for i in range(0, Y_train.shape[0], BATCH_SIZE):
        X = X_train[i:i+BATCH_SIZE]
        X = torch.tensor(X.astype(np.float32).todense())
        Y = Y_train[i:i+BATCH_SIZE]
        Y = torch.tensor(Y.astype(np.float32)).reshape(-1, 1)
        Y_predictions = nn_model(X)
        acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
        items_total += Y.shape[0]
        optimizer.zero_grad()
        loss = criterion(Y_predictions, Y)
        loss.backward()
        optimizer.step()
        loss_score += loss.item() * Y.shape[0]

    # after each epoch report (loss, accuracy) on train and dev
    display(epoch)
    display(get_loss_acc(nn_model, X_train, Y_train))
    display(get_loss_acc(nn_model, X_dev, Y_dev))
0
(0.6605833534551934, 0.5908529048207664)
(0.6379233609747004, 0.6481481481481481)
1
(0.4341224195120214, 0.896168108776267)
(0.3649017943276299, 0.9074074074074074)
2
(0.18619558424660096, 0.9765142150803461)
(0.16293201995668588, 0.9888888888888889)
3
(0.09108264647580784, 0.9962917181705809)
(0.08985773311858927, 0.9962962962962963)
4
(0.053487053708540566, 0.9987639060568603)
(0.05794332528279887, 1.0)
get_loss_acc(nn_model, X_test, Y_test)
(0.16834938257537793, 0.9428172942817294)
Homework
- choose one of the repositories below and fork it:
- create a classifier based on a simple feed-forward neural network in PyTorch (you can base it on this notebook). Instead of tf-idf, please use a dense representation (e.g. word2vec; see the sketch at the end of this section).
- create the predictions in the files dev-0/out.tsv and test-A/out.tsv
- the accuracy, checked with the geval tool (see the previous assignment), should be at least 0.67
- please put the predictions and the generating scripts (as plain text files, not a Jupyter notebook) in the repo, and post a link to your repo in MS TEAMS

deadline: 25.05, 70 points
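A hedged sketch of one way to get a dense document representation, assuming mean-pooled word2vec vectors loaded via gensim's downloader (illustrated on the newsgroups texts; any dense embedding works for the homework):
import gensim.downloader

w2v = gensim.downloader.load('word2vec-google-news-300')  # 300-dimensional vectors

def document_vector(text):
    # mean of the word2vec vectors of the in-vocabulary tokens
    words = [w for w in text.split() if w in w2v]
    if not words:
        return np.zeros(300, dtype=np.float32)
    return np.mean([w2v[w] for w in words], axis=0)

X_train_dense = np.stack([document_vector(t) for t in newsgroups_train_text])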