Information Extraction
8. Logistic regression [exercises]
Jakub Pokrywka (2021)
Logistic regression
library imports
import numpy as np
import torch
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_20newsgroups
# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
CATEGORIES = ['soc.religion.christian', 'alt.atheism']
newsgroups_train_dev = fetch_20newsgroups(subset = 'train', categories=CATEGORIES)
newsgroups_test = fetch_20newsgroups(subset = 'test', categories=CATEGORIES)
newsgroups_train_dev_text = newsgroups_train_dev['data']
newsgroups_test_text = newsgroups_test['data']
Y_train_dev = newsgroups_train_dev['target']
Y_test = newsgroups_test['target']
newsgroups_train_text, newsgroups_dev_text, Y_train, Y_dev = train_test_split(newsgroups_train_dev_text, Y_train_dev, random_state=42)
Y_names = newsgroups_train_dev['target_names']
Y_names
['alt.atheism', 'soc.religion.christian']
baseline
Y_train
array([1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0])
pd.value_counts(Y_train)
1    450
0    359
dtype: int64
train
accuracy_score(np.ones_like(Y_train) * 1, Y_train)
0.5562422744128553
dev
accuracy_score(np.ones_like(Y_dev) * 1, Y_dev)
0.5518518518518518
test
accuracy_score(np.ones_like(Y_test) * 1, Y_test)
0.5550906555090656
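The same majority-class baseline can be written with scikit-learn's DummyClassifier; a minimal sketch (not in the original notebook; the zero feature matrix is a placeholder, since the 'most_frequent' strategy ignores the features entirely):

from sklearn.dummy import DummyClassifier

# always predicts the most frequent class from the training labels
dummy = DummyClassifier(strategy='most_frequent')
dummy.fit(np.zeros((len(Y_train), 1)), Y_train)  # features are ignored
accuracy_score(Y_test, dummy.predict(np.zeros((len(Y_test), 1))))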
QUESTION: what would be wrong with using linear regression here?
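One way to see the problem, on made-up toy data (an illustration added here, not part of the original notebook): least-squares regression on 0/1 labels produces unbounded outputs that cannot be read as probabilities, and extreme inputs push the predictions far outside [0, 1].

from sklearn.linear_model import LinearRegression

# toy 1-D data with 0/1 labels
x_toy = np.array([[0.], [1.], [2.], [3.], [4.], [5.]])
y_toy = np.array([0, 0, 0, 1, 1, 1])
lin = LinearRegression().fit(x_toy, y_toy)
# a straight line has no floor or ceiling: predictions leave [0, 1]
print(lin.predict(np.array([[-5.], [10.]])))  # approx. [-1.43, 2.43]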
Logistic regression
vectorization
FEATURES = 10_000
vectorizer = TfidfVectorizer(max_features=FEATURES)
X_train = vectorizer.fit_transform(newsgroups_train_text)
X_dev = vectorizer.transform(newsgroups_dev_text)
X_test = vectorizer.transform(newsgroups_test_text)
X_test
<717x10000 sparse matrix of type '<class 'numpy.float64'>' with 120739 stored elements in Compressed Sparse Row format>
model - initialization
class LogisticRegressionModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # one weight per TF-IDF feature plus a bias
        self.fc = torch.nn.Linear(FEATURES, 1)

    def forward(self, x):
        x = self.fc(x)
        x = torch.sigmoid(x)
        return x
lr_model = LogisticRegressionModel()
lr_model(torch.Tensor(X_train[0:5].astype(np.float32).todense()))
tensor([[0.4983], [0.4978], [0.5004], [0.4991], [0.5014]], grad_fn=<SigmoidBackward0>)
lr_model
LogisticRegressionModel( (fc): Linear(in_features=10000, out_features=1, bias=True) )
list(lr_model.parameters())
[Parameter containing: tensor([[-0.0022, 0.0024, 0.0013, ..., 0.0090, 0.0095, 0.0065]], requires_grad=True), Parameter containing: tensor([0.0043], requires_grad=True)]
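The freshly initialized weights are close to zero, so every input lands near sigmoid(0) = 0.5, which matches the predictions above. As a sanity check (a sketch using the names from this notebook), the forward pass sigmoid(xWᵀ + b) can be reproduced directly from the listed parameters:

W, b = lr_model.parameters()
x = torch.tensor(X_train[0:1].astype(np.float32).todense())
# torch.sigmoid(x @ W.t() + b) should equal lr_model(x)
print(torch.sigmoid(x @ W.t() + b), lr_model(x))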
model - training
BATCH_SIZE = 5
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(lr_model.parameters(), lr = 0.1)
Y_train.shape[0]
809
loss_score = 0
acc_score = 0
items_total = 0
lr_model.train()
for i in range(0, Y_train.shape[0], BATCH_SIZE):
    # densify only the current batch to keep memory usage low
    X = X_train[i:i+BATCH_SIZE]
    X = torch.tensor(X.astype(np.float32).todense())
    Y = Y_train[i:i+BATCH_SIZE]
    Y = torch.tensor(Y.astype(np.float32)).reshape(-1, 1)
    Y_predictions = lr_model(X)
    acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
    items_total += Y.shape[0]
    optimizer.zero_grad()
    loss = criterion(Y_predictions, Y)
    loss.backward()
    optimizer.step()
    # weight the running loss by batch size (the last batch may be smaller)
    loss_score += loss.item() * Y.shape[0]
Y_predictions
tensor([[0.5029], [0.6063], [0.5796], [0.4821]], grad_fn=<SigmoidBackward0>)
Y
tensor([[0.], [1.], [1.], [0.]])
acc_score
673
items_total
809
print(f'accuracy: {acc_score / items_total}')
accuracy: 0.8318912237330037
print(f'BCE loss: {loss_score / items_total}')
BCE loss: 0.551247839174695
model - evaluation
def get_loss_acc(model, X_dataset, Y_dataset):
    loss_score = 0
    acc_score = 0
    items_total = 0
    model.eval()
    # gradients are not needed for evaluation
    with torch.no_grad():
        for i in range(0, Y_dataset.shape[0], BATCH_SIZE):
            X = X_dataset[i:i+BATCH_SIZE]
            X = torch.tensor(X.astype(np.float32).todense())
            Y = Y_dataset[i:i+BATCH_SIZE]
            Y = torch.tensor(Y.astype(np.float32)).reshape(-1, 1)
            Y_predictions = model(X)
            acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
            items_total += Y.shape[0]
            loss = criterion(Y_predictions, Y)
            loss_score += loss.item() * Y.shape[0]
    return (loss_score / items_total), (acc_score / items_total)
get_loss_acc(lr_model, X_train, Y_train)
(0.5396295055765451, 0.7935723114956736)
get_loss_acc(lr_model, X_dev, Y_dev)
(0.5654726171935046, 0.7407407407407407)
get_loss_acc(lr_model, X_test, Y_test)
(0.5901291338386562, 0.6847977684797768)
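As a cross-check (an addition, not part of the original notebook), scikit-learn's LogisticRegression fitted on the same TF-IDF features gives a reference accuracy; sk_lr is a name made up here:

from sklearn.linear_model import LogisticRegression

# lbfgs with L2 regularization, run to convergence on the sparse matrix
sk_lr = LogisticRegression(max_iter=1000)
sk_lr.fit(X_train, Y_train)
print(accuracy_score(Y_test, sk_lr.predict(X_test)))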
model weights
list(lr_model.parameters())
[Parameter containing: tensor([[ 0.0749, -0.0687, 0.0117, ..., 0.0045, 0.0223, -0.0058]], requires_grad=True), Parameter containing: tensor([0.1239], requires_grad=True)]
list(lr_model.parameters())[0][0]
tensor([ 0.0749, -0.0687, 0.0117, ..., 0.0045, 0.0223, -0.0058], grad_fn=<SelectBackward0>)
torch.topk(list(lr_model.parameters())[0][0], 20)
torch.return_types.topk( values=tensor([0.6079, 0.4051, 0.3739, 0.3648, 0.3574, 0.3527, 0.3471, 0.3414, 0.3330, 0.3024, 0.2906, 0.2766, 0.2705, 0.2418, 0.2389, 0.2333, 0.2230, 0.2156, 0.2151, 0.2129], grad_fn=<TopkBackward0>), indices=tensor([8942, 6336, 4039, 1857, 9709, 9056, 1852, 5002, 1865, 7820, 803, 3558, 4306, 4259, 8208, 1046, 1855, 4285, 6481, 130]))
for i in torch.topk(list(lr_model.parameters())[0][0], 20)[1]:
    print(vectorizer.get_feature_names_out()[i])
the
of
god
christians
we
to
christ
jesus
church
rutgers
and
faith
hell
he
sin
athos
christian
heaven
our
1993
torch.topk(list(lr_model.parameters())[0][0], 20, largest = False)
torch.return_types.topk( values=tensor([-0.7723, -0.5291, -0.4631, -0.4499, -0.4225, -0.4144, -0.4041, -0.4019, -0.3622, -0.3604, -0.3442, -0.3228, -0.3218, -0.3179, -0.3162, -0.3127, -0.3034, -0.3027, -0.2983, -0.2750], grad_fn=<TopkBackward0>), indices=tensor([5119, 1627, 8096, 5420, 6194, 5946, 4436, 6901, 1991, 4925, 3116, 4926, 9906, 1036, 8329, 7869, 4959, 8800, 6289, 7921]))
for i in torch.topk(list(lr_model.parameters())[0][0], 20, largest=False)[1]:
    print(vectorizer.get_feature_names_out()[i])
keith
caltech
sgi
livesey
nntp
morality
host
posting
com
islam
edu
islamic
wpd
atheism
solntze
sandvik
jaeger
system
objective
schneider
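The two loops above can be folded into one helper that also prints the weight values; a sketch (show_top_features is a name invented here):

def show_top_features(model, vectorizer, k=20, largest=True):
    # weights of the single linear layer, one per vocabulary entry
    weights = list(model.parameters())[0][0]
    names = vectorizer.get_feature_names_out()
    values, indices = torch.topk(weights, k, largest=largest)
    for value, index in zip(values, indices):
        print(f'{names[index.item()]}\t{value.item():.4f}')

show_top_features(lr_model, vectorizer, largest=False)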
neural network
class NeuralNetworkModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # one hidden layer of 500 ReLU units between the TF-IDF input and the output
        self.fc1 = torch.nn.Linear(FEATURES, 500)
        self.fc2 = torch.nn.Linear(500, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        x = torch.sigmoid(x)
        return x
nn_model = NeuralNetworkModel()
BATCH_SIZE = 5
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(nn_model.parameters(), lr = 0.1)
for epoch in range(5):
    loss_score = 0
    acc_score = 0
    items_total = 0
    nn_model.train()
    for i in range(0, Y_train.shape[0], BATCH_SIZE):
        X = X_train[i:i+BATCH_SIZE]
        X = torch.tensor(X.astype(np.float32).todense())
        Y = Y_train[i:i+BATCH_SIZE]
        Y = torch.tensor(Y.astype(np.float32)).reshape(-1, 1)
        Y_predictions = nn_model(X)
        acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
        items_total += Y.shape[0]
        optimizer.zero_grad()
        loss = criterion(Y_predictions, Y)
        loss.backward()
        optimizer.step()
        loss_score += loss.item() * Y.shape[0]
    # (loss, accuracy) on train and dev after each epoch
    display(epoch)
    display(get_loss_acc(nn_model, X_train, Y_train))
    display(get_loss_acc(nn_model, X_dev, Y_dev))
0
(0.6796266682657824, 0.5562422744128553)
(0.6829625014905576, 0.5518518518518518)
1
(0.6543819982056565, 0.5562422744128553)
(0.662480209712629, 0.5518518518518518)
2
(0.5808140672328888, 0.7132262051915945)
(0.6008473800288306, 0.6555555555555556)
3
(0.4458613999657637, 0.9048207663782447)
(0.48269164175898943, 0.8481481481481481)
4
(0.3061209664080287, 0.9567367119901112)
(0.3538406518874345, 0.9074074074074074)
get_loss_acc(nn_model, X_test, Y_test)
(0.4221827702666925, 0.8619246861924686)
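To classify new raw text with either trained model, the string has to pass through the same fitted vectorizer first; a minimal sketch (predict_text and the example sentence are made up here):

def predict_text(model, text):
    # reuse the fitted TF-IDF vocabulary; the output is P(class = 1),
    # i.e. the probability of 'soc.religion.christian'
    features = vectorizer.transform([text])
    x = torch.tensor(features.astype(np.float32).todense())
    model.eval()
    with torch.no_grad():
        return model(x).item()

predict_text(nn_model, 'He spoke about faith and the church')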