# sport-text-classification-b.../logistic.py
import numpy as np
import pandas as pd
import torch
from scipy import sparse
from sklearn.feature_extraction.text import TfidfVectorizer
import fasttext
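
# Pipeline: load the challenge TSVs, build TF-IDF features (used only by the
# commented-out logistic-regression baseline), average pre-trained fastText
# word vectors into document vectors, then train a small feed-forward network
# with BCE loss and report loss/accuracy per epoch.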
FEATURES = 98132  # TF-IDF vocabulary cap for the logistic-regression baseline

# train/train.tsv.gz has two tab-separated columns (label, text); dev-0/in.tsv
# and test-A/in.tsv carry text only; dev-0 gold labels live in dev-0/expected.tsv.
df = pd.read_csv("train/train.tsv.gz", header=None, sep="\t", on_bad_lines="skip", names=["score", "text"])
dev0 = pd.read_csv("dev-0/in.tsv", header=None, sep="\t", on_bad_lines="skip")
testA = pd.read_csv("test-A/in.tsv", header=None, sep="\t", on_bad_lines="skip")
expected = pd.read_csv("dev-0/expected.tsv", header=None, sep="\t", on_bad_lines="skip", names=["score"])

# TF-IDF features are only consumed by the commented-out logistic-regression
# baseline below; the active model works on fastText document vectors instead.
vectorizer = TfidfVectorizer(max_features=FEATURES)
X_train = vectorizer.fit_transform(df.iloc[:, 1].tolist())
# X_dev = vectorizer.transform(dev0.iloc[:, 0].tolist())
# X_test = vectorizer.transform(testA.iloc[:, 0].tolist())

Y_train = df[["score"]].to_numpy()
Y_dev = expected[["score"]].to_numpy()

# cc.pl.300.bin: pre-trained Polish fastText vectors (300 dimensions).
ft = fasttext.load_model('cc.pl.300.bin')
EMBED_DIM = ft.get_dimension()

def document_vector(doc):
    """Average the fastText word vectors of a document into one EMBED_DIM vector.
    fastText builds subword vectors, so out-of-vocabulary words still contribute."""
    words = [ft.get_word_vector(word) for word in doc.split()]
    return np.mean(words, axis=0)

X_train = sparse.csr_matrix(np.array([document_vector(text) for text in df.iloc[:, 1].tolist()]))
X_dev = sparse.csr_matrix(np.array([document_vector(text) for text in dev0.iloc[:, 0].tolist()]))
X_test = sparse.csr_matrix(np.array([document_vector(text) for text in testA.iloc[:, 0].tolist()]))
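
# Sanity-check sketch: after averaging, every split should be an
# (n_documents, EMBED_DIM) matrix; a mismatch here would break the model below.
assert X_train.shape[1] == EMBED_DIM
assert X_dev.shape[1] == EMBED_DIM and X_test.shape[1] == EMBED_DIM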
#
# Earlier baseline (kept commented out): logistic regression on the TF-IDF features.
#
# class LogisticRegressionModel(torch.nn.Module):
#
#     def __init__(self):
#         super(LogisticRegressionModel, self).__init__()
#         self.fc = torch.nn.Linear(FEATURES, 1)
#
#     def forward(self, x):
#         x = self.fc(x)
#         x = torch.sigmoid(x)
#         return x
#
#
# lr_model = LogisticRegressionModel()
# print(lr_model(torch.Tensor(X_train[0:5].astype(np.float32).todense())))
# print(lr_model)
# print(list(lr_model.parameters()))
#
#
# BATCH_SIZE = 5
# criterion = torch.nn.BCELoss()
# optimizer = torch.optim.SGD(lr_model.parameters(), lr=0.1)
# print(Y_train.shape[0])
#
# loss_score = 0
# acc_score = 0
# items_total = 0
# lr_model.train()
#
# for i in range(0, Y_train.shape[0], BATCH_SIZE):
#     X = X_train[i:i + BATCH_SIZE]
#     X = torch.tensor(X.astype(np.float32).todense())
#     Y = Y_train[i:i + BATCH_SIZE]
#     Y = torch.tensor(Y.astype(np.float32)).reshape(-1, 1)
#     Y_predictions = lr_model(X)
#     acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
#     items_total += Y.shape[0]
#
#     optimizer.zero_grad()
#     loss = criterion(Y_predictions, Y)
#     loss.backward()
#     optimizer.step()
#
#     loss_score += loss.item() * Y.shape[0]
#
# print(Y_predictions)
# print(Y)
# print(acc_score)
# print(items_total)
# print(f'accuracy: {acc_score / items_total}')
# print(f'BCE loss: {loss_score / items_total}')
def get_loss_acc(model, X_dataset, Y_dataset):
    """Mean BCE loss and accuracy over a dataset (uses the global BATCH_SIZE
    and criterion defined alongside the model below)."""
    loss_score = 0
    acc_score = 0
    items_total = 0
    model.eval()
    with torch.no_grad():
        for i in range(0, Y_dataset.shape[0], BATCH_SIZE):
            X = X_dataset[i:i + BATCH_SIZE]
            X = torch.tensor(X.astype(np.float32).todense())
            Y = Y_dataset[i:i + BATCH_SIZE]
            Y = torch.tensor(Y.astype(np.float32)).reshape(-1, 1)
            Y_predictions = model(X)
            acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
            items_total += Y.shape[0]
            loss = criterion(Y_predictions, Y)
            loss_score += loss.item() * Y.shape[0]
    return (loss_score / items_total), (acc_score / items_total)
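
# Usage sketch: get_loss_acc returns (mean BCE loss, accuracy), e.g.
#   dev_loss, dev_acc = get_loss_acc(nn_model, X_dev, Y_dev)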
#
# print(get_loss_acc(lr_model, X_train, Y_train))
# print(get_loss_acc(lr_model, X_dev, Y_dev))
#
# # 20 most negative weights: TF-IDF features most indicative of the negative class.
# for i in torch.topk(list(lr_model.parameters())[0][0], 20, largest=False)[1]:
#     print(vectorizer.get_feature_names_out()[i])

class NeuralNetworkModel(torch.nn.Module):
    """Feed-forward classifier over the averaged fastText document vectors."""

    def __init__(self):
        super(NeuralNetworkModel, self).__init__()
        # Input width must match the fastText embedding size (EMBED_DIM = 300),
        # not the TF-IDF FEATURES count used by the logistic baseline.
        self.fc1 = torch.nn.Linear(EMBED_DIM, 200)
        self.fc2 = torch.nn.Linear(200, 100)
        self.fc3 = torch.nn.Linear(100, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        x = torch.sigmoid(x)
        x = self.fc3(x)
        x = torch.sigmoid(x)
        return x
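
# Design note: the sigmoid between fc2 and fc3 squashes the hidden activations
# to (0, 1); a ReLU is the more conventional choice for a hidden layer, while
# the final sigmoid maps the output to a probability for BCELoss.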

nn_model = NeuralNetworkModel()
BATCH_SIZE = 5
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(nn_model.parameters(), lr=0.1)

for epoch in range(5):
    loss_score = 0
    acc_score = 0
    items_total = 0
    nn_model.train()
    for i in range(0, Y_train.shape[0], BATCH_SIZE):
        X = X_train[i:i + BATCH_SIZE]
        X = torch.tensor(X.astype(np.float32).todense())
        Y = Y_train[i:i + BATCH_SIZE]
        Y = torch.tensor(Y.astype(np.float32)).reshape(-1, 1)
        Y_predictions = nn_model(X)
        acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
        items_total += Y.shape[0]

        optimizer.zero_grad()
        loss = criterion(Y_predictions, Y)
        loss.backward()
        optimizer.step()

        loss_score += loss.item() * Y.shape[0]

    print(epoch)
    print(get_loss_acc(nn_model, X_train, Y_train))
    print(get_loss_acc(nn_model, X_dev, Y_dev))
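
# Inference sketch (an assumption, not part of the original run): challenges
# with this directory layout typically expect one predicted label per line in
# dev-0/out.tsv and test-A/out.tsv; the output paths here are hypothetical.
nn_model.eval()
with torch.no_grad():
    for X, path in ((X_dev, "dev-0/out.tsv"), (X_test, "test-A/out.tsv")):
        scores = nn_model(torch.tensor(X.astype(np.float32).todense()))
        with open(path, "w") as f:
            f.writelines(f"{int(score > 0.5)}\n" for score in scores.reshape(-1).tolist())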