# (notebook-export artifact: original file size "7.4 KiB" — kept as a comment so the file parses)
import pandas as pd
import numpy as np
from gensim.test.utils import common_texts
from gensim.models import FastText
import os.path
import gzip
import shutil
import torch
import torch.optim as optim
# --- hyperparameters and loss --------------------------------------------
features = 100                  # dimensionality of the FastText word vectors
batch_size = 16                 # mini-batch size for training and evaluation
criterion = torch.nn.BCELoss()  # binary cross-entropy; model emits sigmoid probabilities

# Decompress the training data once; skip if it is already unpacked
# (the original re-decompressed on every run).
if not os.path.isfile('train/train.tsv'):
    with gzip.open('train/train.tsv.gz', 'rb') as f_in:
        with open('train/train.tsv', 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)

# Load the training set: a label column ("Ball") and the raw text,
# which is lower-cased and whitespace-tokenized in place.
data = pd.read_csv('train/train.tsv', sep='\t', names=["Ball","Text"])
data["Text"] = data["Text"].str.lower().str.split()
# notebook output: 0 [mindaugas, budzinauskas, wierzy, w, odbudowę,... 1 [przyjmujący, reprezentacji, polski, wrócił, d... 2 [fen, 9:, zapowiedź, walki, róża, gumienna, vs... 3 [aleksander, filipiak:, czuję, się, dobrze, w,... 4 [victoria, carl, i, aleksiej, czerwotkin, mist... ... 98127 [kamil, syprzak, zaczyna, kolekcjonować, trofe... 98128 [holandia:, dwa, gole, piotra, parzyszka, piot... 98129 [sparingowo:, korona, gorsza, od, stali., lett... 98130 [vive, -, wisła., ośmiu, debiutantów, w, tegor... 98131 [wta, miami:, timea, bacsinszky, pokonana,, sw... Name: Text, Length: 98132, dtype: object
# Obtain the FastText embedding model: reload the cached copy when one
# exists on disk, otherwise train it from the tokenized texts and save it.
if os.path.isfile('fasttext.model'):
    ft_model = FastText.load("fasttext.model")
else:
    ft_model = FastText(size=features, window=3, min_count=1)
    ft_model.build_vocab(sentences=data["Text"])
    ft_model.train(data["Text"], total_examples=len(data["Text"]), epochs=10)
    ft_model.save("fasttext.model")
def document_vector(doc):
    """Embed one tokenized document as the element-wise max of its word vectors.

    Parameters
    ----------
    doc : list[str]
        Tokens of a single document (FastText covers OOV tokens via subwords).

    Returns
    -------
    numpy.ndarray of shape (features,)
        Max-pooled embedding; the zero vector for an empty document.
    """
    # Guard: np.max over zero rows raises ValueError, so an empty token
    # list (e.g. a blank input line) maps to the zero vector instead.
    if len(doc) == 0:
        return np.zeros(features, dtype=np.float32)
    word_vectors = ft_model.wv[doc]   # (len(doc), features) matrix
    return np.max(word_vectors, axis=0)
# Max-pooled FastText features for every document, plus the binary labels.
X = list(map(document_vector, data["Text"]))
Y = data["Ball"]
class NeuralNetworkModel(torch.nn.Module):
    """Feed-forward binary classifier: in_features -> hidden1 -> hidden2 -> 1.

    The final sigmoid yields a probability in [0, 1] to match BCELoss.
    NOTE(review): the sigmoid (rather than ReLU) after the second layer is
    preserved from the original code — confirm it is intentional.
    """

    def __init__(self, in_features=100, hidden1=200, hidden2=150):
        # Defaults reproduce the original fixed architecture
        # (in_features == the module-level `features` constant, 100).
        super(NeuralNetworkModel, self).__init__()
        self.fc1 = torch.nn.Linear(in_features, hidden1)
        self.fc2 = torch.nn.Linear(hidden1, hidden2)
        self.fc3 = torch.nn.Linear(hidden2, 1)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))
        return torch.sigmoid(self.fc3(x))
def get_loss_acc(model, X_dataset, Y_dataset):
    """Compute mean BCE loss and accuracy of `model` over a whole dataset.

    Parameters
    ----------
    model : torch.nn.Module
        Classifier returning per-sample probabilities in [0, 1].
    X_dataset : sequence of feature vectors
        Must support slice indexing (list of numpy arrays in this script).
    Y_dataset : pandas.Series
        Binary 0/1 labels; `.shape`, `.astype` and `.to_numpy` are used.

    Returns
    -------
    (mean_loss, accuracy) : tuple[float, float]
    """
    loss_score = 0
    acc_score = 0
    items_total = 0
    model.eval()
    # Evaluation only: disable autograd so no graph is built (the original
    # accumulated graphs for every batch, wasting time and memory).
    with torch.no_grad():
        for i in range(0, Y_dataset.shape[0], batch_size):
            # Stack the batch into one ndarray first — constructing a tensor
            # from a list of arrays is slow and warns in recent torch.
            x = torch.tensor(np.array(X_dataset[i:i + batch_size]))
            y = Y_dataset[i:i + batch_size]
            y = torch.tensor(y.astype(np.float32).to_numpy()).reshape(-1, 1)
            y_predictions = model(x)
            # Threshold at 0.5; comparing a bool tensor against float 0/1
            # labels is valid in torch.
            acc_score += torch.sum((y_predictions >= 0.5) == y).item()
            items_total += y.shape[0]
            loss = criterion(y_predictions, y)
            # Weight by batch size so the final partial batch counts correctly.
            loss_score += loss.item() * y.shape[0]
    return (loss_score / items_total), (acc_score / items_total)
model_path = 'nn.model'
nn_model = NeuralNetworkModel()
if not os.path.isfile(model_path):
    # Train from scratch, then cache the weights on disk.
    optimizer = optim.SGD(nn_model.parameters(), lr=0.1)
    # `display` exists only inside IPython/Jupyter; `print` keeps the
    # script runnable standalone (this was a NameError outside a notebook).
    print(get_loss_acc(nn_model, X, Y))
    for epoch in range(5):
        nn_model.train()
        for i in range(0, len(X), batch_size):
            # Stack the batch into one ndarray before tensor construction.
            x = torch.tensor(np.array(X[i:i + batch_size]))
            y = Y[i:i + batch_size]
            y = torch.tensor(y.astype(np.float32).to_numpy()).reshape(-1, 1)
            y_predictions = nn_model(x)
            loss = criterion(y_predictions, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Per-epoch progress report on the full training set.
        # NOTE(review): original indentation was lost in export — confirm the
        # metric call belongs inside the epoch loop.
        print(get_loss_acc(nn_model, X, Y))
    torch.save(nn_model.state_dict(), model_path)
else:
    nn_model.load_state_dict(torch.load(model_path))
# --- dev-set evaluation ---------------------------------------------------
x_dev = pd.read_csv('dev-0/in.tsv', sep='\t', names=["Text"])["Text"]
y_dev = pd.read_csv('dev-0/expected.tsv', sep='\t', names=["Ball"])["Ball"]
x_dev = [document_vector(x) for x in x_dev.str.lower().str.split()]
# The original evaluated here but silently discarded the result in script
# context (it only showed in the notebook); print it instead.
print(get_loss_acc(nn_model, x_dev, y_dev))

# Write hard 0/1 predictions for the dev set. Inference only: eval mode
# plus no_grad so no autograd graph is built.
nn_model.eval()
with torch.no_grad():
    y_dev_prediction = nn_model(torch.tensor(np.array(x_dev)))
y_dev_prediction = np.array([round(y) for y in y_dev_prediction.flatten().tolist()])
np.savetxt("dev-0/out.tsv", y_dev_prediction, fmt='%d')

# --- test-set predictions -------------------------------------------------
x_test = pd.read_csv('test-A/in.tsv', sep='\t', names=["Text"])["Text"]
x_test = [document_vector(x) for x in x_test.str.lower().str.split()]
with torch.no_grad():
    y_test_prediction = nn_model(torch.tensor(np.array(x_test)))
y_test_prediction = np.array([round(y) for y in y_test_prediction.flatten().tolist()])
np.savetxt("test-A/out.tsv", y_test_prediction, fmt='%d')