import csv
import itertools
from os.path import exists

import pandas as pd
import regex as re  # `regex` (not stdlib `re`) provides the \p{...} classes used below
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchtext.vocab import build_vocab_from_iterator
def clean(text):
    text = str(text).strip().lower()
    # The TSV escapes line breaks as a literal backslash-n; handle them
    # before the regex below strips the backslash itself.
    text = text.replace('\\n', ' ')
    text = re.sub(r"’|>|<|\.|\\|\"|”|-|,|\*|:|/", "", text)
    # "n't" (not "'t") so that "don't" becomes "do not", not "don not".
    text = text.replace("n't", " not").replace("'s", " is").replace("'ll", " will").replace("'m", " am").replace("'ve", " have")
    text = text.replace("'", "")
    return text
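# Illustrative sanity check of clean() (made-up sentence, not from the corpus):
#   clean('He said: "I\'ll do it, don\'t worry."')
#   -> 'he said i will do it do not worry'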
def get_words_from_line(line, specials=True):
    # Yield lowercased word and punctuation tokens, optionally wrapped
    # in sentence-boundary markers.
    line = line.rstrip()
    if specials:
        yield '<s>'
    for m in re.finditer(r'[\p{L}0-9\*]+|\p{P}+', line):
        yield m.group(0).lower()
    if specials:
        yield '</s>'
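# Example: list(get_words_from_line('Hello, world!'))
#   -> ['<s>', 'hello', ',', 'world', '!', '</s>']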
def get_word_lines_from_data(d):
for line in d:
yield get_words_from_line(line)
class SimpleBigramNeuralLanguageModel(torch.nn.Module):
    def __init__(self, vocabulary_size, embedding_size):
        super(SimpleBigramNeuralLanguageModel, self).__init__()
        self.model = nn.Sequential(
            nn.Embedding(vocabulary_size, embedding_size),
            nn.Linear(embedding_size, vocabulary_size),
            nn.Softmax(dim=1)  # explicit dim avoids the deprecation warning
        )

    def forward(self, x):
        return self.model(x)
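# Note: taking torch.log of softmax outputs in the loss (as done in the
# training loop below) is less numerically stable than nn.LogSoftmax(dim=1)
# with NLLLoss, or raw logits with nn.CrossEntropyLoss. The probabilities
# are kept here because predict_word() reads them directly; a sketch of the
# more stable variant (same shapes, hypothetical name) would be:
#
#   logits_model = nn.Sequential(
#       nn.Embedding(vocab_size, embed_size),
#       nn.Linear(embed_size, vocab_size),
#   )
#   criterion = nn.CrossEntropyLoss()  # log-softmax + NLL in one call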
def look_ahead_iterator(gen):
w1 = None
for item in gen:
if w1 is not None:
yield (w1, item)
w1 = item
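# Example: list(look_ahead_iterator(iter([1, 2, 3, 4])))
#   -> [(1, 2), (2, 3), (3, 4)]
# i.e. every (word, next word) pair a bigram model trains on.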
class Bigrams(torch.utils.data.IterableDataset):
    def __init__(self, data, vocabulary_size):
        self.vocab = build_vocab_from_iterator(
            get_word_lines_from_data(data),
            max_tokens=vocabulary_size,
            specials=['<unk>'])
        self.vocab.set_default_index(self.vocab['<unk>'])
        self.vocabulary_size = vocabulary_size
        self.data = data

    def __iter__(self):
        # Stream (current_word_id, next_word_id) pairs over the whole corpus.
        return look_ahead_iterator(
            (self.vocab[t] for t in itertools.chain.from_iterable(
                get_word_lines_from_data(self.data))))
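# The dataset yields token-id pairs lazily; a hypothetical first element:
#   it = iter(Bigrams(X_train, vocab_size))
#   next(it)  # -> (vocab['<s>'], id of the first word of the first line)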
# load the training data
in_file = 'train/in.tsv.xz'
out_file = 'train/expected.tsv'
X_train = pd.read_csv(in_file, sep='\t', header=None, quoting=csv.QUOTE_NONE,
                      nrows=200000, on_bad_lines="skip", encoding="UTF-8")
Y_train = pd.read_csv(out_file, sep='\t', header=None, quoting=csv.QUOTE_NONE,
                      nrows=200000, on_bad_lines="skip", encoding="UTF-8")
# Column 6 holds the left context and column 7 the right context; put the
# expected gap word between them, space-separated so the tokens do not fuse.
X_train = X_train[[6, 7]]
X_train = pd.concat([X_train, Y_train], axis=1)
X_train = X_train[6] + ' ' + X_train[0] + ' ' + X_train[7]
X_train = X_train.apply(clean)
vocab_size = 30000
embed_size = 150
train_dataset = Bigrams(X_train, vocab_size)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
if not exists('nn_model2.bin'):
    data = DataLoader(train_dataset, batch_size=8000)
    optimizer = torch.optim.Adam(model.parameters())
    criterion = torch.nn.NLLLoss()
    model.train()
    step = 0
    for i in range(2):
        print(f"Epoch {i} --------------------------------------------------------")
        for x, y in data:
            x = x.to(device)
            y = y.to(device)
            optimizer.zero_grad()
            ypredicted = model(x)
            # The model outputs probabilities, so log them before NLLLoss.
            loss = criterion(torch.log(ypredicted), y)
            if step % 100 == 0:
                print(step, loss.item())
            step += 1
            loss.backward()
            optimizer.step()
    torch.save(model.state_dict(), 'nn_model2.bin')
else:
    model.load_state_dict(torch.load('nn_model2.bin', map_location=device))
vocab = train_dataset.vocab
def predict_word(ws):
    # Return the top-8 next-word candidates as space-separated "word:prob" pairs.
    model.eval()
    with torch.no_grad():
        ixs = torch.tensor(vocab.forward(ws)).to(device)
        out = model(ixs)
    top = torch.topk(out[0], 8)
    top_words = vocab.lookup_tokens(top.indices.tolist())
    top_probs = top.values.tolist()
    return " ".join(f"{word}:{prob}" for word, prob in zip(top_words, top_probs))
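# Example call (hypothetical output; actual words and probabilities depend
# on the trained weights):
#   predict_word(['the'])
#   -> 'same:0.021 first:0.017 united:0.011 ...'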
def word_gap_prediction(file):
    X_test = pd.read_csv(f'{file}/in.tsv.xz', sep='\t', header=None,
                         quoting=csv.QUOTE_NONE, on_bad_lines='skip',
                         encoding="UTF-8")[6]
    X_test = X_test.apply(clean)
    with open(f'{file}/out.tsv', "w+", encoding="UTF-8") as f:
        for row in X_test:
            # Use the last word of the left context as the bigram history.
            before = None
            for before in get_words_from_line(clean(str(row)), False):
                pass
            if before is None:
                # Empty context: fall back to a fixed default distribution.
                pred_str = "a:0.2 the:0.2 to:0.2 of:0.2 and:0.1 :0.1"
            else:
                pred_str = predict_word([before])
            f.write(pred_str.strip() + "\n")
Epoch 0 --------------------------------------------------------
0 10.4640   100 8.8699   200 7.8760   300 7.3941   ...   8200 6.2209
Epoch 1 --------------------------------------------------------
8300 5.9703   8400 5.8215   ...   16400 5.6974   16500 5.8742
(training log abridged: batch loss falls from 10.46 at step 0 to roughly 5.7-5.9 by the end of epoch 1)
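# Back-of-the-envelope quality check: with NLLLoss over log-probabilities,
# per-batch perplexity is exp(loss), so the final logged loss of about 5.87
# corresponds to math.exp(5.87) ≈ 354; a proper figure would average the
# loss over held-out data.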
word_gap_prediction("dev-0/")
word_gap_prediction("test-A/")