import itertools
import lzma
import regex as re
import torch
from torch import nn
from torch.utils.data import IterableDataset, DataLoader
from torchtext.vocab import build_vocab_from_iterator
from google.colab import drive
def clean_line(line: str) -> str:
    # Fields 6 and 7 of the TSV hold the left and right context of the gap.
    # Literal '\n' markers inside them stand for line breaks and are replaced
    # with spaces so the contexts tokenize as one line.
    separated = line.split('\t')
    prefix = separated[6].replace(r'\n', ' ')
    suffix = separated[7].replace(r'\n', ' ')
    return prefix + ' ' + suffix
def get_words_from_line(line):
    # Tokenize a line: runs of letters/digits/'*' count as words, runs of
    # punctuation become their own tokens, and everything is lower-cased.
    line = clean_line(line)
    for m in re.finditer(r'[\p{L}0-9\*]+|\p{P}+', line):
        yield m.group(0).lower()
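# A quick illustration of the tokenizer on a hypothetical 8-field TSV line
# (not taken from the corpus):
# list(get_words_from_line('0\t1\t2\t3\t4\t5\tHe said:\tnothing at all'))
# -> ['he', 'said', ':', 'nothing', 'at', 'all']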
def get_word_lines_from_file(file_name):
    # Stream the xz-compressed corpus line by line, yielding one token
    # generator per line (the file is never loaded into memory whole).
    with lzma.open(file_name, mode='rt', encoding='utf-8') as fid:
        for line in fid:
            yield get_words_from_line(line)
def look_ahead_iterator(gen):
    # Turn a flat token stream into consecutive (current, next) pairs,
    # i.e. the bigrams the model trains on.
    prev = None
    for item in gen:
        if prev is not None:
            yield (prev, item)
        prev = item
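# Sketch of the pairing behaviour on a toy stream:
# list(look_ahead_iterator(iter(['a', 'b', 'c'])))  ->  [('a', 'b'), ('b', 'c')]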
def prediction(word: str) -> str:
    # Map the input token to its vocabulary index, run the model, and keep
    # the 5 most probable next words.
    ixs = torch.tensor(vocab.forward([word])).to(device)
    with torch.no_grad():
        out = model(ixs)
    top = torch.topk(out[0], 5)
    top_words = vocab.lookup_tokens(top.indices.tolist())
    top_probs = top.values.tolist()
    zipped = list(zip(top_words, top_probs))
    # The expected output format uses an empty token as the catch-all entry:
    # if '<unk>' made the top 5, blank it; otherwise blank the least probable
    # candidate so the line always contains exactly one catch-all.
    for index, (token, prob) in enumerate(zipped):
        if token == '<unk>':
            zipped.pop(index)
            zipped.append(('', prob))
            break
    else:
        zipped[-1] = ('', zipped[-1][1])
    return ' '.join(f'{token}:{prob}' for token, prob in zipped)
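# Example call once `vocab` and `model` are loaded (the words and probabilities
# below are made up for illustration, the format is what matters):
# prediction('said')  ->  'the:0.21 he:0.05 she:0.04 it:0.03 :0.02'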
def create_outputs(folder_name):
    # Run the trained model over one challenge split and write a distribution
    # over candidate words for every input line.
    print(f'Creating outputs in {folder_name}')
    with lzma.open(f'{folder_name}/in.tsv.xz', mode='rt', encoding='utf-8') as fid:
        with open(f'{folder_name}/out.tsv', 'w', encoding='utf-8', newline='\n') as f:
            for line in fid:
                separated = line.split('\t')
                # Tokenize the left context exactly as in training (same regex,
                # lower-cased) and condition on its last token; fall back to
                # '<unk>' if the context is empty.
                context = separated[6].replace(r'\n', ' ')
                tokens = [m.group(0).lower() for m in re.finditer(r'[\p{L}0-9\*]+|\p{P}+', context)]
                prefix = tokens[-1] if tokens else '<unk>'
                output_line = prediction(prefix)
                f.write(output_line + '\n')
class Bigrams(IterableDataset):
    """Streams (word_index, next_word_index) pairs from an xz-compressed corpus."""

    def __init__(self, text_file, vocabulary_size):
        # Note: this builds its own vocabulary over the corpus; with the same
        # parameters it matches the module-level `vocab` constructed below.
        self.vocab = build_vocab_from_iterator(
            get_word_lines_from_file(text_file),
            max_tokens=vocabulary_size,
            specials=['<unk>'])
        self.vocab.set_default_index(self.vocab['<unk>'])
        self.vocabulary_size = vocabulary_size
        self.text_file = text_file

    def __iter__(self):
        # Flatten the per-line token generators into one stream, map tokens to
        # indices, and pair each index with its successor.
        return look_ahead_iterator(
            (self.vocab[t] for t in itertools.chain.from_iterable(
                get_word_lines_from_file(self.text_file))))
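# Iterating a Bigrams instance yields pairs of vocabulary indices, not strings;
# the DataLoader below collates them into two LongTensors of shape (batch_size,),
# one for the current words and one for their successors.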
class SimpleBigramNeuralLanguageModel(nn.Module):
    """Embedding -> linear -> softmax: estimates P(next word | current word)."""

    def __init__(self, vocabulary_size, embedding_size):
        super().__init__()
        self.model = nn.Sequential(
            nn.Embedding(vocabulary_size, embedding_size),
            nn.Linear(embedding_size, vocabulary_size),
            nn.Softmax(dim=1)  # explicit dim; the implicit default is deprecated
        )

    def forward(self, x):
        return self.model(x)
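# Shape sketch (illustrative): for a LongTensor of token indices of shape (B,),
# Embedding yields (B, embed_size), Linear yields (B, vocab_size), and Softmax
# turns each row into a probability distribution over the vocabulary:
# probs = model(torch.tensor([3, 17, 42], device=device))
# probs.shape        # torch.Size([3, 15000]) with the settings below
# probs.sum(dim=1)   # ~ tensor([1., 1., 1.])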
# Hyperparameters and paths.
vocab_size = 15000
embed_size = 150
batch_size = 3000
device = 'cuda' if torch.cuda.is_available() else 'cpu'  # fall back to CPU when no GPU is present
path_to_train = 'train/in.tsv.xz'
path_to_model = 'model1.bin'

# Mount Google Drive so the corpus and the model checkpoint persist across sessions.
drive.mount('/content/drive')
%cd /content/drive/MyDrive/
# Build the vocabulary over the training corpus, reserving '<unk>' for
# out-of-vocabulary tokens.
vocab = build_vocab_from_iterator(
    get_word_lines_from_file(path_to_train),
    max_tokens=vocab_size,
    specials=['<unk>']
)
vocab.set_default_index(vocab['<unk>'])

train_dataset = Bigrams(path_to_train, vocab_size)
model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
data = DataLoader(train_dataset, batch_size=batch_size)
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.NLLLoss()  # expects log-probabilities; see the log() in the loop
# Single-epoch training pass over the bigram stream.
model.train()
step = 0
for x, y in data:
    x = x.to(device)
    y = y.to(device)
    optimizer.zero_grad()
    ypredicted = model(x)
    # The network outputs probabilities, so apply log() before NLLLoss;
    # together they compute the cross-entropy of the next-word prediction.
    loss = criterion(torch.log(ypredicted), y)
    if step % 100 == 0:
        print(step, loss.item())  # .item() prints a plain float, not a tensor
    step += 1
    loss.backward()
    optimizer.step()
torch.save(model.state_dict(), path_to_model)
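# A numerically stabler alternative (a sketch, not what was run above): drop the
# Softmax layer so the network returns raw logits, and let CrossEntropyLoss fuse
# log-softmax with NLL. torch.log of a Softmax output can hit -inf when a
# probability underflows to zero:
#   criterion = torch.nn.CrossEntropyLoss()
#   loss = criterion(logits, y)   # `logits` = model output without Softmax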
# Reload the saved weights, switch to inference mode, and generate predictions
# for both evaluation splits.
model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
model.load_state_dict(torch.load(path_to_model))
model.eval()
create_outputs('dev-0')
create_outputs('test-A')