import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
Data loading
import pickle
import lzma
import regex as re


def load_pickle(filename):
    with open(filename, "rb") as f:
        return pickle.load(f)


def save_pickle(d):
    with open("vocabulary.pkl", "wb") as f:
        pickle.dump(d, f, protocol=pickle.HIGHEST_PROTOCOL)
def clean_document(document: str) -> str:
    # Lower-case the text, drop possessive "'s" and escaped newlines, collapse
    # punctuation/digits to spaces, and expand a few common contractions.
    document = document.lower().replace("’", "'")
    document = re.sub(r"'s|[\-]\\n", "", document)
    document = re.sub(
        r"(\\+n|[{}\[\]”&:•¦()*0-9;\"«»$\-><^,®¬¿?¡!#+. \t\n])+", " ", document
    )
    for to_find, substitute in zip(
        ["i'm", "won't", "n't", "'ll"], ["i am", "will not", " not", " will"]
    ):
        document = document.replace(to_find, substitute)
    return document
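A quick, illustrative call (my own example, not from the original notebook) showing the effect of the cleaning:
print(clean_document("He won't answer - it's 2023!"))
# roughly: 'he will not answer it ' (lower-cased, contraction expanded, punctuation and digits stripped)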
def get_words_from_line(line, clean_text=True):
    if clean_text:
        line = clean_document(line)
    else:
        line = line.strip()
    yield "<s>"
    for m in re.finditer(r"[\p{L}0-9\*]+|\p{P}+", line):
        yield m.group(0).lower()
    yield "</s>"
def get_word_lines_from_file(file_name, clean_text=True, only_text=False):
    with lzma.open(file_name, "r") as fh:
        for i, line in enumerate(fh):
            if only_text:
                line = "\t".join(line.decode("utf-8").split("\t")[:-2])
            else:
                line = line.decode("utf-8")
            if i % 10000 == 0:
                print(i)
            yield get_words_from_line(line, clean_text)
Dataset class
from torch.utils.data import IterableDataset
from torchtext.vocab import build_vocab_from_iterator
import itertools

VOCAB_SIZE = 20000


def look_ahead_iterator(gen):
    # Turn a stream of tokens into consecutive (previous, current) pairs, i.e. bigrams.
    prev = None
    for item in gen:
        if prev is not None:
            yield (prev, item)
        prev = item
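For example (illustrative only), the iterator turns a token stream into consecutive pairs:
list(look_ahead_iterator([1, 2, 3, 4]))
# [(1, 2), (2, 3), (3, 4)]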
class Bigrams(IterableDataset):
    def __init__(
        self, text_file, vocabulary_size, vocab=None, only_text=False, clean_text=True
    ):
        # Build the vocabulary from the corpus unless a pre-built one is supplied.
        self.vocab = (
            build_vocab_from_iterator(
                get_word_lines_from_file(text_file, clean_text, only_text),
                max_tokens=vocabulary_size,
                specials=["<unk>"],
            )
            if vocab is None
            else vocab
        )
        self.vocab.set_default_index(self.vocab["<unk>"])
        self.vocabulary_size = vocabulary_size
        self.text_file = text_file
        self.clean_text = clean_text
        self.only_text = only_text

    def __iter__(self):
        # Stream (previous word id, current word id) pairs over the whole file.
        return look_ahead_iterator(
            (
                self.vocab[t]
                for t in itertools.chain.from_iterable(
                    get_word_lines_from_file(
                        self.text_file, self.clean_text, self.only_text
                    )
                )
            )
        )
vocab = None # torch.load('./vocab.pth')
train_dataset = Bigrams("/content/train/in.tsv.xz", VOCAB_SIZE, vocab, clean_text=False)
# torch.save(train_dataset.vocab, "vocab.pth")
# torch.save(train_dataset.vocab, "vocab_only_text.pth")
# torch.save(train_dataset.vocab, "vocab_only_text_clean.pth")
torch.save(train_dataset.vocab, "vocab_2.pth")
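A few sanity checks on the freshly built vocabulary can be useful at this point (illustrative; the exact indices depend on the corpus):
print(len(train_dataset.vocab))                       # at most VOCAB_SIZE entries
print(train_dataset.vocab["<unk>"])                   # 0 - the special token registered first
print(train_dataset.vocab.lookup_tokens([1, 2, 3]))   # most frequent corpus tokens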
Model definition
class SimpleBigramNeuralLanguageModel(nn.Module):
    def __init__(self, vocabulary_size, embedding_size):
        super(SimpleBigramNeuralLanguageModel, self).__init__()
        # Embed the previous word, project to vocabulary size, normalise to probabilities.
        self.model = nn.Sequential(
            nn.Embedding(vocabulary_size, embedding_size),
            nn.Linear(embedding_size, vocabulary_size),
            nn.Softmax(dim=1),
        )

    def forward(self, x):
        return self.model(x)
EMBED_SIZE = 100
model = SimpleBigramNeuralLanguageModel(VOCAB_SIZE, EMBED_SIZE)
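One caveat: the training loop below applies torch.log to the Softmax output before NLLLoss, which can be numerically unstable when probabilities get very small. A common alternative (a sketch of a variant, not what this notebook uses) is to end the network with nn.LogSoftmax so the log-probabilities can be fed to NLLLoss directly:
# Sketch of a numerically safer head; only the last layer differs from the model above.
stable_model = nn.Sequential(
    nn.Embedding(VOCAB_SIZE, EMBED_SIZE),
    nn.Linear(EMBED_SIZE, VOCAB_SIZE),
    nn.LogSoftmax(dim=1),  # outputs log-probabilities, so criterion(stable_model(x), y) works without torch.log
)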
from torch.utils.data import DataLoader

device = "cuda"
model = SimpleBigramNeuralLanguageModel(VOCAB_SIZE, EMBED_SIZE).to(device)
data = DataLoader(train_dataset, batch_size=5000)
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.NLLLoss()

model.train()
step = 0
for x, y in data:
    x = x.to(device)
    y = y.to(device)
    optimizer.zero_grad()
    ypredicted = model(x)
    # NLLLoss expects log-probabilities, while the model outputs probabilities.
    loss = criterion(torch.log(ypredicted), y)
    if step % 100 == 0:
        print(step, loss)
    step += 1
    loss.backward()
    optimizer.step()

torch.save(model.state_dict(), "model_2.bin")
Training log (abridged): the loss dropped from 10.07 at step 0 to about 6.5 by step 500 and about 5.5 by step 2000, then hovered around 5.1-5.3; the last logged value was 5.08 at step 9200, after which training was interrupted manually (KeyboardInterrupt raised during loss.backward()).
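Since NLLLoss here is the average per-token negative log-likelihood, exp(loss) can be read as perplexity, which gives a rough sense of the numbers in the abridged log above:
import math
print(math.exp(5.1))  # ≈ 164: bigram-model perplexity corresponding to a loss of about 5.1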
ixs = torch.tensor(train_dataset.vocab.forward(["when"])).to(device)
out = model(ixs)
top = torch.topk(out[0], 10)
top_indices = top.indices.tolist()
top_probs = top.values.tolist()
top_words = train_dataset.vocab.lookup_tokens(top_indices)
list(zip(top_words, top_indices, top_probs))
[('the', 2, 0.15899169445037842), ('\\', 1, 0.10546761751174927), ('he', 28, 0.06849857419729233), ('it', 15, 0.05329886078834534), ('i', 26, 0.0421920120716095), ('they', 50, 0.03895237296819687), ('a', 8, 0.03352600708603859), ('<unk>', 0, 0.031062396243214607), ('we', 61, 0.02323235757648945), ('she', 104, 0.02003088779747486)]
device = "cuda"
model = SimpleBigramNeuralLanguageModel(VOCAB_SIZE, EMBED_SIZE).to(device)
model.load_state_dict(torch.load("model1.bin"))
<All keys matched successfully>
def predict_word(ixs, model, top_k=5):
    out = model(ixs)
    top = torch.topk(out[0], top_k)
    top_indices = top.indices.tolist()
    top_probs = top.values.tolist()
    top_words = train_dataset.vocab.lookup_tokens(top_indices)
    return list(zip(top_words, top_indices, top_probs))
def get_one_word(text, context="left"):
    # For a left context take the last word, for a right context take the first one.
    if context == "left":
        context = -1
    else:
        context = 0
    return text.rstrip().split(" ")[context]
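Illustrative calls (my examples): the helper picks the word adjacent to the gap from either side of the context.
get_one_word("the united states of", context="left")      # -> 'of'
get_one_word("america was discovered", context="right")   # -> 'america'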
def inference_on_file(filename, model, lines_no=1):
    results_path = "/".join(filename.split("/")[:-1]) + "/out.tsv"
    with lzma.open(filename, "r") as fp, open(results_path, "w") as out_file:
        print("Running inference on", filename)
        for i, line in enumerate(fp):
            line = line.decode("utf-8")
            # The word directly preceding the gap is the last word of the left-context column.
            left = get_one_word(line.split("\t")[-2])
            tensor = torch.tensor(train_dataset.vocab.forward([left])).to(device)
            results = predict_word(tensor, model, 9)
            prob_sum = sum([word[2] for word in results])
            # Write "word:prob" pairs plus a trailing ":" entry carrying the summed probability.
            result_line = (
                " ".join([f"{word[0]}:{word[2]}" for word in results])
                + f" :{prob_sum}\n"
            )
            out_file.write(result_line)
            print(f"\rProgress: {(((i + 1) / lines_no) * 100):.2f}%", end="")
        print()
model.eval()

for filepath, lines_no in zip(
    ("/content/dev-0/in.tsv.xz", "/content/test-A/in.tsv.xz"), (10519.0, 7414.0)
):
    inference_on_file(filepath, model, lines_no)
Output (abridged): inference ran over /content/dev-0/in.tsv.xz and /content/test-A/in.tsv.xz, each reaching Progress: 100.00%.