diff --git a/run2.py b/run2.py
index eaa805a..9b05c31 100644
--- a/run2.py
+++ b/run2.py
@@ -1,104 +1,171 @@
-import pandas as pd
-import csv
+import itertools
+import lzma
+
import regex as re
-import kenlm
-from english_words import english_words_alpha_set
-from nltk import word_tokenize
-from math import log10
-from pathlib import Path
-import os
+import torch
+from nltk.tokenize import RegexpTokenizer
+from torch import nn
+from torch.utils.data import DataLoader, IterableDataset
+from torchtext.vocab import build_vocab_from_iterator
+
+VOCAB_SIZE = 40000
+EMBED_SIZE = 100
+DEVICE = "cuda"
+
+tokenizer = RegexpTokenizer(r"\w+")
-KENLM_BUILD_PATH = Path("/home/bartek/Pulpit/challenging-america-word-gap-prediction/kenlm/build")
-KENLM_LMPLZ_PATH = KENLM_BUILD_PATH / "bin" / "lmplz"
-KENLM_BUILD_BINARY_PATH = KENLM_BUILD_PATH / "bin" / "build_binary"
-SUDO_PASSWORD = ""
-PREDICTION = 'the:0.03 be:0.03 to:0.03 of:0.025 and:0.025 a:0.025 in:0.020 that:0.020 have:0.015 I:0.010 it:0.010 for:0.010 not:0.010 on:0.010 with:0.010 he:0.010 as:0.010 you:0.010 do:0.010 at:0.010 :0.77'
+def read_file(file):
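+    # Yield the lowercased, punctuation-stripped left-context column (index 6) of each TSV line.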
+    for line in file:
+        text = line.split("\t")
+        yield re.sub(
+            r"[^\w\d'\s]+",
+            "",
+            re.sub(" +", " ", text[6].replace("\\n", " ").replace("\n", "").lower()),
+        )
-def clean(text):
-    text = str(text).lower().replace("-\\n", "").replace("\\n", " ")
-    return re.sub(r"\p{P}", "", text)
+def get_words(line):
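+    # Yield sentence-boundary markers plus lowercased word/punctuation tokens from a line.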
+    line = line.rstrip()
+    yield "<s>"
+    for m in re.finditer(r"[\p{L}0-9\*]+|\p{P}+", line):
+        yield m.group(0).lower()
+    yield "</s>"
-def create_train_data():
-    data = pd.read_csv(
-        "train/in.tsv.xz",
-        sep="\t",
-        error_bad_lines=False,
-        header=None,
-        quoting=csv.QUOTE_NONE,
-        nrows=10000
-    )
-    train_labels = pd.read_csv(
-        "train/expected.tsv",
-        sep="\t",
-        error_bad_lines=False,
-        header=None,
-        quoting=csv.QUOTE_NONE,
-        nrows=10000
+def get_line(file_path):
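+    # Stream the xz-compressed TSV and yield a token generator per line (left + right context joined).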
+    with lzma.open(file_path, mode="rt") as file:
+        for line in file:
+            text = line.split("\t")
+            yield get_words(
+                re.sub(
+                    r"[^\w\d'\s]+",
+                    "",
+                    re.sub(
+                        " +",
+                        " ",
+                        " ".join([text[6], text[7]])
+                        .replace("\\n", " ")
+                        .replace("\n", "")
+                        .lower(),
+                    ),
+                )
+            )
+
+
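+# Build the torchtext vocabulary over the training corpus, capped at VOCAB_SIZE, with <unk> as the default.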
+def build_vocab():
+    vocab = build_vocab_from_iterator(
+        get_line("train/in.tsv.xz"), max_tokens=VOCAB_SIZE, specials=["<unk>"]
     )
-    train_data = data[[6, 7]]
-    train_data = pd.concat([train_data, train_labels], axis=1)
-
-    return train_data[6] + train_data[0] + train_data[7]
+    vocab.set_default_index(vocab["<unk>"])
+    return vocab
-def create_train_file(filename="train.txt"):
-    with open(filename, "w") as f:
-        for line in create_train_data():
-            f.write(clean(line) + "\n")
-
+def look_ahead_iterator(gen):
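+    # Pair each token with its successor, turning a token stream into bigrams.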
+    prev = None
+    for item in gen:
+        if prev is not None:
+            yield (prev, item)
+        prev = item
-def train_model():
-    lmplz_command = f"{KENLM_LMPLZ_PATH} -o 4 < train.txt > model.arpa"
-    build_binary_command = f"{KENLM_BUILD_BINARY_PATH} model.arpa model.binary"
-    os.system('echo %s|sudo -S %s' % (SUDO_PASSWORD, lmplz_command))
-    os.system('echo %s|sudo -S %s' % (SUDO_PASSWORD, build_binary_command))
-
-def predict(model, before, after):
-    prob = 0.0
-    best = []
-    for word in english_words_alpha_set:
-        text = ' '.join([before, word, after])
-        text_score = model.score(text, bos=False, eos=False)
-        if len(best) < 12:
-            best.append((word, text_score))
-        else:
-            worst_score = None
-            for score in best:
-                if not worst_score:
-                    worst_score = score
+class SimpleBigramNeuralLanguageModel(nn.Module):
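+    # Bigram LM: embed the previous word and map the embedding to a distribution over the next word.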
+    def __init__(self, vocabulary_size, embedding_size):
+        super(SimpleBigramNeuralLanguageModel, self).__init__()
+        self.model = nn.Sequential(
+            nn.Embedding(vocabulary_size, embedding_size),
+            nn.Linear(embedding_size, vocabulary_size),
+            nn.Softmax(dim=1),
+        )
+
+    def forward(self, x):
+        return self.model(x)
+
+
+class Bigrams(IterableDataset):
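+    # IterableDataset that streams (previous-word-id, next-word-id) pairs from the training file.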
+    def __init__(self, text_file, vocabulary_size):
+        self.vocab = build_vocab_from_iterator(
+            get_line(text_file), max_tokens=vocabulary_size, specials=["<unk>"]
+        )
+        self.vocab.set_default_index(self.vocab["<unk>"])
+        self.vocabulary_size = vocabulary_size
+        self.text_file = text_file
+
+    def __iter__(self):
+        return look_ahead_iterator(
+            (
+                self.vocab[t]
+                for t in itertools.chain.from_iterable(get_line(self.text_file))
+            )
+        )
+
+
+vocab = build_vocab()
+
+
+def train():
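+    # Fit the bigram model with Adam and NLLLoss applied to the log of the softmax output.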
+    batch_size = 10000
+
+    train_dataset = Bigrams("train/in.tsv.xz", VOCAB_SIZE)
+
+    model = SimpleBigramNeuralLanguageModel(VOCAB_SIZE, EMBED_SIZE).to(DEVICE)
+    train_data_loader = DataLoader(train_dataset, batch_size=batch_size)
+    optimizer = torch.optim.Adam(model.parameters())
+    criterion = torch.nn.NLLLoss()
+
+    model.train()
+    step = 0
+    for x, y in train_data_loader:
+        x = x.to(DEVICE)
+        y = y.to(DEVICE)
+        optimizer.zero_grad()
+        ypredicted = model(x)
+        loss = criterion(torch.log(ypredicted), y)
+        if step % 100 == 0:
+            print(step, loss)
+        step += 1
+        loss.backward()
+        optimizer.step()
+    torch.save(model.state_dict(), "model1.bin")
+
+
+def predict(word, model):
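+    # Format the top-8 successors of the given word as "token:prob" pairs; leftover mass goes to the empty token.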
+    ixs = torch.tensor(vocab.forward([word])).to(DEVICE)
+
+    out = model(ixs)
+    top = torch.topk(out[0], 8)
+    top_indices = top.indices.tolist()
+    top_probs = top.values.tolist()
+    top_words = vocab.lookup_tokens(top_indices)
+    str_predictions = ""
+    remaining_prob = 1.0
+    for pred_word, _, prob in zip(top_words, top_indices, top_probs):
+        if remaining_prob - prob >= 0:
+            str_predictions += f"{pred_word}:{prob} "
+            remaining_prob -= prob
+    if remaining_prob != 1.0:
+        str_predictions += f":{remaining_prob}"
+    return str_predictions
+
+
+def generate_predictions(input_file, output_file, model):
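+    # Write one prediction line per input row, backing off to a fixed distribution for very short contexts.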
+    with open(output_file, "w") as outputf:
+        with lzma.open(input_file, mode="rt") as file:
+            for text in read_file(file):
+                tokens = tokenizer.tokenize(text)
+                if len(tokens) < 4:
+                    prediction = "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
                 else:
-                    if worst_score[1] > score[1]:
-                        worst_score = score
-            if worst_score[1] < text_score:
-                best.remove(worst_score)
-                best.append((word, text_score))
-    probs = sorted(best, key=lambda tup: tup[1], reverse=True)
-    pred_str = ''
-    for word, prob in probs:
-        pred_str += f'{word}:{prob} '
-    pred_str += f':{log10(0.99)}'
-    return pred_str
-
-def make_prediction(model, path, result_path):
-    data = pd.read_csv(path, sep='\t', header=None, quoting=csv.QUOTE_NONE)
-    with open(result_path, 'w', encoding='utf-8') as file_out:
-        for _, row in data.iterrows():
-            before, after = word_tokenize(clean(str(row[6]))), word_tokenize(clean(str(row[7])))
-            if len(before) < 2 or len(after) < 2:
-                pred = PREDICTION
-            else:
-                pred = predict(model, before[-1], after[0])
-            file_out.write(pred + '\n')
+                    prediction = predict(tokens[-1], model)
+                outputf.write(prediction + "\n")
if __name__ == "__main__":
-    create_train_file()
-    train_model()
-    model = kenlm.Model('model.arpa')
-    make_prediction(model, "dev-0/in.tsv.xz", "dev-0/out.tsv")
-    make_prediction(model, "test-A/in.tsv.xz", "test-A/out.tsv")
\ No newline at end of file
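+    # Train, reload the saved weights, and write predictions for dev-0 and test-A.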
+    train()
+    model = SimpleBigramNeuralLanguageModel(VOCAB_SIZE, EMBED_SIZE).to(DEVICE)
+    model.load_state_dict(torch.load("model1.bin"))
+    model.eval()
+    generate_predictions("dev-0/in.tsv.xz", "dev-0/out.tsv", model)
+    generate_predictions("test-A/in.tsv.xz", "test-A/out.tsv", model)