# -*- coding: utf-8 -*-
"""Untitled12.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1ia944iiX5i5KOxESwbcHksNJrG4L12U6
"""

!pip install torch regex pandas torchtext

!git clone --single-branch git://gonito.net/challenging-america-word-gap-prediction -b master

!xzcat ./challenging-america-word-gap-prediction/train/in.tsv.xz > ./challenging-america-word-gap-prediction/train/in.tsv
!xzcat ./challenging-america-word-gap-prediction/dev-0/in.tsv.xz > ./challenging-america-word-gap-prediction/dev-0/in.tsv
!xzcat ./challenging-america-word-gap-prediction/test-A/in.tsv.xz > ./challenging-america-word-gap-prediction/test-A/in.tsv

import pandas as pd

def read_train_data(file):
    # Dump the two text contexts (columns 6 and 7) of the first 500k training rows
    # into a plain-text file, one context per line.
    data = pd.read_csv(file, sep="\t", error_bad_lines=False, index_col=0, header=None)
    with open('input_train.txt', 'w') as f:
        for index, row in data[:500000].iterrows():
            first_part = str(row[6]).replace('\\n', '')
            sec_part = str(row[7]).replace('\\n', '')
            if first_part != 'nan':
                f.write(first_part + '\n')
            if sec_part != 'nan':
                f.write(sec_part + '\n')

read_train_data('./challenging-america-word-gap-prediction/train/in.tsv')

!head -10 input_train.txt

from itertools import islice
import regex as re
import sys
from torchtext.vocab import build_vocab_from_iterator

def get_words_from_line(line):
    # Tokenize a line into lowercased words/punctuation, wrapped in sentence markers.
    line = line.rstrip()
    yield '<s>'
    for m in re.finditer(r'[\p{L}0-9\*]+|\p{P}+', line):
        yield m.group(0).lower()
    yield '</s>'

def get_word_lines_from_file(file_name):
    with open(file_name, 'r') as fh:
        for line in fh:
            yield get_words_from_line(line)

vocab_size = 30000

vocab = build_vocab_from_iterator(
    get_word_lines_from_file('input_train.txt'),
    max_tokens = vocab_size,
    specials = ['<unk>'])

vocab['is']

print(vocab['is'])

from torch import nn
import torch

embed_size = 100

class SimpleBigramNeuralLanguageModel(nn.Module):
    # Simple bigram model: embed the previous word, project to the vocabulary,
    # and return a probability distribution over the next word.
    def __init__(self, vocabulary_size, embedding_size):
        super(SimpleBigramNeuralLanguageModel, self).__init__()
        self.model = nn.Sequential(
            nn.Embedding(vocabulary_size, embedding_size),
            nn.Linear(embedding_size, vocabulary_size),
            nn.Softmax(dim=1)
        )

    def forward(self, x):
        return self.model(x)

model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size)

vocab.set_default_index(vocab['<unk>'])

!shuf < input_train.txt > input_train.shuf.txt

from torch.utils.data import IterableDataset
import itertools

def look_ahead_iterator(gen):
    # Turn a stream of tokens into a stream of consecutive (previous, next) pairs.
    prev = None
    for item in gen:
        if prev is not None:
            yield (prev, item)
        prev = item

class Bigrams(IterableDataset):
    def __init__(self, text_file, vocabulary_size):
        self.vocab = build_vocab_from_iterator(
            get_word_lines_from_file(text_file),
            max_tokens = vocabulary_size,
            specials = ['<unk>'])
        self.vocab.set_default_index(self.vocab['<unk>'])
        self.vocabulary_size = vocabulary_size
        self.text_file = text_file

    def __iter__(self):
        return look_ahead_iterator(
            (self.vocab[t] for t in itertools.chain.from_iterable(get_word_lines_from_file(self.text_file))))

train_dataset = Bigrams('input_train.shuf.txt', vocab_size)

from torch.utils.data import DataLoader

device = 'cuda'
model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
data = DataLoader(train_dataset, batch_size=5000)
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.NLLLoss()

model.train()
step = 0
for x, y in data:
    x = x.to(device)
    y = y.to(device)
    optimizer.zero_grad()
    ypredicted = model(x)
    # The model outputs probabilities, so take the log before NLLLoss.
    loss = criterion(torch.log(ypredicted), y)
    if step % 100 == 0:
        print(step, loss)
    step += 1
    loss.backward()
    optimizer.step()

torch.save(model.state_dict(), 'model1.bin')
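# Hedged sketch, not part of the original notebook: a quick perplexity estimate of the freshly
# trained model on one batch of bigram pairs drawn from the shuffled training file. It reuses
# train_dataset, model, criterion and device defined above; the batch size of 1000 and the choice
# of a single sample batch are arbitrary assumptions made only for illustration.
import math

model.eval()
with torch.no_grad():
    xs, ys = next(iter(DataLoader(train_dataset, batch_size=1000)))
    probs = model(xs.to(device))
    sample_nll = criterion(torch.log(probs), ys.to(device))
    print('sample perplexity:', math.exp(sample_nll.item()))
model.train()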
device = 'cuda'
model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
model.load_state_dict(torch.load('model1.bin'))
model.eval()

ixs = torch.tensor(vocab.forward(['he'])).to(device)

out = model(ixs)
top = torch.topk(out[0], 10)
top_indices = top.indices.tolist()
top_probs = top.values.tolist()
top_words = vocab.lookup_tokens(top_indices)
list(zip(top_words, top_indices, top_probs))

import regex as re

def predict_word(word):
    # Return the challenge output format "word1:prob1 word2:prob2 ... :rest" for the
    # most probable continuations of the given word.
    device = 'cuda'
    model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
    model.load_state_dict(torch.load('model1.bin'))
    model.eval()
    ixs = torch.tensor(vocab.forward([word])).to(device)
    out = model(ixs)
    top = torch.topk(out[0], 8)
    top_indices = top.indices.tolist()
    top_probs = top.values.tolist()
    top_words = vocab.lookup_tokens(top_indices)
    to_return = ''
    total = 1.0
    pattern = re.compile("^([A-Za-z0-9])+$")
    for el in list(zip(top_words, top_indices, top_probs)):
        # Keep only alphanumeric tokens and assign each its own probability.
        if re.match(pattern, el[0]):
            if total - el[2] >= 0:
                to_return += f'{el[0]}:{el[2]} '
                total -= el[2]
    if total != 1.0:
        # Assign the remaining probability mass to the "anything else" slot.
        to_return += f':{total}'
    return to_return

!pip install nltk

from nltk.tokenize import RegexpTokenizer

tokenizer = RegexpTokenizer(r"\w+")

import csv

def generate_outputs(input_file, output_file):
    data = pd.read_csv(input_file, sep='\t', error_bad_lines=False, index_col=0, header=None, quoting=csv.QUOTE_NONE)
    with open(output_file, 'w') as f:
        for index, row in data.iterrows():
            first_context = row[6]
            sec_context = row[7]
            first_context_tokens = tokenizer.tokenize(first_context)
            sec_context_tokens = tokenizer.tokenize(sec_context)
            if len(first_context_tokens) + len(sec_context_tokens) < 4:
                # Too little context: fall back to a fixed unigram-like distribution.
                prediction = 'the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1'
            else:
                prediction = predict_word(first_context_tokens[-1])
                if not prediction:
                    prediction = 'the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1'
            f.write(prediction + '\n')

generate_outputs('./challenging-america-word-gap-prediction/dev-0/in.tsv', './challenging-america-word-gap-prediction/dev-0/out.tsv')

generate_outputs('./challenging-america-word-gap-prediction/test-A/in.tsv', './challenging-america-word-gap-prediction/test-A/out.tsv')

!wget https://gonito.net/get/bin/geval
!chmod u+x geval
!./geval -t ./challenging-america-word-gap-prediction/dev-0/ --metric PerplexityHashed
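# Hedged sketch, not part of the original notebook: a simple format check of the generated
# out.tsv files before submitting. It parses each line as "word:prob ... :rest" and reports
# lines whose total probability mass exceeds 1. The function name check_outputs and the
# tolerance of 1e-6 are assumptions introduced here for illustration only.
def check_outputs(path):
    bad = 0
    with open(path) as f:
        for line in f:
            mass = 0.0
            for chunk in line.rstrip('\n').split(' '):
                if not chunk:
                    continue
                word, _, prob = chunk.rpartition(':')
                mass += float(prob)
            if mass > 1.0 + 1e-6:
                bad += 1
    print(path, 'lines with probability mass > 1:', bad)

check_outputs('./challenging-america-word-gap-prediction/dev-0/out.tsv')
check_outputs('./challenging-america-word-gap-prediction/test-A/out.tsv')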