Neural bigrams
parent a0c4ca1217
commit 37775b359b
dev-0/out.tsv | 21038 lines (file diff suppressed because it is too large)
run.py | 242 lines
@@ -1,76 +1,212 @@
Added (Colab notebook header and environment setup):

# -*- coding: utf-8 -*-
"""Untitled12.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1ia944iiX5i5KOxESwbcHksNJrG4L12U6
"""

!pip install torch regex pandas

!git clone --single-branch git://gonito.net/challenging-america-word-gap-prediction -b master

!xzcat ./challenging-america-word-gap-prediction/train/in.tsv.xz > ./challenging-america-word-gap-prediction/train/in.tsv
!xzcat ./challenging-america-word-gap-prediction/dev-0/in.tsv.xz > ./challenging-america-word-gap-prediction/dev-0/in.tsv
!xzcat ./challenging-america-word-gap-prediction/test-A/in.tsv.xz > ./challenging-america-word-gap-prediction/test-A/in.tsv

Unchanged:

import pandas as pd

Removed (imports used only by the old KenLM implementation):

import csv
from nltk.tokenize import RegexpTokenizer
from english_words import english_words_set
from nltk import trigrams
import os
import kenlm
from math import log10
Removed (the old KenLM-based class; its remaining methods appear further down):

class WordGapPrediction:

    def __init__(self):
        self.tokenizer = RegexpTokenizer(r"\w+")
        self.model = None
        self.vocab = set()
        self.alpha = 0.6

    def read_train_data(self, file):
        data = pd.read_csv(file, sep="\t", error_bad_lines=False, index_col=0, header=None)
        with open('input_train.txt', 'w') as f:
            for index, row in data[:500000].iterrows():
                first_part = str(row[6])
                sec_part = str(row[7])
                if first_part != 'nan':
                    f.write(first_part + '\n')
                if sec_part != 'nan':
                    f.write(sec_part + '\n')
        # Train a KenLM model on the dumped text, then load the binary.
        os.system('sh ./kenlm.sh')
        self.model = kenlm.Model("model.binary")

Added (module-level replacement that only dumps the training text):

def read_train_data(file):
    # Write the left (column 6) and right (column 7) contexts of the
    # first 500,000 rows to a plain-text file, one context per line.
    data = pd.read_csv(file, sep="\t", error_bad_lines=False, index_col=0, header=None)
    with open('input_train.txt', 'w') as f:
        for index, row in data[:500000].iterrows():
            first_part = str(row[6]).replace('\\n', '')
            sec_part = str(row[7]).replace('\\n', '')
            if first_part != 'nan':
                f.write(first_part + '\n')
            if sec_part != 'nan':
                f.write(sec_part + '\n')

read_train_data('./challenging-america-word-gap-prediction/train/in.tsv')

!head -10 input_train.txt
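For reference, error_bad_lines=False was removed in pandas 2.0. A minimal sketch of an equivalent loader for current pandas, assuming the same headerless tab-separated layout with contexts in columns 6 and 7:

import csv
import pandas as pd

def load_contexts(path, nrows=500_000):
    # on_bad_lines='skip' replaces the deprecated error_bad_lines=False.
    return pd.read_csv(path, sep='\t', header=None, index_col=0,
                       quoting=csv.QUOTE_NONE, on_bad_lines='skip',
                       nrows=nrows)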
Added (tokenisation helpers and the vocabulary):

from itertools import islice
import regex as re
import sys
from torchtext.vocab import build_vocab_from_iterator


def get_words_from_line(line):
    # Lower-cased word and punctuation tokens wrapped in sentence markers.
    line = line.rstrip()
    yield '<s>'
    for m in re.finditer(r'[\p{L}0-9\*]+|\p{P}+', line):
        yield m.group(0).lower()
    yield '</s>'


def get_word_lines_from_file(file_name):
    with open(file_name, 'r') as fh:
        for line in fh:
            yield get_words_from_line(line)


vocab_size = 30000

vocab = build_vocab_from_iterator(
    get_word_lines_from_file('input_train.txt'),
    max_tokens=vocab_size,
    specials=['<unk>'])

# Index of 'is' in the vocabulary.
print(vocab['is'])
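A quick check of the tokenisation, for illustration, using get_words_from_line as defined above:

print(list(get_words_from_line('The mill was built in 1887.')))
# ['<s>', 'the', 'mill', 'was', 'built', 'in', '1887', '.', '</s>']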
Added (the bigram model):

from torch import nn
import torch

embed_size = 100


class SimpleBigramNeuralLanguageModel(nn.Module):
    def __init__(self, vocabulary_size, embedding_size):
        super(SimpleBigramNeuralLanguageModel, self).__init__()
        # Embedding of the previous word -> linear projection to the
        # vocabulary -> probability distribution over the next word.
        self.model = nn.Sequential(
            nn.Embedding(vocabulary_size, embedding_size),
            nn.Linear(embedding_size, vocabulary_size),
            nn.Softmax(dim=1)
        )

    def forward(self, x):
        return self.model(x)


model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size)
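Since the training loop below feeds torch.log(model(x)) into NLLLoss, the model effectively trains with cross-entropy. A minimal alternative sketch, not part of the commit, that emits log-probabilities directly and avoids the possible underflow of log(softmax(x)):

import torch
from torch import nn

class BigramLMLogSoftmax(nn.Module):
    # Same architecture, but the output is already log p(next | prev),
    # so it can be passed straight to nn.NLLLoss.
    def __init__(self, vocabulary_size, embedding_size):
        super().__init__()
        self.model = nn.Sequential(
            nn.Embedding(vocabulary_size, embedding_size),
            nn.Linear(embedding_size, vocabulary_size),
            nn.LogSoftmax(dim=1),
        )

    def forward(self, x):
        return self.model(x)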
Added (default index, data shuffling, and a streaming bigram dataset):

vocab.set_default_index(vocab['<unk>'])

!shuf < input_train.txt > input_train.shuf.txt

from torch.utils.data import IterableDataset
import itertools


def look_ahead_iterator(gen):
    # Turn a stream of tokens into consecutive (previous, current) pairs.
    prev = None
    for item in gen:
        if prev is not None:
            yield (prev, item)
        prev = item


class Bigrams(IterableDataset):
    def __init__(self, text_file, vocabulary_size):
        self.vocab = build_vocab_from_iterator(
            get_word_lines_from_file(text_file),
            max_tokens=vocabulary_size,
            specials=['<unk>'])
        self.vocab.set_default_index(self.vocab['<unk>'])
        self.vocabulary_size = vocabulary_size
        self.text_file = text_file

    def __iter__(self):
        return look_ahead_iterator(
            (self.vocab[t] for t in itertools.chain.from_iterable(
                get_word_lines_from_file(self.text_file))))


train_dataset = Bigrams('input_train.shuf.txt', vocab_size)
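For illustration, the pairs look_ahead_iterator, as defined above, produces from a short index stream:

print(list(look_ahead_iterator(iter([5, 9, 2, 7]))))
# [(5, 9), (9, 2), (2, 7)]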
Added (training):

from torch.utils.data import DataLoader

device = 'cuda'
model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
data = DataLoader(train_dataset, batch_size=5000)
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.NLLLoss()

model.train()
step = 0
for x, y in data:
    x = x.to(device)
    y = y.to(device)
    optimizer.zero_grad()
    ypredicted = model(x)
    # NLLLoss expects log-probabilities, hence the explicit torch.log.
    loss = criterion(torch.log(ypredicted), y)
    if step % 100 == 0:
        print(step, loss)
    step += 1
    loss.backward()
    optimizer.step()

torch.save(model.state_dict(), 'model1.bin')
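The loss printed above is the mean negative log-likelihood per bigram (natural log, NLLLoss's default mean reduction), so training perplexity can be read off directly; a one-liner, not part of the commit:

import math

def perplexity(mean_nll: float) -> float:
    # NLLLoss uses the natural logarithm, so perplexity = e ** loss.
    return math.exp(mean_nll)

print(round(perplexity(5.8)))  # a loss of 5.8 corresponds to perplexity 330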
Added (reloading the model and inspecting its predictions for 'he'):

device = 'cuda'
model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
model.load_state_dict(torch.load('model1.bin'))
model.eval()

ixs = torch.tensor(vocab.forward(['he'])).to(device)

out = model(ixs)
top = torch.topk(out[0], 10)
top_indices = top.indices.tolist()
top_probs = top.values.tolist()
top_words = vocab.lookup_tokens(top_indices)
print(list(zip(top_words, top_indices, top_probs)))
Added (turning model output into the challenge's word:prob format):

import regex as re


def predict_word(word):
    device = 'cuda'
    model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
    model.load_state_dict(torch.load('model1.bin'))
    model.eval()

    ixs = torch.tensor(vocab.forward([word])).to(device)

    out = model(ixs)
    top = torch.topk(out[0], 8)
    top_indices = top.indices.tolist()
    top_probs = top.values.tolist()
    top_words = vocab.lookup_tokens(top_indices)
    to_return = ''
    total = 1.0
    # Keep only alphanumeric candidates; each contributes its own
    # probability mass, and whatever remains goes to the final ':' field.
    pattern = re.compile("^([A-Za-z0-9])+$")
    for el in zip(top_words, top_indices, top_probs):
        if re.match(pattern, el[0]):
            if total - el[2] >= 0:
                to_return += f'{el[0]}:{el[2]} '
                total -= el[2]
    if total != 1.0:
        to_return += f':{total}'
    return to_return
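For illustration, a small helper, not part of the commit, that parses a line in this format back into (word, probability) pairs; the trailing field with an empty word is the leftover mass:

def parse_prediction(line):
    pairs = []
    for field in line.split():
        word, _, prob = field.rpartition(':')
        pairs.append((word, float(prob)))
    return pairs

print(parse_prediction('the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1'))
# [('the', 0.2), ('be', 0.2), ('to', 0.2), ('of', 0.1), ('and', 0.1), ('a', 0.1), ('', 0.1)]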
Removed (continued: the old class's generate_outputs method):

    def generate_outputs(self, input_file, output_file):
        data = pd.read_csv(input_file, sep='\t', error_bad_lines=False, index_col=0, header=None, quoting=csv.QUOTE_NONE)
        with open(output_file, 'w') as f:
            for index, row in data.iterrows():
                first_context = row[6]
                sec_context = row[7]
                first_context_tokens = self.tokenizer.tokenize(first_context)
                sec_context_tokens = self.tokenizer.tokenize(sec_context)
                if len(first_context_tokens) + len(sec_context_tokens) < 4:
                    prediction = 'the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1'
                else:
                    prediction = self.predict_probs(first_context_tokens[-1], sec_context_tokens[0])
                f.write(prediction + '\n')

Added (module-level replacement and the output-generation calls):

!pip install nltk

from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r"\w+")

import csv


def generate_outputs(input_file, output_file):
    data = pd.read_csv(input_file, sep='\t', error_bad_lines=False, index_col=0, header=None, quoting=csv.QUOTE_NONE)
    with open(output_file, 'w') as f:
        for index, row in data.iterrows():
            first_context = row[6]
            sec_context = row[7]
            first_context_tokens = tokenizer.tokenize(first_context)
            sec_context_tokens = tokenizer.tokenize(sec_context)
            # Too little context: fall back to a fixed distribution of
            # frequent English words.
            if len(first_context_tokens) + len(sec_context_tokens) < 4:
                prediction = 'the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1'
            else:
                prediction = predict_word(first_context_tokens[-1])
            if not prediction:
                prediction = 'the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1'
            f.write(prediction + '\n')


generate_outputs('./challenging-america-word-gap-prediction/dev-0/in.tsv', './challenging-america-word-gap-prediction/dev-0/out.tsv')
generate_outputs('./challenging-america-word-gap-prediction/test-A/in.tsv', './challenging-america-word-gap-prediction/test-A/out.tsv')
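One practical check before submitting, not part of the commit: the evaluator expects out.tsv to contain exactly one prediction line per input row. A minimal sketch of that check, using the dev-0 paths above:

def count_lines(path):
    with open(path) as f:
        return sum(1 for _ in f)

assert count_lines('./challenging-america-word-gap-prediction/dev-0/in.tsv') == \
       count_lines('./challenging-america-word-gap-prediction/dev-0/out.tsv')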
Removed (continued: the old class's KenLM scoring routine):

    def predict_probs(self, word1, word2):
        predictions = []
        for word in english_words_set:
            sentence = word1 + ' ' + word + ' ' + word2
            text_score = self.model.score(sentence, bos=False, eos=False)
            # Keep the 12 best-scoring candidate words, evicting the
            # current worst whenever a better one appears.
            if len(predictions) < 12:
                predictions.append((word, text_score))
            else:
                worst_score = None
                for score in predictions:
                    if not worst_score:
                        worst_score = score
                    else:
                        if worst_score[1] > score[1]:
                            worst_score = score
                if worst_score[1] < text_score:
                    predictions.remove(worst_score)
                    predictions.append((word, text_score))
        probs = sorted(predictions, key=lambda tup: tup[1], reverse=True)
        pred_str = ''
        for word, prob in probs:
            pred_str += f'{word}:{prob} '
        pred_str += f':{log10(0.99)}'
        return pred_str
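For reference, a minimal sketch of the same top-12 selection using heapq instead of the manual eviction loop; it assumes scored is an iterable of (word, score) pairs:

import heapq

def top_k(scored, k=12):
    # Highest-scoring k pairs, sorted best-first.
    return heapq.nlargest(k, scored, key=lambda pair: pair[1])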
Removed (the old driver code):

word_gap_prediction = WordGapPrediction()
word_gap_prediction.read_train_data('./train/in.tsv')
word_gap_prediction.generate_outputs('dev-0/in.tsv', 'dev-0/out.tsv')
word_gap_prediction.generate_outputs('test-A/in.tsv', 'test-A/out.tsv')

Added (fetch and run the geval evaluator on dev-0):

!wget https://gonito.net/get/bin/geval
!chmod u+x geval
!./geval -t ./challenging-america-word-gap-prediction/dev-0/ --metric PerplexityHashed
test-A/out.tsv | 14828 lines (file diff suppressed because it is too large)