from collections import defaultdict, Counter
from nltk import trigrams, word_tokenize
import csv
import regex as re
import pandas as pd
import numpy as np
import time
in_file = 'train/in.tsv.xz'
out_file = 'train/expected.tsv'
X_train = pd.read_csv(in_file, sep='\t', header=None, quoting=csv.QUOTE_NONE, nrows=30000, on_bad_lines='skip')
Y_train = pd.read_csv(out_file, sep='\t', header=None, quoting=csv.QUOTE_NONE, nrows=30000, on_bad_lines='skip')
# Keep the left (6) and right (7) context columns and append the expected gap word.
X_train = X_train[[6, 7]]
X_train = pd.concat([X_train, Y_train], axis=1)
# Join left context, gap word and right context into one training string.
X_train['row'] = X_train[6] + ' ' + X_train[0] + ' ' + X_train[7]
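As a quick, purely illustrative sanity check (not part of the original pipeline), one assembled training string can be inspected before training:

# Hypothetical check: print one concatenated left-context / gap-word / right-context row.
print(X_train['row'].iloc[0])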
def train(X_train, Y_train, alpha):
    # Trigram model: for each (left word, right word) context pair, count the middle words.
    model = defaultdict(lambda: defaultdict(lambda: 0))
    vocabulary = set()
    for _, row in X_train.iterrows():
        text = preprocess(str(row['row']))
        words = word_tokenize(text)
        for w1, w2, w3 in trigrams(words, pad_right=True, pad_left=True):
            if w1 and w2 and w3:
                model[(w1, w3)][w2] += 1
                vocabulary.add(w1)
                vocabulary.add(w2)
                vocabulary.add(w3)
    # Turn the raw counts into add-alpha smoothed probabilities.
    for w13 in model:
        count = float(sum(model[w13].values()))
        denominator = count + alpha * len(vocabulary)
        for w2 in model[w13]:
            numerator = model[w13][w2] + alpha
            model[w13][w2] = numerator / denominator
    return model
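A minimal worked sketch of the add-alpha estimate computed in the loop above, using made-up counts and vocabulary size (the numbers are illustrative, not taken from the data):

# Illustrative only: with counts {'cat': 3, 'dog': 1} for some (w1, w3) context,
# vocabulary size V = 10 and alpha = 0.1, the smoothed probability of 'cat' is
# (3 + 0.1) / (4 + 0.1 * 10) = 3.1 / 5.0, i.e. about 0.62, and 'dog' about 0.22.
counts = {'cat': 3, 'dog': 1}
V, alpha = 10, 0.1
total = sum(counts.values())
probs = {w: (c + alpha) / (total + alpha * V) for w, c in counts.items()}
print(probs)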
def preprocess(row):
    # Lower-case, drop hyphenated line breaks, replace newline markers with spaces, strip punctuation.
    row = re.sub(r'\p{P}', '', row.lower().replace('-\\\\n', '').replace('\\\\n', ' '))
    return row
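For illustration, assuming a plain text input string, preprocess lower-cases the text and strips punctuation via the Unicode \p{P} class:

# Illustrative call on a hypothetical input string.
print(preprocess('Hello, World!'))  # -> 'hello world'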
def predict_word(before, after, model):
    # Take the 7 most probable gap words for the (before, after) context.
    output = ''
    p = 0.0
    Y_pred = dict(Counter(model[(before, after)]).most_common(7))
    for key, value in Y_pred.items():
        p += value
        output += f'{key}:{value} '
    if p == 0.0:
        # Unseen context: fall back to a fixed distribution of frequent words.
        output = 'the:0.04 be:0.04 to:0.04 and:0.02 not:0.02 or:0.02 a:0.02 :0.8'
        return output
    # Assign the remaining probability mass to the unknown-word bucket.
    output += f':{max(1 - p, 0.01)}'
    return output
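A hypothetical usage sketch (the context words and the alpha value below are only examples; the actual predictions depend on the trained model):

# Example only: train with one smoothing value and predict the word between 'united' and 'of'.
model = train(X_train, Y_train, alpha=2e-05)
print(predict_word('united', 'of', model))  # prints up to 7 candidates as 'word:prob ... :rest'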
def word_gap_prediction(file, model):
    X_test = pd.read_csv(f'{file}/in.tsv.xz', sep='\t', header=None, quoting=csv.QUOTE_NONE, on_bad_lines='skip')
    with open(f'{file}/out.tsv', 'w', encoding='utf-8') as output_file:
        for _, row in X_test.iterrows():
            before, after = word_tokenize(preprocess(str(row[6]))), word_tokenize(preprocess(str(row[7])))
            if len(before) < 2 or len(after) < 2:
                # Not enough context: fall back to the fixed distribution.
                output = 'the:0.04 be:0.04 to:0.04 and:0.02 not:0.02 or:0.02 a:0.02 :0.8'
            else:
                output = predict_word(before[-1], after[0], model)
            output_file.write(output + '\n')
def alpha_tuning(alphas):
    # Retrain and score on dev-0 for each smoothing value.
    for alpha in alphas:
        model = train(X_train, Y_train, alpha)
        word_gap_prediction('dev-0', model)
        time.sleep(10)
        print("Alpha = ", alpha)
        print("dev-0 score")
        !./geval -t dev-0
alphas = np.round(np.arange(0.1, 0.6, 0.1).tolist(), 2)
alphas2 = np.round(alphas * 0.01, 3)
alphas3 = np.round(alphas * 0.001, 4)
alphas4 = np.round(alphas * 0.0001, 5)
alphas5 = np.round(alphas * 0.00001, 6)
alpha_tuning(alphas)
Alpha = 0.1  dev-0 score: 789.71
Alpha = 0.2  dev-0 score: 819.57
Alpha = 0.3  dev-0 score: 833.52
Alpha = 0.4  dev-0 score: 841.93
Alpha = 0.5  dev-0 score: 847.66
alpha_tuning(alphas2)
Alpha = 0.001  dev-0 score: 472.05
Alpha = 0.002  dev-0 score: 519.17
Alpha = 0.003  dev-0 score: 548.93
Alpha = 0.004  dev-0 score: 570.68
Alpha = 0.005  dev-0 score: 587.76
alpha_tuning(alphas3)
Alpha = 0.0001  dev-0 score: 367.28
Alpha = 0.0002  dev-0 score: 389.51
Alpha = 0.0003  dev-0 score: 406.30
Alpha = 0.0004  dev-0 score: 419.89
Alpha = 0.0005  dev-0 score: 431.39
alpha_tuning(alphas4)
Alpha = 1e-05  dev-0 score: 350.33
Alpha = 2e-05  dev-0 score: 346.35
Alpha = 3e-05  dev-0 score: 347.66
Alpha = 4e-05  dev-0 score: 350.20
Alpha = 5e-05  dev-0 score: 353.09
alpha_tuning(alphas5)
Alpha = 1e-06  dev-0 score: 422.25
Alpha = 2e-06  dev-0 score: 390.96
Alpha = 3e-06  dev-0 score: 376.49
Alpha = 4e-06  dev-0 score: 367.96
Alpha = 5e-06  dev-0 score: 362.34