Add add-alpha smoothing

Wojciech Jarmosz 2022-04-10 14:56:28 +02:00
parent b8ad7dd579
commit dc61acc340
3 changed files with 16815 additions and 16810 deletions

File diff suppressed because it is too large

run.py

@@ -10,21 +10,26 @@ class WordGapPrediction:
     def __init__(self):
         self.tokenizer = RegexpTokenizer(r"\w+")
         self.model = defaultdict(lambda: defaultdict(lambda: 0))
+        self.vocab = set()
+        self.alpha = 0.001

     def read_train_data(self, file):
         data = pd.read_csv(file, sep="\t", error_bad_lines=False, index_col=0, header=None)
-        for index, row in data[:140000].iterrows():
+        for index, row in data[:100000].iterrows():
             text = str(row[6]) + ' ' + str(row[7])
             tokens = self.tokenizer.tokenize(text)
             for w1, w2, w3 in trigrams(tokens, pad_right=True, pad_left=True):
                 if w1 and w2 and w3:
-                    self.model[(w2, w3)][w1] += 1
+                    self.model[(w1, w2)][w3] += 1
+                    self.vocab.add(w1)
+                    self.vocab.add(w2)
+                    self.vocab.add(w3)

         for word_pair in self.model:
             num_n_grams = float(sum(self.model[word_pair].values()))
             for word in self.model[word_pair]:
-                self.model[word_pair][word] /= num_n_grams
+                self.model[word_pair][word] = (self.model[word_pair][word] + self.alpha) / (num_n_grams + self.alpha*len(self.vocab))

     def generate_outputs(self, input_file, output_file):
         data = pd.read_csv(input_file, sep='\t', error_bad_lines=False, index_col=0, header=None, quoting=csv.QUOTE_NONE)
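
The new estimate above is plain add-alpha (Lidstone) smoothing: P(w3 | w1, w2) = (count(w1, w2, w3) + alpha) / (count(w1, w2) + alpha * V), where V is the vocabulary size. A minimal standalone sketch with made-up counts; the names counts, vocab and alpha only mirror self.model, self.vocab and self.alpha in run.py:

from collections import defaultdict

# Add-alpha smoothing: unseen trigrams get a small non-zero probability,
# seen trigrams are discounted slightly. Counts below are hypothetical.
alpha = 0.001
vocab = {"i", "like", "cats", "dogs"}
counts = defaultdict(lambda: defaultdict(int))
counts[("i", "like")]["cats"] = 3
counts[("i", "like")]["dogs"] = 1

def smoothed_prob(w1, w2, w3):
    context_total = sum(counts[(w1, w2)].values())
    return (counts[(w1, w2)][w3] + alpha) / (context_total + alpha * len(vocab))

print(smoothed_prob("i", "like", "cats"))   # seen:   3.001 / 4.004 ~= 0.7495
print(smoothed_prob("i", "like", "birds"))  # unseen: 0.001 / 4.004 ~= 0.00025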

File diff suppressed because it is too large