custom alpha
This commit is contained in:
parent bd345e8e26
commit 96643f581b

run.py
@@ -4,72 +4,77 @@ from collections import defaultdict, Counter
import pandas as pd
import csv
import regex as re
import sys

# word_tokenize and trigrams (used below) come from nltk, presumably imported
# in the file's first three lines, just above this hunk.


DEFAULT_PREDICTION = 'the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1'
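
# A prediction is a space-separated list of `word:probability` pairs; the
# bare `:p` entry (as in DEFAULT_PREDICTION above) reserves the remaining
# probability mass for unseen words.
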
def preprocess(text):
    # Lowercase, rejoin words hyphenated across line breaks, turn the literal
    # "\n" markers in the corpus into spaces, and strip all Unicode
    # punctuation (the regex module's \p{P} class).
    text = text.lower().replace('-\\n', '').replace('\\n', ' ')
    return re.sub(r'\p{P}', '', text)

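# The module-level pipeline below appears on the removed side of this diff:
# alpha is hard-coded at 0.25 and the model is a global built at import time.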
def predict(word_before, word_after):
    # Keep the six most frequent middle words seen between this
    # (before, after) pair.
    prediction = dict(Counter(model[word_before, word_after]).most_common(6))
    result = []
    prob = 0.0
    for key, value in prediction.items():
        prob += value
        result.append(f'{key}:{value}')
    if prob == 0.0:
        return DEFAULT_PREDICTION
    # Whatever mass is left (at least 0.01) goes to unseen words.
    result.append(f':{max(1 - prob, 0.01)}')
    return ' '.join(result)

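# Added in this commit: the same pipeline wrapped in a Model class, with the
# smoothing constant alpha supplied by the caller instead of hard-coded
# (hence the commit title, "custom alpha").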
class Model:

    def __init__(self, alpha, train_file_name, test_file_name):
        # Rebuild training sentences: left context (column 6) + expected gap
        # word (column 0 of expected.tsv) + right context (column 7).
        file_expected = pd.read_csv(f'{train_file_name}/expected.tsv', sep='\t', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE, nrows=200000)
        file_in = pd.read_csv(f'{train_file_name}/in.tsv.xz', sep='\t', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE, nrows=200000)
        file_in = file_in[[6, 7]]
        file_concat = pd.concat([file_in, file_expected], axis=1)
        file_concat['text'] = file_concat[6] + ' ' + file_concat[0] + ' ' + file_concat[7]

        self.file = file_concat[['text']]
        self.test_file_name = test_file_name
        self.alpha = alpha
        self.model = defaultdict(lambda: defaultdict(lambda: 0))

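    # Counting scheme: every trigram (w1, w2, w3) in the training text votes
    # for w2 as the filler of the gap between w1 and w3.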
    def train(self):
        rows = self.file.iterrows()
        rows_len = len(self.file)
        for index, (_, row) in enumerate(rows):
            if index % 1000 == 0:
                print(f'training model: {index / rows_len}')
            words = word_tokenize(preprocess(str(row['text'])))
            for word_1, word_2, word_3 in trigrams(words, pad_right=True, pad_left=True):
                if word_1 and word_2 and word_3:
                    self.model[(word_1, word_3)][word_2] += 1
        model_len = len(self.model)
        for index, words_1_3 in enumerate(self.model):
            if index % 100000 == 0:
                print(f'normalization and smoothing: {index / model_len}')
            # Add-alpha smoothing: (count + alpha) / (total + alpha * number
            # of observed continuations for this (w1, w3) pair).
            occurrences = sum(self.model[words_1_3].values())
            continuations = len(self.model[words_1_3])
            for word_2 in self.model[words_1_3]:
                self.model[words_1_3][word_2] += self.alpha
                self.model[words_1_3][word_2] /= float(occurrences + self.alpha * continuations)

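    # Same output formatting as the module-level predict(), but backed by
    # self.model.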
    def predict_row(self, word_before, word_after):
        prediction = dict(Counter(self.model[word_before, word_after]).most_common(6))
        result = []
        prob = 0.0
        for key, value in prediction.items():
            prob += value
            result.append(f'{key}:{value}')
        if prob == 0.0:
            return DEFAULT_PREDICTION
        result.append(f':{max(1 - prob, 0.01)}')
        return ' '.join(result)

    def predict(self):
        data = pd.read_csv(f'{self.test_file_name}/in.tsv.xz', sep='\t', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE)
        with open(f'{self.test_file_name}/out.tsv', 'w', encoding='utf-8') as file_out:
            for _, row in data.iterrows():
                words_before, words_after = word_tokenize(preprocess(str(row[6]))), word_tokenize(preprocess(str(row[7])))
                # Fall back to the default distribution when either context is
                # too short to trust.
                if len(words_before) < 3 or len(words_after) < 3:
                    prediction = DEFAULT_PREDICTION
                else:
                    prediction = self.predict_row(words_before[-1], words_after[0])
                file_out.write(prediction + '\n')

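# The removed side of the diff resumes below: the standalone counterparts of
# the methods above, plus the training script they rely on.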
def make_prediction(file):
    data = pd.read_csv(f'{file}/in.tsv.xz', sep='\t', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE)
    with open(f'{file}/out.tsv', 'w', encoding='utf-8') as file_out:
        for _, row in data.iterrows():
            words_before, words_after = word_tokenize(preprocess(str(row[6]))), word_tokenize(preprocess(str(row[7])))
            if len(words_before) < 3 or len(words_after) < 3:
                prediction = DEFAULT_PREDICTION
            else:
                prediction = predict(words_before[-1], words_after[0])
            file_out.write(prediction + '\n')

file_in = pd.read_csv('train/in.tsv.xz', sep='\t', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE, nrows=200000)
file_expected = pd.read_csv('train/expected.tsv', sep='\t', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE, nrows=200000)
file_in = file_in[[6, 7]]
file_concat = pd.concat([file_in, file_expected], axis=1)
file_concat['text'] = file_concat[6] + ' ' + file_concat[0] + ' ' + file_concat[7]
file_concat = file_concat[['text']]
trigrams_list = []
model = defaultdict(lambda: defaultdict(lambda: 0))

rows = file_concat.iterrows()
rows_len = len(file_concat)
for index, (_, row) in enumerate(rows):
    if index % 1000 == 0:
        print(f'training model: {index / rows_len}')
    words = word_tokenize(preprocess(str(row['text'])))
    for word_1, word_2, word_3 in trigrams(words, pad_right=True, pad_left=True):
        if word_1 and word_2 and word_3:
            model[(word_1, word_3)][word_2] += 1

alpha = 0.25
model_len = len(model)
for index, words_1_3 in enumerate(model):
    if index % 100000 == 0:
        print(f'normalization: {index / model_len}')
    occurrences = sum(model[words_1_3].values())
    continuations = len(model[words_1_3])
    for word_2 in model[words_1_3]:
        model[words_1_3][word_2] += alpha
        model[words_1_3][word_2] /= float(occurrences + alpha * continuations)

make_prediction('test-A')
make_prediction('dev-0')

print('done')

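# Added in this commit: a driver that reads alpha from the command line,
# e.g. `python run.py 0.25`.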
alpha = float(sys.argv[1])
print(f'alpha: {alpha}')
model = Model(alpha, 'dev-0', 'test-A')
model.train()
model.predict()