smoothing

Bartosz Karwacki 2022-04-10 18:13:50 +02:00
parent e49b8826cb
commit 4f08607d34
3 changed files with 18018 additions and 17996 deletions
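The commit message is just "smoothing": the run.py rewrite below replaces the raw maximum-likelihood bigram probabilities with add-alpha (Lidstone) smoothed ones, so word pairs never seen in training still receive a little probability mass. A minimal, self-contained sketch of the idea, using toy counts rather than the repository's data:

from collections import defaultdict

alpha = 0.01
counts = defaultdict(lambda: defaultdict(int))
counts["the"]["cat"] = 3
counts["the"]["dog"] = 1
vocab = {"the", "cat", "dog"}

def smoothed(w1, w2):
    # add-alpha: (count + alpha) / (total + alpha * |V|)
    total = sum(counts[w1].values())
    return (counts[w1][w2] + alpha) / (total + alpha * len(vocab))

print(smoothed("the", "cat"))   # ~0.747 instead of the unsmoothed 0.75
print(smoothed("the", "fish"))  # unseen bigram still gets ~0.0025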

File diff suppressed because it is too large

run.py (150 lines changed)

@@ -3,84 +3,106 @@ import csv
 import regex as re
 from nltk import bigrams, word_tokenize
 from collections import Counter, defaultdict
-import string
-import unicodedata
-
-data = pd.read_csv(
-    "train/in.tsv.xz",
-    sep="\t",
-    error_bad_lines=False,
-    header=None,
-    quoting=csv.QUOTE_NONE,
-)
-train_labels = pd.read_csv(
-    "train/expected.tsv",
-    sep="\t",
-    error_bad_lines=False,
-    header=None,
-    quoting=csv.QUOTE_NONE,
-)
-
-train_data = data[[6, 7]]
-train_data = pd.concat([train_data, train_labels], axis=1)
-train_data["final"] = train_data[6] + train_data[0] + train_data[7]
-
-model = defaultdict(lambda: defaultdict(lambda: 0))
 
 
 def clean(text):
     text = str(text).lower().replace("-\\n", "").replace("\\n", " ")
     return re.sub(r"\p{P}", "", text)
 
 
-def train_model(data):
-    for _, row in data.iterrows():
-        words = word_tokenize(clean(row["final"]))
-        for w1, w2 in bigrams(words, pad_left=True, pad_right=True):
-            if w1 and w2:
-                model[w1][w2] += 1
-    for w1 in model:
-        total_count = float(sum(model[w1].values()))
-        for w2 in model[w1]:
-            model[w1][w2] /= total_count
-
-
-def predict(word):
-    predictions = dict(model[word])
-    most_common = dict(Counter(predictions).most_common(5))
-
-    total_prob = 0.0
-    str_prediction = ""
-
-    for word, prob in most_common.items():
-        total_prob += prob
-        str_prediction += f"{word}:{prob} "
-
-    if not total_prob:
-        return "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
-
-    if 1 - total_prob >= 0.01:
-        str_prediction += f":{1-total_prob}"
-    else:
-        str_prediction += f":0.01"
-
-    return str_prediction
-
-
-def predict_data(read_path, save_path):
-    data = pd.read_csv(
-        read_path, sep="\t", error_bad_lines=False, header=None, quoting=csv.QUOTE_NONE
-    )
-    with open(save_path, "w") as file:
-        for _, row in data.iterrows():
-            words = word_tokenize(clean(row[6]))
-            if len(words) < 3:
-                prediction = "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
-            else:
-                prediction = predict(words[-1])
-            file.write(prediction + "\n")
-
-
-train_model(train_data)
-predict_data("dev-0/in.tsv.xz", "dev-0/out.tsv")
-predict_data("test-A/in.tsv.xz", "test-A/out.tsv")
+class Collection:
+    def __init__(self, path: str) -> None:
+        self._path = path
+
+    def read(self, nrows=200_000):
+        self.data = pd.read_csv(
+            self._path,
+            sep="\t",
+            error_bad_lines=False,
+            header=None,
+            quoting=csv.QUOTE_NONE,
+            nrows=nrows,
+        )
+
+
+class Model:
+    def __init__(self, alpha: float = 0.01) -> None:
+        self.alpha = alpha
+        self.model = defaultdict(lambda: defaultdict(lambda: 0))
+        self.vocab = set()
+
+    def train(self, data):
+        for _, row in data.iterrows():
+            words = word_tokenize(clean(row["final"]))
+            for w1, w2 in bigrams(words, pad_left=True, pad_right=True):
+                if w1 and w2:
+                    self.model[w1][w2] += 1
+                    self.vocab.add(w1)
+                    self.vocab.add(w2)
+
+        for w1 in self.model:
+            total_count = float(sum(self.model[w1].values()))
+            for w2 in self.model[w1]:
+                self.model[w1][w2] /= total_count
+                self.model[w1][w2] = (self.model[w1][w2] + self.alpha) / (
+                    total_count + self.alpha * len(self.vocab)
+                )
+
+    def _predict(self, word):
+        predictions = dict(self.model[word])
+        most_common = dict(Counter(predictions).most_common(5))
+
+        total_prob = 0.0
+        str_prediction = ""
+
+        for word, prob in most_common.items():
+            total_prob += prob
+            str_prediction += f"{word}:{prob} "
+
+        if not total_prob:
+            return "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
+
+        if 1 - total_prob >= 0.01:
+            str_prediction += f":{1-total_prob}"
+        else:
+            str_prediction += f":0.01"
+
+        return str_prediction
+
+    def _save(self, save_path: str, data):
+        with open(save_path, "w") as file:
+            for _, row in data.iterrows():
+                words = word_tokenize(clean(row[6]))
+                if len(words) < 3:
+                    prediction = "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
+                else:
+                    prediction = self._predict(words[-1])
+                file.write(prediction + "\n")
+
+    def predict(self, read_path: str, save_path: str):
+        collection = Collection(read_path)
+        collection.read()
+        self._save(save_path, collection.data)
+
+
+if __name__ == '__main__':
+    data = Collection("train/in.tsv.xz")
+    data.read()
+
+    train_labels = Collection("train/expected.tsv")
+    train_labels.read()
+
+    train_data = data.data[[6, 7]]
+    train_data = pd.concat([train_data, train_labels.data], axis=1)
+
+    train_data["final"] = train_data[6] + train_data[0] + train_data[7]
+
+    model = Model()
+    model.train(train_data)
+
+    model.predict("dev-0/in.tsv.xz", "dev-0/out.tsv")
+    model.predict("test-A/in.tsv.xz", "test-A/out.tsv")
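One detail worth flagging in the committed train: each bigram count is first divided by total_count, and that probability is then fed into the add-alpha formula, which conventionally expects the raw count. A sketch of the textbook form of the method, assuming the same fields (self.model, self.vocab, self.alpha) and the same helpers (clean, word_tokenize, bigrams) as in the file above:

    def train(self, data):
        for _, row in data.iterrows():
            words = word_tokenize(clean(row["final"]))
            for w1, w2 in bigrams(words, pad_left=True, pad_right=True):
                if w1 and w2:
                    self.model[w1][w2] += 1
                    self.vocab.add(w1)
                    self.vocab.add(w2)
        for w1 in self.model:
            total_count = float(sum(self.model[w1].values()))
            for w2 in self.model[w1]:
                # smooth the raw count: (count + alpha) / (total + alpha * |V|)
                self.model[w1][w2] = (self.model[w1][w2] + self.alpha) / (
                    total_count + self.alpha * len(self.vocab)
                )

Either way, _predict emits the five most probable successors with the leftover mass written after a bare colon, and _save writes one such distribution per input line.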

File diff suppressed because it is too large