#%%
import pandas as pd
import csv
import os
import kenlm
from collections import Counter, defaultdict
from math import log10
#%%
def clean(text):
    # lower-case, normalize curly apostrophes, expand common contractions,
    # and strip punctuation characters left over from the corpus
    text = str(text).lower().strip().replace("\u2019", "'").replace("\\n", " ")
    text = text.replace("'t", " not").replace("'s", " is").replace("'ll", " will").replace("'m", " am").replace("'ve", " have")
    text = text.replace(",", "").replace("-", "").replace(".", "").replace("'", "").replace(">", "")
    return text
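# Illustration only (not part of the original pipeline), assuming the cleaning rules
# above: contractions are expanded and most punctuation is dropped, e.g.
#   clean("He's tall, isn't he?")  ->  "he is tall isn not he?"
# ("isn't" is only partially handled because the "'t" rule fires first).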
# read the first 300k training rows; columns 6 and 7 hold the left and right context,
# the expected file holds the gap word
train_in = pd.read_csv("train/in.tsv.xz", sep='\t', header=None, encoding="UTF-8", on_bad_lines="skip", quoting=csv.QUOTE_NONE, nrows=300000)[[6, 7]]
train_expected = pd.read_csv("train/expected.tsv", sep='\t', header=None, encoding="UTF-8", on_bad_lines="skip", quoting=csv.QUOTE_NONE, nrows=300000)
data = pd.concat([train_in, train_expected], axis=1)
# rebuild full sentences as: left context + gap word + right context
data = data[6] + " " + data[0] + " " + data[7]
data = data.apply(clean)
# write the cleaned training text once; it is the input for the KenLM training step below
if not os.path.isfile('train_file.txt'):
    with open("train_file.txt", "w+") as f:
        for text in data:
            f.write(text + "\n")
#%%
# Build the 4-gram ARPA model once with KenLM's lmplz (run from a shell, or uncomment
# the line below in an IPython session); kenlm.Model expects model.arpa to exist on disk.
#get_ipython().system('../kenlm/build/bin/lmplz -o 4 < train_file.txt > model.arpa --skip_symbols')
model = kenlm.Model("model.arpa")
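# Optional sanity check (not in the original script): kenlm.Model exposes the n-gram
# order, and score() returns a log10 probability, so a quick probe confirms the ARPA
# file was built and loaded as a 4-gram model. The sample sentence is arbitrary.
print("model order:", model.order)
print("sample score:", model.score("the united states of america"))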
#%%
import nltk
from nltk import word_tokenize
nltk.download('punkt')
# count word frequencies over the cleaned training text; the 8000 most frequent
# words become the candidate vocabulary for the gap predictions
most_common = defaultdict(lambda: 0)
for text in data:
    words = word_tokenize(text)
    if "d" in words:
        words.remove("d")  # drop a single stray "d" token if present (only the first occurrence)
    for w in words:
        most_common[w] += 1
most_common = Counter(most_common).most_common(8000)
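# most_common is now a list of (word, count) tuples ordered by frequency; only these
# 8000 words are scored as gap candidates in predict() below.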
#%%
def predict(path, result_path):
    # For each row, score every candidate word from most_common as a continuation of
    # the last three tokens of the left context (column 6) with the KenLM model, and
    # write the 12 best candidates as "word:logprob" pairs plus a trailing ":<logprob>"
    # entry that reserves probability mass for out-of-vocabulary words.
    data = pd.read_csv(path, sep='\t', header=None, encoding="UTF-8", on_bad_lines="skip", quoting=csv.QUOTE_NONE)
    with open(result_path, "w+", encoding="UTF-8") as f:
        for i, row in data.iterrows():
            result = {}
            before = word_tokenize(clean(str(row[6])))[-3:]
            if len(before) < 2:
                # not enough context: fall back to a fixed distribution over frequent words
                result = "a:0.2 the:0.2 to:0.2 of:0.1 and:0.1 of:0.1 :0.1"
            else:
                for word, _count in most_common:
                    prob = model.score(" ".join(before + [word]))
                    result[word] = prob
                predictions = dict(Counter(result).most_common(12))
                result = ""
                for word, prob in predictions.items():
                    result += f"{word}:{prob} "
                result += f":{log10(0.99)}"
            f.write(result + "\n")
            print(result)
predict("dev-0/in.tsv.xz", "dev-0/out.tsv")
predict("test-A/in.tsv.xz", "test-A/out.tsv")