challenging-america-word-ga.../testing.ipynb
2022-04-04 15:07:07 +02:00

18 KiB
Raw Blame History

import pandas as pd
import csv
import regex as re
import nltk
from collections import Counter, defaultdict
import string
import unicodedata

def clean_text(text): 
    """Normalise raw passage text: lowercase, strip escaped newline
    markers, then remove every Unicode punctuation character (\\p{P})."""
    lowered = str(text).lower()
    # NOTE(review): the doubled backslashes match literal "\\n" sequences as
    # stored in the TSV, not real newlines — confirm against the data files.
    without_breaks = lowered.replace("-\\\\n", "").replace("\\\\n", " ")
    return re.sub(r"\p{P}", "", without_breaks)

def train_model(data, model):
    """Fill *model* with bigram predecessor probabilities.

    After this call, model[w2][w1] is the probability that token w1
    immediately precedes token w2 in the cleaned "final" column of *data*.
    """
    # Pass 1: accumulate raw predecessor counts.
    for _, row in data.iterrows():
        tokens = nltk.word_tokenize(clean_text(row["final"]))
        for prev_tok, next_tok in nltk.bigrams(tokens, pad_left=True, pad_right=True):
            # Padding yields None at the edges; skip those pairs.
            if prev_tok and next_tok:
                model[next_tok][prev_tok] += 1
    # Pass 2: normalise each predecessor distribution to sum to 1.
    for next_tok in model:
        denom = float(sum(model[next_tok].values()))
        for prev_tok in model[next_tok]:
            model[next_tok][prev_tok] /= denom


def predict(word, model):
    """Format the top-5 most probable predecessors of *word*.

    Parameters
    ----------
    word : str
        The token whose likely predecessors are requested (the word
        after the gap being filled).
    model : Mapping[str, Mapping[str, float]]
        model[w2][w1] = P(w1 precedes w2), as produced by train_model.

    Returns
    -------
    str
        Up to five "token:prob" pairs followed by a ":rest" term holding
        the leftover probability mass (floored at 0.01), or a fixed
        fallback distribution when *word* has no known predecessors.
    """
    # .get avoids the defaultdict side effect of the original
    # model[word], which silently inserted an empty entry for every
    # unseen word queried.
    predictions = dict(model.get(word, {}))
    most_common = dict(Counter(predictions).most_common(5))

    total_prob = 0.0
    str_prediction = ""

    # Renamed loop variable: it previously shadowed the `word` parameter.
    for candidate, prob in most_common.items():
        total_prob += prob
        str_prediction += f"{candidate}:{prob} "

    if not total_prob:
        # Unknown word: emit a generic high-frequency-word distribution.
        return "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"

    # Assign the remaining probability mass to the wildcard ":" entry,
    # keeping at least 0.01 so the scorer never sees a zero.
    if 1 - total_prob >= 0.01:
        str_prediction += f":{1-total_prob}"
    else:
        str_prediction += f":0.01"

    return str_prediction


def predict_data(read_path, save_path, model):
    """Write one gap-word prediction line per row of *read_path* to *save_path*.

    Parameters
    ----------
    read_path : str
        TSV input (possibly .xz-compressed; pandas decompresses by extension).
    save_path : str
        Output file; one "token:prob ..." line per input row.
    model : Mapping[str, Mapping[str, float]]
        Bigram predecessor model from train_model.
    """
    data = pd.read_csv(
        read_path,
        sep="\t",
        # Replaces the deprecated error_bad_lines=False (source of the
        # FutureWarnings in the notebook output) and matches the
        # on_bad_lines='skip' style used by the training loader.
        on_bad_lines='skip',
        header=None,
        quoting=csv.QUOTE_NONE,
        encoding="utf-8"
    )
    with open(save_path, "w", encoding="utf-8") as f:
        for _, row in data.iterrows():
            # Column 7 holds the context text; predict from its last token.
            words = nltk.word_tokenize(clean_text(row[7]))
            if len(words) < 3:
                # Too little context to trust the model: fixed fallback.
                prediction = "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
            else:
                prediction = predict(words[-1], model)
            f.write(prediction + "\n")
# Load the tab-separated column names describing the input and output files.
with open("in-header.tsv") as f:
    in_cols = f.read().strip().split("\t")

with open("out-header.tsv") as f:
    out_cols = f.read().strip().split("\t")
in_cols
['FileId', 'Year', 'LeftContext', 'RightContext']
out_cols
['Word']
# Load the training inputs; malformed lines are skipped rather than aborting.
data = pd.read_csv(
    "train/in.tsv.xz",
    sep="\t",
    on_bad_lines='skip',
    header=None,
    # names=in_cols,
    quoting=csv.QUOTE_NONE,
    encoding="utf-8"
)

# Expected gap words, one per training row (column 0 after load).
train_words = pd.read_csv(
    "train/expected.tsv",
    sep="\t",
    on_bad_lines='skip',
    header=None,
    # names=out_cols,
    quoting=csv.QUOTE_NONE,  # fixed: a stray second comma here was a SyntaxError
    encoding="utf-8"
)

# Keep the two context columns (7 and 6) and attach the expected word.
train_data = data[[7, 6]]
train_data = pd.concat([train_data, train_words], axis=1)

# Reassemble each passage as context + gap word + context for bigram training.
# NOTE(review): the 7/0/6 concatenation order and the lack of separating
# spaces look suspicious — verify against the column semantics before relying
# on the trained model.
train_data["final"] = train_data[7] + train_data[0] + train_data[6]
train_data
7 6 0 final
0 said\nit's all squash. The best I could get\ni... came fiom the last place to this\nplace, and t... lie said\nit's all squash. The best I could get\ni...
1 \ninto a proper perspective with those\nminor ... MB. BOOT'S POLITICAL OBEED\nAttempt to imagine... himself \ninto a proper perspective with those\nminor ...
2 all notU\nashore and afloat arc subjects for I... "Thera were in 1771 only aeventy-nine\n*ub*erl... of all notU\nashore and afloat arc subjects for I...
3 ceucju l< d no; <o waste it nud so\nsunk it in... A gixnl man y nitereRtiiiv dii-clos-\nur«s reg... ably ceucju l< d no; <o waste it nud so\nsunk it in...
4 ascertained w? OCt the COOltS of ibis\nletale ... Tin: 188UB TV THF BBABBT QABJE\nMr. Schiffs *t... j ascertained w? OCt the COOltS of ibis\nletale ...
... ... ... ... ...
432017 \nSam was arrested.\nThe case excited a great ... Sam Clendenin bad a fancy for Ui«\nscience of ... and \nSam was arrested.\nThe case excited a great ...
432018 through the alnp the »Uitors laapeeeed tia.»\n... Wita.htt halting the party ware dilven to the ... paasliic through the alnp the »Uitors laapeeeed tia.»\n...
432019 Agua Negra across the line.\nIt was a grim pla... It was the last thing that either of\nthem exp... for Agua Negra across the line.\nIt was a grim pla...
432020 \na note of Wood, Dialogue fc Co., for\nc27,im... settlement with the department.\nIt is also sh... for \na note of Wood, Dialogue fc Co., for\nc27,im...
432021 3214c;do White at 3614c: Mixed Western at\n331... Flour quotations—low extras at 1 R0®2 50;\ncit... at 3214c;do White at 3614c: Mixed Western at\n331...

432022 rows × 4 columns


# Nested mapping model[next_word][prev_word] -> count; train_model fills the
# counts and normalises them into probabilities in place.
model = defaultdict(lambda: defaultdict(int))

train_model(train_data, model)
C:\Users\Norbert\AppData\Local\Temp\ipykernel_15436\842062938.py:47: FutureWarning: The error_bad_lines argument has been deprecated and will be removed in a future version. Use on_bad_lines in the future.


  data = pd.read_csv(
C:\Users\Norbert\AppData\Local\Temp\ipykernel_15436\842062938.py:47: FutureWarning: The error_bad_lines argument has been deprecated and will be removed in a future version. Use on_bad_lines in the future.


  data = pd.read_csv(
---------------------------------------------------------------------------
UnicodeEncodeError                        Traceback (most recent call last)
c:\Users\Norbert\code\challenging-america-word-gap-prediction\testing.ipynb Cell 7' in <cell line: 5>()
      <a href='vscode-notebook-cell:/c%3A/Users/Norbert/code/challenging-america-word-gap-prediction/testing.ipynb#ch0000006?line=2'>3</a> train_model(train_data, model)
      <a href='vscode-notebook-cell:/c%3A/Users/Norbert/code/challenging-america-word-gap-prediction/testing.ipynb#ch0000006?line=3'>4</a> predict_data("dev-0/in.tsv.xz", "dev-0/out.tsv", model)
----> <a href='vscode-notebook-cell:/c%3A/Users/Norbert/code/challenging-america-word-gap-prediction/testing.ipynb#ch0000006?line=4'>5</a> predict_data("test-A/in.tsv.xz", "test-A/out.tsv", model)

c:\Users\Norbert\code\challenging-america-word-gap-prediction\testing.ipynb Cell 1' in predict_data(read_path, save_path, model)
     <a href='vscode-notebook-cell:/c%3A/Users/Norbert/code/challenging-america-word-gap-prediction/testing.ipynb#ch0000000?line=54'>55</a> else:
     <a href='vscode-notebook-cell:/c%3A/Users/Norbert/code/challenging-america-word-gap-prediction/testing.ipynb#ch0000000?line=55'>56</a>     prediction = predict(words[-1], model)
---> <a href='vscode-notebook-cell:/c%3A/Users/Norbert/code/challenging-america-word-gap-prediction/testing.ipynb#ch0000000?line=56'>57</a> file.write(prediction + "\n")

File C:\Python310\lib\encodings\cp1250.py:19, in IncrementalEncoder.encode(self, input, final)
     <a href='file:///c%3A/Python310/lib/encodings/cp1250.py?line=17'>18</a> def encode(self, input, final=False):
---> <a href='file:///c%3A/Python310/lib/encodings/cp1250.py?line=18'>19</a>     return codecs.charmap_encode(input,self.errors,encoding_table)[0]

UnicodeEncodeError: 'charmap' codec can't encode character '\u03b2' in position 21: character maps to <undefined>
predict_data("dev-0/in.tsv.xz", "dev-0/out.tsv", model)
C:\Users\Norbert\AppData\Local\Temp\ipykernel_15436\842062938.py:47: FutureWarning: The error_bad_lines argument has been deprecated and will be removed in a future version. Use on_bad_lines in the future.


  data = pd.read_csv(
predict_data("test-A/in.tsv.xz", "test-A/out.tsv", model)
C:\Users\Norbert\AppData\Local\Temp\ipykernel_15436\751703071.py:47: FutureWarning: The error_bad_lines argument has been deprecated and will be removed in a future version. Use on_bad_lines in the future.


  data = pd.read_csv(