challenging-america-word-ga.../Untitled.ipynb
2022-06-25 21:43:48 +02:00

8.2 KiB
Raw Blame History

import pandas as pd
import csv
import regex as re
from nltk import bigrams, word_tokenize
from collections import Counter, defaultdict
import string
import unicodedata

# Load the challenge inputs (left/right contexts) and the expected middle
# words.  on_bad_lines="skip" replaces the deprecated error_bad_lines=False
# (pandas >= 1.3 -- see the FutureWarning in the captured output below) and
# keeps the same skip-malformed-rows behavior.
data = pd.read_csv(
    "train/in.tsv.xz",
    sep="\t",
    on_bad_lines="skip",
    header=None,
    quoting=csv.QUOTE_NONE,
)
train_labels = pd.read_csv(
    "train/expected.tsv",
    sep="\t",
    on_bad_lines="skip",
    header=None,
    quoting=csv.QUOTE_NONE,
)

# Column 6 = left context, column 7 = right context; column 0 of the labels
# frame is the expected middle word.  "final" glues them into one running text
# so bigrams can be counted across the gap.
train_data = data[[6, 7]]
train_data = pd.concat([train_data, train_labels], axis=1)

train_data["final"] = train_data[6] + train_data[0] + train_data[7]

# Nested default dict: model[w1][w2] -> bigram count, normalized to
# P(w2 | w1) by train_model().
model = defaultdict(lambda: defaultdict(lambda: 0))


def clean(text):
    """Lowercase *text*, strip line-break artifacts, and remove punctuation."""
    lowered = str(text).lower()
    # Undo hyphenation at line breaks, then flatten remaining break markers
    # into single spaces.
    lowered = lowered.replace("-\\\\n", "")
    lowered = lowered.replace("\\\\n", " ")
    # \p{P} matches any Unicode punctuation (regex-module syntax, not stdlib re).
    return re.sub(r"\p{P}", "", lowered)

def train_model(data):
    """Populate the global bigram *model* with P(w2 | w1) estimates.

    First pass counts bigram occurrences over the cleaned, tokenized
    "final" column; second pass normalizes each row of counts in place
    into conditional probabilities.
    """
    for _, row in data.iterrows():
        tokens = word_tokenize(clean(row["final"]))
        for first, second in bigrams(tokens, pad_left=True, pad_right=True):
            # Padding tokens are None; skip pairs that include them.
            if first and second:
                model[first][second] += 1

    # Convert raw counts to probabilities, one context word at a time.
    for successors in model.values():
        total = float(sum(successors.values()))
        for second in successors:
            successors[second] /= total


def predict(word):
    """Return the top-5 successors of *word* as a "token:prob ..." string,
    with the leftover probability mass appended as a bare ":p" entry
    (floored at 0.01).

    Falls back to a fixed distribution when *word* has no successors.
    """
    # Use .get() instead of model[word]: indexing the defaultdict would
    # silently insert an empty entry for every unseen query word,
    # polluting the model as a side effect of prediction.
    successors = model.get(word)
    if not successors:
        return "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"

    most_common = Counter(successors).most_common(5)

    total_prob = 0.0
    str_prediction = ""
    # NOTE: loop variable renamed so it no longer shadows the *word* parameter.
    for successor, prob in most_common:
        total_prob += prob
        str_prediction += f"{successor}:{prob} "

    if not total_prob:
        return "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"

    if 1 - total_prob >= 0.01:
        str_prediction += f":{1-total_prob}"
    else:
        str_prediction += f":0.01"

    return str_prediction


def predict_data(read_path, save_path):
    """Read challenge rows from *read_path* and write one prediction line
    per row to *save_path*.

    The last token of the cleaned left context (column 6) queries the
    bigram model; rows with fewer than 3 tokens get a fixed fallback
    distribution.
    """
    data = pd.read_csv(
        read_path,
        sep="\t",
        on_bad_lines="skip",  # error_bad_lines=False is deprecated (FutureWarning)
        header=None,
        quoting=csv.QUOTE_NONE,
    )
    # Explicit UTF-8 is required: predicted tokens can contain characters
    # outside the Windows locale codec (the captured traceback shows
    # '\u25a0' failing under cp1250 without it).
    with open(save_path, "w", encoding="UTF-8") as file:
        for _, row in data.iterrows():
            words = word_tokenize(clean(row[6]))
            if len(words) < 3:
                prediction = "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
            else:
                prediction = predict(words[-1])
            file.write(prediction + "\n")


# Build the bigram model from the training data, then generate prediction
# files for the dev and test splits.
train_model(train_data)
predict_data("dev-0/in.tsv.xz", "dev-0/out.tsv")
predict_data("test-A/in.tsv.xz", "test-A/out.tsv")
C:\Users\grzyb\AppData\Local\Temp/ipykernel_34768/887107210.py:86: FutureWarning: The error_bad_lines argument has been deprecated and will be removed in a future version.


  predict_data("dev-0/in.tsv.xz", "dev-0/out.tsv")
---------------------------------------------------------------------------
UnicodeEncodeError                        Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_34768/887107210.py in <module>
     84 
     85 train_model(train_data)
---> 86 predict_data("dev-0/in.tsv.xz", "dev-0/out.tsv")
     87 predict_data("test-A/in.tsv.xz", "test-A/out.tsv")

~\AppData\Local\Temp/ipykernel_34768/887107210.py in predict_data(read_path, save_path)
     80             else:
     81                 prediction = predict(words[-1])
---> 82             file.write(prediction + "\n")
     83 
     84 

c:\Users\grzyb\anaconda3\lib\encodings\cp1250.py in encode(self, input, final)
     17 class IncrementalEncoder(codecs.IncrementalEncoder):
     18     def encode(self, input, final=False):
---> 19         return codecs.charmap_encode(input,self.errors,encoding_table)[0]
     20 
     21 class IncrementalDecoder(codecs.IncrementalDecoder):

UnicodeEncodeError: 'charmap' codec can't encode character '\u25a0' in position 0: character maps to <undefined>