challenging-america-word-ga.../run.ipynb

import csv
from collections import Counter, defaultdict

import pandas as pd
import regex as re
from nltk import bigrams, word_tokenize  # word_tokenize needs nltk's "punkt" data
from tqdm import tqdm

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)

NROWS = 50000  # number of training rows to read
ALPHA = 0.1    # add-alpha smoothing constant


def etl():
    # in.tsv.xz: column 6 holds the left context and column 7 the right
    # context; expected.tsv holds the gap word (column 0).
    data = pd.read_csv(
        "train/in.tsv.xz",
        sep="\t",
        on_bad_lines="skip",  # error_bad_lines=False on pandas < 1.3
        header=None,
        quoting=csv.QUOTE_NONE,
        nrows=NROWS
    )
    train_labels = pd.read_csv(
        "train/expected.tsv",
        sep="\t",
        on_bad_lines="skip",
        header=None,
        quoting=csv.QUOTE_NONE,
        nrows=NROWS
    )

    train_data = data[[6, 7]]
    train_data = pd.concat([train_data, train_labels], axis=1)

    # Reassemble the full passage; the spaces keep the gap word from fusing
    # with its neighbours.
    train_data["final"] = train_data[6] + " " + train_data[0] + " " + train_data[7]

    model = defaultdict(lambda: defaultdict(int))
    return train_data, model
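
# At this point train_data has columns 6 (left context), 7 (right context),
# 0 (the gap word from expected.tsv) and "final" (the reassembled passage);
# model is an empty nested counter keyed as model[word_1][word_2].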


def clean(text):
    # The corpus stores line breaks as the literal two-character sequence
    # "\n": join words hyphenated across lines, turn remaining breaks into
    # spaces, then lowercase and strip all punctuation.
    text = str(text).lower().replace("-\\n", "").replace("\\n", " ")
    return re.sub(r"\p{P}", "", text)
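
# A quick illustration (hypothetical input): a fragment stored as
# "pre-\nsident Smith\nsaid" cleans to "president smith said" -- the
# hyphenated line break is joined, the other break becomes a space, and
# the result is lowercased with punctuation removed.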


def train_model(data):
    # Count bigram occurrences; normalization into probabilities happens
    # later, in plus_alpha_smoothing().
    print("1/2")
    for _, row in tqdm(data.iterrows()):
        words = word_tokenize(clean(row["final"]))
        for word_1, word_2 in bigrams(words, pad_left=True, pad_right=True):
            # Padding produces (None, w) and (w, None) pairs; skip them.
            if word_1 and word_2:
                vocab.add(word_1)
                vocab.add(word_2)
                model[word_1][word_2] += 1


def predict(word):
    # model.get avoids inserting an empty entry for an unseen previous word.
    predictions = dict(model.get(word, {}))
    most_common = dict(Counter(predictions).most_common(5))

    total_prob = 0.0
    str_prediction = ""

    for next_word, prob in most_common.items():
        total_prob += prob
        str_prediction += f"{next_word}:{prob} "

    # Unseen previous word: fall back to a fixed, roughly unigram-shaped guess.
    if not total_prob:
        return "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"

    # The trailing ":p" entry assigns the leftover probability mass to
    # anything outside the five listed words.
    if 1 - total_prob >= 0.01:
        str_prediction += f":{1 - total_prob}"
    else:
        str_prediction += ":0.01"

    return str_prediction
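
# For illustration (made-up probabilities): if "york" follows "new" with
# probability 0.6 and "engine" with 0.3 in the smoothed model, predict("new")
# returns roughly "york:0.6 engine:0.3 :0.1", where the trailing ":0.1" is
# the unassigned mass.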


def predict_data(read_path, save_path):
    data = pd.read_csv(
        read_path, sep="\t", on_bad_lines="skip", header=None, quoting=csv.QUOTE_NONE
    )
    with open(save_path, "w", encoding="utf-8") as file:
        for _, row in tqdm(data.iterrows()):
            words = word_tokenize(clean(row[6]))
            # Condition on the last word of the left context; fall back to the
            # fixed distribution when the context is too short to trust.
            if len(words) < 3:
                prediction = "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
            else:
                prediction = predict(words[-1])
            file.write(prediction + "\n")
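
# One prediction line is written per input row, preserving row order so the
# output file lines up with the corresponding expected.tsv.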
            
def plus_alpha_smoothing():
    # Turn raw bigram counts into add-alpha-smoothed conditional probabilities:
    # P(w2 | w1) = (count(w1, w2) + ALPHA) / (count(w1) + ALPHA * |V|),
    # where |V| is the vocabulary size.
    print("2/2")
    vocab_size = len(vocab)
    for word_1 in tqdm(model):
        word_1_occurrences = sum(model[word_1].values())
        for word_2 in model[word_1]:
            model[word_1][word_2] = (model[word_1][word_2] + ALPHA) / (
                word_1_occurrences + ALPHA * vocab_size
            )
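
# Optional sanity check (assumes "the" occurs in the training data): every
# smoothed probability should lie strictly between 0 and 1.
# assert all(0 < p < 1 for p in model["the"].values())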


print("Loading data...")
train_data, model = etl()
vocab = set()
print("Training model...")
train_model(train_data)
print("Smoothing...")
plus_alpha_smoothing()
print("Predicting...")
print("Dev set")
predict_data("dev-0/in.tsv.xz", "dev-0/out.tsv")
print("Test set")
predict_data("test-A/in.tsv.xz", "test-A/out.tsv")
Loading data...
Training model...
1/2
50000it [03:35, 232.50it/s]
2/2
100%|██████████████████████████████████████████████████████████████████████| 753550/753550 [00:04<00:00, 176601.27it/s]
Smoothing...
100%|██████████████████████████████████████████████████████████████████████| 753550/753550 [00:06<00:00, 117904.94it/s]
Predicting...
Dev set
10519it [02:07, 82.51it/s] 
Test set
7414it [01:16, 96.50it/s]