#!/usr/bin/env python
# coding: utf-8

# In[2]:


import csv
from collections import Counter, defaultdict

import pandas as pd
import regex as re
from nltk import bigrams, word_tokenize
from tqdm import tqdm

pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)

NROWS = 50000
ALPHA = 0.1


def etl():
    """Load training contexts and expected gap words; return them joined,
    together with an empty nested-dict bigram model."""
    data = pd.read_csv(
        "train/in.tsv.xz",
        sep="\t",
        on_bad_lines="skip",  # successor of the deprecated error_bad_lines=False
        header=None,
        quoting=csv.QUOTE_NONE,
        nrows=NROWS,
    )
    train_labels = pd.read_csv(
        "train/expected.tsv",
        sep="\t",
        on_bad_lines="skip",
        header=None,
        quoting=csv.QUOTE_NONE,
        nrows=NROWS,
    )
    # Column 6 is the left context, column 7 the right context; column 0 of
    # the labels frame is the expected gap word.
    train_data = data[[6, 7]].copy()
    train_data = pd.concat([train_data, train_labels], axis=1)
    # Join with spaces so that the boundary words do not fuse into one token.
    train_data["final"] = train_data[6] + " " + train_data[0] + " " + train_data[7]
    model = defaultdict(lambda: defaultdict(int))
    return train_data, model


def clean(text):
    # The corpus encodes line breaks as the literal two-character sequence
    # "\n"; remove hyphenated breaks, turn the rest into spaces, then strip
    # all Unicode punctuation (\p{P} needs the third-party `regex` module).
    text = str(text).lower().replace("-\\n", "").replace("\\n", " ")
    return re.sub(r"\p{P}", "", text)


def train_model(data):
    """Accumulate raw bigram counts; plus_alpha_smoothing() turns them into
    probabilities afterwards."""
    for _, row in tqdm(data.iterrows()):
        words = word_tokenize(clean(row["final"]))
        for word_1, word_2 in bigrams(words, pad_left=True, pad_right=True):
            if word_1 and word_2:
                vocab.add(word_1)
                vocab.add(word_2)
                model[word_1][word_2] += 1


def predict(word):
    predictions = dict(model[word])
    most_common = dict(Counter(predictions).most_common(5))
    total_prob = 0.0
    str_prediction = ""
    for next_word, prob in most_common.items():
        total_prob += prob
        str_prediction += f"{next_word}:{prob} "
    if not total_prob:
        # Unseen context word: fall back to a fixed unigram-style guess.
        return "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
    # Reserve the leftover probability mass for unlisted words (":<prob>").
    if 1 - total_prob >= 0.01:
        str_prediction += f":{1 - total_prob}"
    else:
        str_prediction += ":0.01"
    return str_prediction


def predict_data(read_path, save_path):
    data = pd.read_csv(
        read_path,
        sep="\t",
        on_bad_lines="skip",
        header=None,
        quoting=csv.QUOTE_NONE,
    )
    with open(save_path, "w", encoding="utf-8") as file:
        for _, row in tqdm(data.iterrows()):
            words = word_tokenize(clean(row[6]))
            if len(words) < 3:
                prediction = "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
            else:
                # Predict from the last word of the left context.
                prediction = predict(words[-1])
            file.write(prediction + "\n")


def plus_alpha_smoothing():
    """Add-alpha smoothing over the raw bigram counts:
    P(w2 | w1) = (count(w1, w2) + ALPHA) / (count(w1) + ALPHA * |V|)."""
    vocab_size = len(vocab)
    for word_1 in tqdm(model):
        word_1_occurrences = sum(model[word_1].values())
        denominator = float(word_1_occurrences + ALPHA * vocab_size)
        for word_2 in model[word_1]:
            model[word_1][word_2] = (model[word_1][word_2] + ALPHA) / denominator


print("Loading data...")
train_data, model = etl()
vocab = set()
print("Training model...")
train_model(train_data)
print("Smoothing...")
plus_alpha_smoothing()
print("Predicting...")
print("Dev set")
predict_data("dev-0/in.tsv.xz", "dev-0/out.tsv")
print("Test set")
predict_data("test-A/in.tsv.xz", "test-A/out.tsv")


# In[ ]:
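# A minimal sanity-check cell, not part of the original pipeline: query the
# trained, smoothed model for a few context words and print the formatted
# predictions. The sample words are illustrative assumptions; any token seen
# during training works the same way.
for sample_word in ("the", "of", "united"):
    print(sample_word, "->", predict(sample_word))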