challenging-america-word-ga.../kenlm.ipynb


from google.colab import drive
drive.mount('/content/gdrive')
Mounted at /content/gdrive
!pip install https://github.com/kpu/kenlm/archive/master.zip
Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/
Collecting https://github.com/kpu/kenlm/archive/master.zip
  Using cached https://github.com/kpu/kenlm/archive/master.zip (550 kB)
!pip install english_words
Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/
Collecting english_words
  Downloading english-words-1.1.0.tar.gz (1.1 MB)
     |████████████████████████████████| 1.1 MB 5.4 MB/s 
Building wheels for collected packages: english-words
  Building wheel for english-words (setup.py) ... done
  Created wheel for english-words: filename=english_words-1.1.0-py3-none-any.whl size=1106680 sha256=9959ed5d02a4c06063019ede18eebf1ef1be2562a62aa85f86a13d6a3fe1e34b
  Stored in directory: /root/.cache/pip/wheels/25/3d/4c/12a119ce90b46b4f90f9ddf41d719ecabb40faec6103379fc8
Successfully built english-words
Installing collected packages: english-words
Successfully installed english-words-1.1.0
import nltk
nltk.download("punkt")
[nltk_data] Downloading package punkt to /root/nltk_data...
[nltk_data]   Unzipping tokenizers/punkt.zip.
True
import pandas as pd
import csv
import regex as re
import kenlm
from english_words import english_words_alpha_set
from nltk import word_tokenize
from math import log10
from pathlib import Path
import os
import numpy as np


KENLM_BUILD_PATH = Path("gdrive/My Drive/gonito/kenlm/build")
KENLM_LMPLZ_PATH = KENLM_BUILD_PATH / "bin" / "lmplz"
KENLM_BUILD_BINARY_PATH = KENLM_BUILD_PATH / "bin" / "build_binary"
SUDO_PASSWORD = ""
PREDICTION = 'the:0.03 be:0.03 to:0.03 of:0.025 and:0.025 a:0.025 in:0.020 that:0.020 have:0.015 I:0.010 it:0.010 for:0.010 not:0.010 on:0.010 with:0.010 he:0.010 as:0.010 you:0.010 do:0.010 at:0.010 :0.77'
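PREDICTION is the fallback answer in the challenge's word:probability format; the final bare :0.77 entry reserves probability mass for every word not listed. A quick way to parse it (a sketch):

pairs = [entry.rsplit(':', 1) for entry in PREDICTION.split(' ')]
total = sum(float(prob) for _, prob in pairs)  # assigned mass, including the ':0.77' remainder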


def clean(text):
    # The corpus encodes line breaks as a literal "\n" two-character sequence;
    # drop hyphenation at line breaks, turn the remaining breaks into spaces.
    text = str(text).lower().replace("-\\n", "").replace("\\n", " ")
    # Strip all Unicode punctuation (the regex module's \p{P} class).
    return re.sub(r"\p{P}", "", text)
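
A quick sanity check of clean() on an illustrative input:

clean("Hel-\\nlo, World!")  # -> 'hello world'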


def create_train_data():
    # on_bad_lines="skip" replaces the deprecated error_bad_lines=False.
    data = pd.read_csv("gdrive/My Drive/gonito/train/in.tsv.xz", sep="\t", on_bad_lines="skip", header=None, quoting=csv.QUOTE_NONE, nrows=50000)
    train_labels = pd.read_csv("gdrive/My Drive/gonito/train/expected.tsv", sep="\t", on_bad_lines="skip", header=None, quoting=csv.QUOTE_NONE, nrows=50000)

    # Column 6 is the left context, column 7 the right context; the expected
    # gap word arrives as column 0 of the labels frame after the concat.
    train_data = data[[6, 7]]
    train_data = pd.concat([train_data, train_labels], axis=1)

    return train_data[6] + train_data[0] + train_data[7]
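
A toy illustration of the element-wise Series concatenation above (hypothetical rows; assumes the contexts carry the surrounding whitespace):

left = pd.Series(["the united states "])
gap = pd.Series(["of "])
right = pd.Series(["america declared independence"])
(left + gap + right)[0]  # 'the united states of america declared independence'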


def create_train_file(filename="gdrive/My Drive/gonito/train.txt"):
    with open(filename, "w") as f:
        for line in create_train_data():
            f.write(clean(line) + "\n")
    

def train_model():
    # Quote the paths ("My Drive" contains a space), train on the file written
    # by create_train_file(), and keep the model next to it on the drive so
    # kenlm.Model() below finds it.
    lmplz_command = f'"{KENLM_LMPLZ_PATH}" -o 4 < "gdrive/My Drive/gonito/train.txt" > "gdrive/My Drive/gonito/model.arpa"'
    build_binary_command = f'"{KENLM_BUILD_BINARY_PATH}" "gdrive/My Drive/gonito/model.arpa" "gdrive/My Drive/gonito/model.binary"'
    os.system('echo %s|sudo -S %s' % (SUDO_PASSWORD, lmplz_command))
    os.system('echo %s|sudo -S %s' % (SUDO_PASSWORD, build_binary_command))
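
os.system only returns an exit status, so a failed lmplz run is easy to miss. A minimal checked variant (a sketch under the same assumed paths; sudo omitted for simplicity) that raises on failure:

import subprocess

def train_model_checked():
    # shell=True keeps the < and > redirections; check=True raises
    # CalledProcessError if either KenLM step exits non-zero.
    subprocess.run(f'"{KENLM_LMPLZ_PATH}" -o 4 < "gdrive/My Drive/gonito/train.txt" > "gdrive/My Drive/gonito/model.arpa"', shell=True, check=True)
    subprocess.run(f'"{KENLM_BUILD_BINARY_PATH}" "gdrive/My Drive/gonito/model.arpa" "gdrive/My Drive/gonito/model.binary"', shell=True, check=True)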
    

def softmax(x):
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)
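
softmax() is never called below. Note also that KenLM's score() returns log10 probabilities, so renormalizing candidate scores into a proper distribution would need a base change first; a minimal sketch:

# p_i = 10**s_i / sum_j 10**s_j  ==  softmax(s * ln 10)
scores = np.array([-2.1, -3.4, -5.0])  # hypothetical log10 scores
probs = softmax(scores * np.log(10))   # sums to 1.0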

def predict(model, before, after):
    # Score every candidate word in the gap and keep the 12 best;
    # model.score() returns a log10 probability for the whole string.
    best_scores = []
    for word in english_words_alpha_set:
        text = ' '.join([before, word, after])
        text_score = model.score(text, bos=False, eos=False)
        if len(best_scores) < 12:
            best_scores.append((word, text_score))
        else:
            worst_score = min(best_scores, key=lambda tup: tup[1])
            if worst_score[1] < text_score:
                best_scores.remove(worst_score)
                best_scores.append((word, text_score))
    probs = sorted(best_scores, key=lambda tup: tup[1], reverse=True)
    pred_str = ''
    for word, prob in probs:
        pred_str += f'{word}:{prob} '
    pred_str += f':{log10(0.99)}'
    return pred_str
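
The returned string mirrors the PREDICTION format, except the values are raw KenLM log10 scores rather than probabilities. An illustration of the shape only (hypothetical words and scores, not real output):

# predict(model, 'united', 'of')
# -> 'states:-1.23 kingdom:-2.47 ... :-0.004364805402450088'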

def make_prediction(model, path, result_path):
    data = pd.read_csv(path, sep='\t', header=None, quoting=csv.QUOTE_NONE)
    with open(result_path, 'w', encoding='utf-8') as file_out:
        for _, row in data.iterrows():
            before, after = word_tokenize(clean(str(row[6]))), word_tokenize(clean(str(row[7])))
            if len(before) < 2 or len(after) < 2:
                # Too little context on either side: fall back to the
                # unigram-frequency distribution.
                pred = PREDICTION
            else:
                # Only the single word directly before and after the gap is used.
                pred = predict(model, before[-1], after[0])
            file_out.write(pred + '\n')


create_train_file()
train_model()
model = kenlm.Model('gdrive/My Drive/gonito/model.binary')
make_prediction(model, "gdrive/My Drive/gonito/dev-0/in.tsv.xz", "gdrive/My Drive/gonito/dev-0/out.tsv")
make_prediction(model, "gdrive/My Drive/gonito/test-A/in.tsv.xz", "gdrive/My Drive/gonito/test-A/out.tsv")