challenging-america-word-ga.../.ipynb_checkpoints/run-checkpoint.ipynb

# Path to the compiled KenLM tools (lmplz, build_binary).
KENLM_BUILD_PATH='/home/haskell/kenlm/build'

Data preprocessing

import pandas as pd
import csv
import regex as re
def clean_text(text):
    # The corpus stores line breaks as the literal two-character sequence '\n';
    # rejoin hyphenated breaks, turn the rest into spaces, then strip punctuation.
    text = text.lower().replace('-\\n', '').replace('\\n', ' ')
    text = re.sub(r'\p{P}', '', text)
    return text
train_data = pd.read_csv('train/in.tsv.xz', sep='\t', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE)
train_labels = pd.read_csv('train/expected.tsv', sep='\t', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE)

train_data = train_data[[6, 7]]
train_data = pd.concat([train_data, train_labels], axis=1)

# Rebuild the full passage: left context + gap word + right context.
train_data['text'] = train_data[6] + ' ' + train_data[0] + ' ' + train_data[7]
train_data = train_data[['text']]

with open('processed_train.txt', 'w') as file:
    for _, row in train_data.iterrows():
        text = clean_text(str(row['text']))
        file.write(text + '\n')
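As a quick sanity check, clean_text lowercases a passage, rejoins hyphenated line breaks, and strips punctuation; a minimal example (the sample string is made up):

sample = 'Said the vice-\\npresident,\\nyesterday.'
print(clean_text(sample))  # said the vicepresident yesterday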

KenLM model

!$KENLM_BUILD_PATH/bin/lmplz -o 5 --skip_symbols < processed_train.txt > model/model.arpa
=== 1/5 Counting and sorting n-grams ===
Reading /home/haskell/Desktop/challenging-america-word-gap-prediction-kenlm/processed_train.txt
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
********************************Warning: <s> appears in the input.  All instances of <s>, </s>, and <unk> will be interpreted as whitespace.
********************************************************************
Unigram tokens 135911223 types 4381594
=== 2/5 Calculating and sorting adjusted counts ===
Chain sizes: 1:52579128 2:896866240 3:1681624320 4:2690598656 5:3923790080
Statistics:
1 4381594 D1=0.841838 D2=1.01787 D3+=1.21057
2 26800631 D1=0.836734 D2=1.01657 D3+=1.19437
3 69811700 D1=0.878562 D2=1.11227 D3+=1.27889
4 104063034 D1=0.931257 D2=1.23707 D3+=1.36664
5 119487533 D1=0.938146 D2=1.3058 D3+=1.41614
Memory estimate for binary LM:
type      MB
probing 6752 assuming -p 1.5
probing 7917 assuming -r models -p 1.5
trie    3572 without quantization
trie    2120 assuming -q 8 -b 8 quantization 
trie    3104 assuming -a 22 array pointer compression
trie    1652 assuming -a 22 -q 8 -b 8 array pointer compression and quantization
=== 3/5 Calculating and sorting initial probabilities ===
Chain sizes: 1:52579128 2:428810096 3:1396234000 4:2497512816 5:3345650924
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
####################################################################################################
=== 4/5 Calculating and writing order-interpolated probabilities ===
Chain sizes: 1:52579128 2:428810096 3:1396234000 4:2497512816 5:3345650924
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
####################################################################################################
=== 5/5 Writing ARPA model ===
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
Name:lmplz	VmPeak:9201752 kB	VmRSS:2564 kB	RSSMax:7648448 kB	user:506.342	sys:106.578	CPU:612.92	real:1564.6
!$KENLM_BUILD_PATH/bin/build_binary model/model.arpa model/model.binary
Reading model/model.arpa
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
SUCCESS
!rm processed_train.txt
!rm model/model.arpa
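Before generating full predictions, the binary model can be sanity-checked from Python; a minimal sketch (the sample phrase is arbitrary):

import kenlm

model = kenlm.Model('model/model.binary')
# Full-string log10 probability and per-word perplexity.
print(model.score('the quick brown fox', bos=False, eos=False))
print(model.perplexity('the quick brown fox'))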

Predictions

import kenlm
import csv
import pandas as pd
import regex as re
from math import log10
from nltk import word_tokenize  # requires the NLTK 'punkt' tokenizer data
from english_words import english_words_alpha_set
model = kenlm.Model('model/model.binary')
def clean_text(text):
    # Same cleaning as in training: the corpus stores line breaks as the
    # literal sequence '\n'; rejoin hyphenated breaks and strip punctuation.
    text = text.lower().replace('-\\n', '').replace('\\n', ' ')
    text = re.sub(r'\p{P}', '', text)
    return text
def predict_probs(w1, w2, w4):
    # Rank every candidate gap word by the KenLM score of the 4-gram
    # "w1 w2 <candidate> w4" and keep the 20 best.
    best_scores = []
    for word in english_words_alpha_set:
        text = ' '.join([w1, w2, word, w4])
        text_score = model.score(text, bos=False, eos=False)
        if len(best_scores) < 20:
            best_scores.append((word, text_score))
        else:
            worst_score = min(best_scores, key=lambda tup: tup[1])
            if worst_score[1] < text_score:
                best_scores.remove(worst_score)
                best_scores.append((word, text_score))
    probs = sorted(best_scores, key=lambda tup: tup[1], reverse=True)
    pred_str = ' '.join(f'{word}:{prob}' for word, prob in probs)
    # Trailing ':' entry covers the mass assigned to all other words.
    pred_str += f' :{log10(0.99)}'
    return pred_str
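The running top-20 selection above can be written in a single pass with heapq.nlargest, which avoids rescanning the list for the worst entry; a minimal equivalent sketch (the function name is mine; model and english_words_alpha_set as defined above):

import heapq

def predict_probs_nlargest(w1, w2, w4):
    # Score every candidate once, then keep the 20 highest-scoring words.
    scored = ((word, model.score(' '.join([w1, w2, word, w4]), bos=False, eos=False))
              for word in english_words_alpha_set)
    probs = heapq.nlargest(20, scored, key=lambda tup: tup[1])
    return ' '.join(f'{word}:{prob}' for word, prob in probs) + f' :{log10(0.99)}'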
dev_data = pd.read_csv('dev-0/in.tsv.xz', sep='\t', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE)
test_data = pd.read_csv('test-A/in.tsv.xz', sep='\t', on_bad_lines='skip', header=None, quoting=csv.QUOTE_NONE)
def write_predictions(data, output_path):
    # Use the last two left-context words and the first right-context word;
    # emit a bare ':1.0' fallback when the context is too short to score.
    with open(output_path, 'w') as file:
        for _, row in data.iterrows():
            left_words = word_tokenize(clean_text(str(row[6])))
            right_words = word_tokenize(clean_text(str(row[7])))
            if len(left_words) < 2 or len(right_words) < 2:
                prediction = ':1.0'
            else:
                prediction = predict_probs(left_words[-2], left_words[-1], right_words[0])
            file.write(prediction + '\n')

write_predictions(dev_data, 'dev-0/out.tsv')
write_predictions(test_data, 'test-A/out.tsv')
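Each line of out.tsv thus holds the top-20 candidates as space-separated word:logprob pairs, followed by the catch-all ':' entry for all remaining words, e.g. (scores illustrative):

the:-2.91 a:-3.42 his:-3.77 :-0.0044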