{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "f73a28ea", "metadata": {}, "outputs": [], "source": [ "KENLM_BUILD_PATH='/home/haskell/kenlm/build'" ] }, { "cell_type": "markdown", "id": "9fc5cda3", "metadata": {}, "source": [ "### Preprocessing danych" ] }, { "cell_type": "code", "execution_count": 2, "id": "d42ddd87", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import csv\n", "import regex as re" ] }, { "cell_type": "code", "execution_count": 3, "id": "f84be210", "metadata": {}, "outputs": [], "source": [ "def clean_text(text):\n", " text = text.lower().replace('-\\\\n', '').replace('\\\\n', ' ')\n", " text = re.sub(r'\\p{P}', '', text)\n", "\n", " return text" ] }, { "cell_type": "code", "execution_count": 4, "id": "de0c12d6", "metadata": {}, "outputs": [], "source": [ "train_data = pd.read_csv('train/in.tsv.xz', sep='\\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)\n", "train_labels = pd.read_csv('train/expected.tsv', sep='\\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)\n", "\n", "train_data = train_data[[6, 7]]\n", "train_data = pd.concat([train_data, train_labels], axis=1)\n", "\n", "train_data['text'] = train_data[6] + train_data[0] + train_data[7]\n", "train_data = train_data[['text']]\n", "\n", "with open('processed_train.txt', 'w') as file:\n", " for _, row in train_data.iterrows():\n", " text = clean_text(str(row['text']))\n", " file.write(text + '\\n')" ] }, { "cell_type": "markdown", "id": "846b6b42", "metadata": {}, "source": [ "### Model kenLM" ] }, { "cell_type": "code", "execution_count": 4, "id": "3c74d4be", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "=== 1/5 Counting and sorting n-grams ===\n", "Reading /home/haskell/Desktop/challenging-america-word-gap-prediction-kenlm/processed_train.txt\n", "----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n", "********************************Warning: appears in the input. All instances of , , and will be interpreted as whitespace.\n", "********************************************************************\n", "Unigram tokens 135911223 types 4381594\n", "=== 2/5 Calculating and sorting adjusted counts ===\n", "Chain sizes: 1:52579128 2:896866240 3:1681624320 4:2690598656 5:3923790080\n", "Statistics:\n", "1 4381594 D1=0.841838 D2=1.01787 D3+=1.21057\n", "2 26800631 D1=0.836734 D2=1.01657 D3+=1.19437\n", "3 69811700 D1=0.878562 D2=1.11227 D3+=1.27889\n", "4 104063034 D1=0.931257 D2=1.23707 D3+=1.36664\n", "5 119487533 D1=0.938146 D2=1.3058 D3+=1.41614\n", "Memory estimate for binary LM:\n", "type MB\n", "probing 6752 assuming -p 1.5\n", "probing 7917 assuming -r models -p 1.5\n", "trie 3572 without quantization\n", "trie 2120 assuming -q 8 -b 8 quantization \n", "trie 3104 assuming -a 22 array pointer compression\n", "trie 1652 assuming -a 22 -q 8 -b 8 array pointer compression and quantization\n", "=== 3/5 Calculating and sorting initial probabilities ===\n", "Chain sizes: 1:52579128 2:428810096 3:1396234000 4:2497512816 5:3345650924\n", "----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n", "####################################################################################################\n", "=== 4/5 Calculating and writing order-interpolated probabilities ===\n", "Chain sizes: 1:52579128 2:428810096 3:1396234000 4:2497512816 5:3345650924\n", "----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n", "####################################################################################################\n", "=== 5/5 Writing ARPA model ===\n", "----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n", "****************************************************************************************************\n", "Name:lmplz\tVmPeak:9201752 kB\tVmRSS:2564 kB\tRSSMax:7648448 kB\tuser:506.342\tsys:106.578\tCPU:612.92\treal:1564.6\n" ] } ], "source": [ "!$KENLM_BUILD_PATH/bin/lmplz -o 5 --skip_symbols < processed_train.txt > model/model.arpa" ] }, { "cell_type": "code", "execution_count": 5, "id": "dc65780b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Reading model/model.arpa\n", "----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n", "****************************************************************************************************\n", "SUCCESS\n" ] } ], "source": [ "!$KENLM_BUILD_PATH/bin/build_binary model/model.arpa model/model.binary" ] }, { "cell_type": "code", "execution_count": 6, "id": "2087eb80", "metadata": {}, "outputs": [], "source": [ "!rm processed_train.txt" ] }, { "cell_type": "code", "execution_count": 7, "id": "4ba1e592", "metadata": {}, "outputs": [], "source": [ "!rm model/model.arpa" ] }, { "cell_type": "markdown", "id": "e41f7951", "metadata": {}, "source": [ "### Predykcje" ] }, { "cell_type": "code", "execution_count": 32, "id": "6865301b", "metadata": {}, "outputs": [], "source": [ "import kenlm\n", "import csv\n", "import pandas as pd\n", "import regex as re\n", "from math import log10\n", "from nltk import word_tokenize\n", "from english_words import english_words_alpha_set" ] }, { "cell_type": "code", "execution_count": 4, "id": "e32de662", "metadata": {}, "outputs": [], "source": [ "model = kenlm.Model('model/model.binary')" ] }, { "cell_type": "code", "execution_count": 28, "id": "c2535482", "metadata": {}, "outputs": [], "source": [ "def clean_text(text):\n", " text = text.lower().replace('-\\\\n', '').replace('\\\\n', ' ')\n", " text = re.sub(r'\\p{P}', '', text)\n", "\n", " return text" ] }, { "cell_type": "code", "execution_count": 29, "id": "2308ccad", "metadata": {}, "outputs": [], "source": [ "def predict_probs(w1, w2, w4):\n", " best_scores = []\n", " for word in english_words_alpha_set:\n", " text = ' '.join([w1, w2, word, w4])\n", " text_score = model.score(text, bos=False, eos=False)\n", " if len(best_scores) < 20:\n", " best_scores.append((word, text_score))\n", " else:\n", " is_better = False\n", " worst_score = None\n", " for score in best_scores:\n", " if not worst_score:\n", " worst_score = score\n", " else:\n", " if worst_score[1] > score[1]:\n", " worst_score = score\n", " if worst_score[1] < text_score:\n", " best_scores.remove(worst_score)\n", " best_scores.append((word, text_score))\n", " probs = sorted(best_scores, key=lambda tup: tup[1], reverse=True)\n", " pred_str = ''\n", " for word, prob in probs:\n", " pred_str += f'{word}:{prob} '\n", " pred_str += f':{log10(0.99)}'\n", " return pred_str" ] }, { "cell_type": "code", "execution_count": 30, "id": "7245cf38", "metadata": {}, "outputs": [], "source": [ "dev_data = pd.read_csv('dev-0/in.tsv.xz', sep='\\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)\n", "test_data = pd.read_csv('test-A/in.tsv.xz', sep='\\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)" ] }, { "cell_type": "code", "execution_count": 35, "id": "ac24ff37", "metadata": {}, "outputs": [], "source": [ "with open('dev-0/out.tsv', 'w') as file:\n", " for index, row in dev_data.iterrows():\n", " left_text = clean_text(str(row[6]))\n", " right_text = clean_text(str(row[7]))\n", " left_words = word_tokenize(left_text)\n", " right_words = word_tokenize(right_text)\n", " if len(left_words) < 2 or len(right_words) < 2:\n", " prediction = ':1.0'\n", " else:\n", " prediction = predict_probs(left_words[len(left_words) - 2], left_words[len(left_words) - 1], right_words[0])\n", " file.write(prediction + '\\n')" ] }, { "cell_type": "code", "execution_count": 37, "id": "a18b6ebd", "metadata": {}, "outputs": [], "source": [ "with open('test-A/out.tsv', 'w') as file:\n", " for index, row in test_data.iterrows():\n", " left_text = clean_text(str(row[6]))\n", " right_text = clean_text(str(row[7]))\n", " left_words = word_tokenize(left_text)\n", " right_words = word_tokenize(right_text)\n", " if len(left_words) < 2 or len(right_words) < 2:\n", " prediction = ':1.0'\n", " else:\n", " prediction = predict_probs(left_words[len(left_words) - 2], left_words[len(left_words) - 1], right_words[0])\n", " file.write(prediction + '\\n')" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" } }, "nbformat": 4, "nbformat_minor": 5 }