better solution

Łukasz Jędyk 2022-04-22 00:21:40 +02:00
parent dbc2815e28
commit 1472efcaf1
6 changed files with 17957 additions and 18287 deletions


@@ -1,6 +0,0 @@
{
"cells": [],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}


@@ -1,324 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "f73a28ea",
"metadata": {},
"outputs": [],
"source": [
"KENLM_BUILD_PATH='/home/haskell/kenlm/build'"
]
},
{
"cell_type": "markdown",
"id": "9fc5cda3",
"metadata": {},
"source": [
"### Preprocessing danych"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d42ddd87",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import csv\n",
"import regex as re"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "f84be210",
"metadata": {},
"outputs": [],
"source": [
"def clean_text(text):\n",
" text = text.lower().replace('-\\\\n', '').replace('\\\\n', ' ')\n",
" text = re.sub(r'\\p{P}', '', text)\n",
"\n",
" return text"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "de0c12d6",
"metadata": {},
"outputs": [],
"source": [
"train_data = pd.read_csv('train/in.tsv.xz', sep='\\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)\n",
"train_labels = pd.read_csv('train/expected.tsv', sep='\\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)\n",
"\n",
"train_data = train_data[[6, 7]]\n",
"train_data = pd.concat([train_data, train_labels], axis=1)\n",
"\n",
"train_data['text'] = train_data[6] + train_data[0] + train_data[7]\n",
"train_data = train_data[['text']]\n",
"\n",
"with open('processed_train.txt', 'w') as file:\n",
" for _, row in train_data.iterrows():\n",
" text = clean_text(str(row['text']))\n",
" file.write(text + '\\n')"
]
},
{
"cell_type": "markdown",
"id": "846b6b42",
"metadata": {},
"source": [
"### Model kenLM"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3c74d4be",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"=== 1/5 Counting and sorting n-grams ===\n",
"Reading /home/haskell/Desktop/challenging-america-word-gap-prediction-kenlm/processed_train.txt\n",
"----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
"********************************Warning: <s> appears in the input. All instances of <s>, </s>, and <unk> will be interpreted as whitespace.\n",
"********************************************************************\n",
"Unigram tokens 135911223 types 4381594\n",
"=== 2/5 Calculating and sorting adjusted counts ===\n",
"Chain sizes: 1:52579128 2:896866240 3:1681624320 4:2690598656 5:3923790080\n",
"Statistics:\n",
"1 4381594 D1=0.841838 D2=1.01787 D3+=1.21057\n",
"2 26800631 D1=0.836734 D2=1.01657 D3+=1.19437\n",
"3 69811700 D1=0.878562 D2=1.11227 D3+=1.27889\n",
"4 104063034 D1=0.931257 D2=1.23707 D3+=1.36664\n",
"5 119487533 D1=0.938146 D2=1.3058 D3+=1.41614\n",
"Memory estimate for binary LM:\n",
"type MB\n",
"probing 6752 assuming -p 1.5\n",
"probing 7917 assuming -r models -p 1.5\n",
"trie 3572 without quantization\n",
"trie 2120 assuming -q 8 -b 8 quantization \n",
"trie 3104 assuming -a 22 array pointer compression\n",
"trie 1652 assuming -a 22 -q 8 -b 8 array pointer compression and quantization\n",
"=== 3/5 Calculating and sorting initial probabilities ===\n",
"Chain sizes: 1:52579128 2:428810096 3:1396234000 4:2497512816 5:3345650924\n",
"----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
"####################################################################################################\n",
"=== 4/5 Calculating and writing order-interpolated probabilities ===\n",
"Chain sizes: 1:52579128 2:428810096 3:1396234000 4:2497512816 5:3345650924\n",
"----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
"####################################################################################################\n",
"=== 5/5 Writing ARPA model ===\n",
"----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
"****************************************************************************************************\n",
"Name:lmplz\tVmPeak:9201752 kB\tVmRSS:2564 kB\tRSSMax:7648448 kB\tuser:506.342\tsys:106.578\tCPU:612.92\treal:1564.6\n"
]
}
],
"source": [
"!$KENLM_BUILD_PATH/bin/lmplz -o 5 --skip_symbols < processed_train.txt > model/model.arpa"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "dc65780b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Reading model/model.arpa\n",
"----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
"****************************************************************************************************\n",
"SUCCESS\n"
]
}
],
"source": [
"!$KENLM_BUILD_PATH/bin/build_binary model/model.arpa model/model.binary"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "2087eb80",
"metadata": {},
"outputs": [],
"source": [
"!rm processed_train.txt"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "4ba1e592",
"metadata": {},
"outputs": [],
"source": [
"!rm model/model.arpa"
]
},
{
"cell_type": "markdown",
"id": "e41f7951",
"metadata": {},
"source": [
"### Predykcje"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "6865301b",
"metadata": {},
"outputs": [],
"source": [
"import kenlm\n",
"import csv\n",
"import pandas as pd\n",
"import regex as re\n",
"from math import log10\n",
"from nltk import word_tokenize\n",
"from english_words import english_words_alpha_set"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e32de662",
"metadata": {},
"outputs": [],
"source": [
"model = kenlm.Model('model/model.binary')"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "c2535482",
"metadata": {},
"outputs": [],
"source": [
"def clean_text(text):\n",
" text = text.lower().replace('-\\\\n', '').replace('\\\\n', ' ')\n",
" text = re.sub(r'\\p{P}', '', text)\n",
"\n",
" return text"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "2308ccad",
"metadata": {},
"outputs": [],
"source": [
"def predict_probs(w1, w2, w4):\n",
" best_scores = []\n",
" for word in english_words_alpha_set:\n",
" text = ' '.join([w1, w2, word, w4])\n",
" text_score = model.score(text, bos=False, eos=False)\n",
" if len(best_scores) < 20:\n",
" best_scores.append((word, text_score))\n",
" else:\n",
" is_better = False\n",
" worst_score = None\n",
" for score in best_scores:\n",
" if not worst_score:\n",
" worst_score = score\n",
" else:\n",
" if worst_score[1] > score[1]:\n",
" worst_score = score\n",
" if worst_score[1] < text_score:\n",
" best_scores.remove(worst_score)\n",
" best_scores.append((word, text_score))\n",
" probs = sorted(best_scores, key=lambda tup: tup[1], reverse=True)\n",
" pred_str = ''\n",
" for word, prob in probs:\n",
" pred_str += f'{word}:{prob} '\n",
" pred_str += f':{log10(0.99)}'\n",
" return pred_str"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "7245cf38",
"metadata": {},
"outputs": [],
"source": [
"dev_data = pd.read_csv('dev-0/in.tsv.xz', sep='\\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)\n",
"test_data = pd.read_csv('test-A/in.tsv.xz', sep='\\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "ac24ff37",
"metadata": {},
"outputs": [],
"source": [
"with open('dev-0/out.tsv', 'w') as file:\n",
" for index, row in dev_data.iterrows():\n",
" left_text = clean_text(str(row[6]))\n",
" right_text = clean_text(str(row[7]))\n",
" left_words = word_tokenize(left_text)\n",
" right_words = word_tokenize(right_text)\n",
" if len(left_words) < 2 or len(right_words) < 2:\n",
" prediction = ':1.0'\n",
" else:\n",
" prediction = predict_probs(left_words[len(left_words) - 2], left_words[len(left_words) - 1], right_words[0])\n",
" file.write(prediction + '\\n')"
]
},
{
"cell_type": "code",
"execution_count": 37,
"id": "a18b6ebd",
"metadata": {},
"outputs": [],
"source": [
"with open('test-A/out.tsv', 'w') as file:\n",
" for index, row in test_data.iterrows():\n",
" left_text = clean_text(str(row[6]))\n",
" right_text = clean_text(str(row[7]))\n",
" left_words = word_tokenize(left_text)\n",
" right_words = word_tokenize(right_text)\n",
" if len(left_words) < 2 or len(right_words) < 2:\n",
" prediction = ':1.0'\n",
" else:\n",
" prediction = predict_probs(left_words[len(left_words) - 2], left_words[len(left_words) - 1], right_words[0])\n",
" file.write(prediction + '\\n')"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
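The predict_probs cell in the deleted notebook keeps the 20 best-scoring candidates by rescanning the whole best_scores list for its worst entry on every new word, which costs O(k) per candidate (and the is_better flag it sets is never read). A min-heap does the same top-k selection in O(log k) per candidate. The sketch below is a hedged alternative, not the author's code: it reuses model, english_words_alpha_set, and the 'word:logprob' output format from the notebook, and predict_probs_heap is a hypothetical name.

import heapq
from math import log10

def predict_probs_heap(w1, w2, w4, k=20):
    # heap[0] is always the worst of the k best candidates seen so far
    heap = []
    for word in english_words_alpha_set:
        score = model.score(' '.join([w1, w2, word, w4]), bos=False, eos=False)
        if len(heap) < k:
            heapq.heappush(heap, (score, word))
        elif score > heap[0][0]:
            # new candidate beats the current worst: swap it in
            heapq.heapreplace(heap, (score, word))
    # best first, same 'word:logprob ... :rest' line format as predict_probs
    pairs = sorted(heap, reverse=True)
    return ' '.join(f'{word}:{score}' for score, word in pairs) + f' :{log10(0.99)}'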

File diff suppressed because it is too large


@@ -176,7 +176,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 32,
+   "execution_count": 1,
    "id": "6865301b",
    "metadata": {},
    "outputs": [],
@@ -192,7 +192,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 2,
    "id": "e32de662",
    "metadata": {},
    "outputs": [],
@@ -202,7 +202,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 28,
+   "execution_count": 3,
    "id": "c2535482",
    "metadata": {},
    "outputs": [],
@@ -216,17 +216,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 29,
+   "execution_count": 4,
    "id": "2308ccad",
    "metadata": {},
    "outputs": [],
    "source": [
-    "def predict_probs(w1, w2, w4):\n",
+    "def predict_probs(w1, w3):\n",
     "    best_scores = []\n",
     "    for word in english_words_alpha_set:\n",
-    "        text = ' '.join([w1, w2, word, w4])\n",
+    "        text = ' '.join([w1, word, w3])\n",
     "        text_score = model.score(text, bos=False, eos=False)\n",
-    "        if len(best_scores) < 20:\n",
+    "        if len(best_scores) < 12:\n",
     "            best_scores.append((word, text_score))\n",
     "        else:\n",
     "            is_better = False\n",
@@ -250,7 +250,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 30,
+   "execution_count": 5,
    "id": "7245cf38",
    "metadata": {},
    "outputs": [],
@@ -261,7 +261,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 35,
+   "execution_count": 7,
    "id": "ac24ff37",
    "metadata": {},
    "outputs": [],
@@ -275,13 +275,13 @@
     "        if len(left_words) < 2 or len(right_words) < 2:\n",
     "            prediction = ':1.0'\n",
     "        else:\n",
-    "            prediction = predict_probs(left_words[len(left_words) - 2], left_words[len(left_words) - 1], right_words[0])\n",
+    "            prediction = predict_probs(left_words[len(left_words) - 1], right_words[0])\n",
     "        file.write(prediction + '\\n')"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 37,
+   "execution_count": 8,
    "id": "a18b6ebd",
    "metadata": {},
    "outputs": [],
@@ -295,7 +295,7 @@
     "        if len(left_words) < 2 or len(right_words) < 2:\n",
     "            prediction = ':1.0'\n",
     "        else:\n",
-    "            prediction = predict_probs(left_words[len(left_words) - 2], left_words[len(left_words) - 1], right_words[0])\n",
+    "            prediction = predict_probs(left_words[len(left_words) - 1], right_words[0])\n",
     "        file.write(prediction + '\\n')"
    ]
   }
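The hunks above narrow the scoring window from a 4-gram context (two words of left context, the gap, one word of right context) to a trigram (one word on each side) and shrink the kept candidate list from 20 to 12, trading context for speed. A hedged before-and-after sketch of the scoring call, assuming model is the kenlm.Model loaded earlier and using made-up example values for the tokenized context:

left_words, right_words = ['under', 'the'], ['of']
word = 'middle'

# old: 4-gram window around the gap (top 20 candidates kept)
old_score = model.score(' '.join([left_words[-2], left_words[-1], word, right_words[0]]),
                        bos=False, eos=False)

# new: trigram window around the gap (top 12 candidates kept)
new_score = model.score(' '.join([left_words[-1], word, right_words[0]]),
                        bos=False, eos=False)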

run.py (24 changed lines)

@@ -73,7 +73,7 @@ get_ipython().system('rm model/model.arpa')
 
 # ### Predictions
 
-# In[32]:
+# In[1]:
 
 
 import kenlm
@@ -85,13 +85,13 @@ from nltk import word_tokenize
 from english_words import english_words_alpha_set
 
 
-# In[4]:
+# In[2]:
 
 
 model = kenlm.Model('model/model.binary')
 
 
-# In[28]:
+# In[3]:
 
 
 def clean_text(text):
@@ -101,15 +101,15 @@ def clean_text(text):
     return text
 
 
-# In[29]:
+# In[4]:
 
 
-def predict_probs(w1, w2, w4):
+def predict_probs(w1, w3):
     best_scores = []
     for word in english_words_alpha_set:
-        text = ' '.join([w1, w2, word, w4])
+        text = ' '.join([w1, word, w3])
         text_score = model.score(text, bos=False, eos=False)
-        if len(best_scores) < 20:
+        if len(best_scores) < 12:
             best_scores.append((word, text_score))
         else:
             is_better = False
@@ -131,14 +131,14 @@ def predict_probs(w1, w2, w4):
     return pred_str
 
 
-# In[30]:
+# In[5]:
 
 
 dev_data = pd.read_csv('dev-0/in.tsv.xz', sep='\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)
 test_data = pd.read_csv('test-A/in.tsv.xz', sep='\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)
 
 
-# In[35]:
+# In[7]:
 
 
 with open('dev-0/out.tsv', 'w') as file:
@@ -150,11 +150,11 @@ with open('dev-0/out.tsv', 'w') as file:
         if len(left_words) < 2 or len(right_words) < 2:
             prediction = ':1.0'
         else:
-            prediction = predict_probs(left_words[len(left_words) - 2], left_words[len(left_words) - 1], right_words[0])
+            prediction = predict_probs(left_words[len(left_words) - 1], right_words[0])
         file.write(prediction + '\n')
 
 
-# In[37]:
+# In[8]:
 
 
 with open('test-A/out.tsv', 'w') as file:
@@ -166,6 +166,6 @@ with open('test-A/out.tsv', 'w') as file:
         if len(left_words) < 2 or len(right_words) < 2:
             prediction = ':1.0'
         else:
-            prediction = predict_probs(left_words[len(left_words) - 2], left_words[len(left_words) - 1], right_words[0])
+            prediction = predict_probs(left_words[len(left_words) - 1], right_words[0])
         file.write(prediction + '\n')
 
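Each line these loops write is a space-separated sequence of word:logprob pairs, closed by a bare :logprob entry that reserves probability mass for every unlisted word (a lone ':1.0' when the context is too short to score). A hedged sketch for reading one such line back; parse_prediction_line is a hypothetical helper, not part of the repository:

from math import log10

def parse_prediction_line(line):
    pairs = []
    for token in line.strip().split(' '):
        # split on the last ':'; the rest-mass entry yields an empty-string word
        word, _, logp = token.rpartition(':')
        pairs.append((word, float(logp)))
    return pairs

print(parse_prediction_line(f'the:-1.31 a:-2.02 :{log10(0.99)}'))
# the final pair has an empty word and carries the leftover log10(0.99) mass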

File diff suppressed because it is too large