solution part 1

Łukasz Jędyk 2022-05-01 12:06:02 +02:00
parent 6d845228ad
commit 3d5789c3bd
2 changed files with 2007812 additions and 160 deletions

europarl.txt (new file): 2007723 additions; diff suppressed because it is too large

run.ipynb: 247 changed lines

@@ -2,215 +2,144 @@
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"id": "f73a28ea",
"metadata": {},
"outputs": [],
"source": [
"KENLM_BUILD_PATH='/home/students/s434708/kenlm/build'"
]
},
{
"cell_type": "markdown",
"id": "9fc5cda3",
"metadata": {},
"source": [
"### Preprocessing danych"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "d42ddd87",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import csv\n",
"import regex as re"
"import torch\n",
"from torch import nn"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f84be210",
"execution_count": 2,
"id": "dc73124c",
"metadata": {},
"outputs": [],
"source": [
"def clean_text(text):\n",
" text = text.lower().replace('-\\\\n', '').replace('\\\\n', ' ')\n",
" text = re.sub(r'\\p{P}', '', text)\n",
"vocab_size = 20000\n",
"embed_size = 100\n",
"\n",
" return text"
"class SimpleTrigramNeuralLanguageModel(nn.Module):\n",
" def __init__(self, vocabulary_size, embedding_size):\n",
" super(SimpleTrigramNeuralLanguageModel, self).__init__()\n",
" self.embedding = nn.Embedding(vocabulary_size, embedding_size)\n",
" self.linear = nn.Linear(embedding_size, vocabulary_size)\n",
"\n",
" def forward(self, x):\n",
" x = self.embedding(x)\n",
" x = self.linear(x)\n",
" x = torch.softmax(x, dim=1)\n",
" return x"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "de0c12d6",
"execution_count": 3,
"id": "569b4c88",
"metadata": {},
"outputs": [],
"source": [
"train_data = pd.read_csv('train/in.tsv.xz', sep='\\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)\n",
"train_labels = pd.read_csv('train/expected.tsv', sep='\\t', error_bad_lines=False, warn_bad_lines=False, header=None, quoting=csv.QUOTE_NONE)\n",
"import regex as re\n",
"from itertools import islice, chain\n",
"from torchtext.vocab import build_vocab_from_iterator\n",
"from torch.utils.data import IterableDataset\n",
"\n",
"train_data = train_data[[6, 7]]\n",
"train_data = pd.concat([train_data, train_labels], axis=1)\n",
"def get_words_from_line(line):\n",
" line = line.rstrip()\n",
" yield '<s>'\n",
" for m in re.finditer(r'[\\p{L}0-9\\*]+|\\p{P}+', line):\n",
" yield m.group(0).lower()\n",
" yield '</s>'\n",
"\n",
"train_data['text'] = train_data[6] + train_data[0] + train_data[7]\n",
"train_data = train_data[['text']]\n",
"def get_word_lines_from_file(file_name):\n",
" with open(file_name, 'r') as fh:\n",
" for line in fh:\n",
" yield get_words_from_line(line)\n",
" \n",
"with open('processed_train.txt', 'w') as file:\n",
" for _, row in train_data.iterrows():\n",
" text = clean_text(str(row['text']))\n",
" file.write(text + '\\n')"
]
},
{
"cell_type": "markdown",
"id": "846b6b42",
"metadata": {},
"source": [
"### Model kenLM"
"def look_ahead_iterator(gen):\n",
" prev = None\n",
" for item in gen:\n",
" if prev is not None:\n",
" yield (prev, item)\n",
" prev = item"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3c74d4be",
"id": "f95cb913",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"=== 1/5 Counting and sorting n-grams ===\n",
"Reading /home/students/s434708/Desktop/Modelowanie Języka/challenging-america-word-gap-prediction-kenlm/processed_train.txt\n",
"----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
"********************************Warning: <s> appears in the input. All instances of <s>, </s>, and <unk> will be interpreted as whitespace.\n",
"********************************************************************\n",
"Unigram tokens 135911223 types 4381594\n",
"=== 2/5 Calculating and sorting adjusted counts ===\n",
"Chain sizes: 1:52579128 2:1295655936 3:2429355008 4:3886967808 5:5668495360\n",
"Statistics:\n",
"1 4381594 D1=0.841838 D2=1.01787 D3+=1.21057\n",
"2 26800631 D1=0.836734 D2=1.01657 D3+=1.19437\n",
"3 69811700 D1=0.878562 D2=1.11227 D3+=1.27889\n",
"4 104063034 D1=0.931257 D2=1.23707 D3+=1.36664\n",
"5 119487533 D1=0.938146 D2=1.3058 D3+=1.41614\n",
"Memory estimate for binary LM:\n",
"type MB\n",
"probing 6752 assuming -p 1.5\n",
"probing 7917 assuming -r models -p 1.5\n",
"trie 3572 without quantization\n",
"trie 2120 assuming -q 8 -b 8 quantization \n",
"trie 3104 assuming -a 22 array pointer compression\n",
"trie 1652 assuming -a 22 -q 8 -b 8 array pointer compression and quantization\n",
"=== 3/5 Calculating and sorting initial probabilities ===\n",
"Chain sizes: 1:52579128 2:428810096 3:1396234000 4:2497512816 5:3345650924\n",
"----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
"####################################################################################################\n",
"=== 4/5 Calculating and writing order-interpolated probabilities ===\n",
"Chain sizes: 1:52579128 2:428810096 3:1396234000 4:2497512816 5:3345650924\n",
"----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
"####################################################################################################\n",
"=== 5/5 Writing ARPA model ===\n",
"----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
"----------------------------------------------------------------------------------------------------Last input should have been poison. The program should end soon with an error. If it doesn't, there's a bug.\n",
"terminate called after throwing an instance of 'util::FDException'\n",
" what(): /home/students/s434708/kenlm/util/file.cc:228 in void util::WriteOrThrow(int, const void*, std::size_t) threw FDException because `ret < 1'.\n",
"No space left on device in /home/students/s434708/Desktop/Modelowanie Języka/challenging-america-word-gap-prediction-kenlm/model.arpa while writing 8189 bytes\n",
"/bin/bash: line 1: 26725 Aborted /home/students/s434708/kenlm/build/bin/lmplz -o 5 --skip_symbols < processed_train.txt > model.arpa\n"
]
}
],
"outputs": [],
"source": [
"!$KENLM_BUILD_PATH/bin/lmplz -o 5 --skip_symbols < processed_train.txt > model.arpa"
"class Bigrams(IterableDataset):\n",
" def __init__(self, text_file, vocabulary_size):\n",
" self.vocab = build_vocab_from_iterator(\n",
" get_word_lines_from_file(text_file),\n",
" max_tokens = vocabulary_size,\n",
" specials = ['<unk>']\n",
" )\n",
" self.vocab.set_default_index(self.vocab['<unk>'])\n",
" self.vocabulary_size = vocabulary_size\n",
" self.text_file = text_file\n",
"\n",
" def __iter__(self):\n",
" return look_ahead_iterator((self.vocab[t] for t in chain.from_iterable(get_word_lines_from_file(self.text_file))))"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "dc65780b",
"id": "7a51f2b1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Reading model.arpa\n",
"----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100\n",
"****************************************************************************************************\n",
"/home/students/s434708/kenlm/util/file.cc:86 in int util::CreateOrThrow(const char*) threw ErrnoException because `-1 == (ret = open(name, 0100 | 01000 | 02, 0400 | 0200 | (0400 >> 3) | ((0400 >> 3) >> 3)))'.\n",
"No space left on device while creating model.binary Byte: 94\n",
"ERROR\n"
"ename": "NameError",
"evalue": "name 'SimpleBigramNeuralLanguageModel' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/tmp/ipykernel_16179/3272155308.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mdevice\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'cuda'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mtrain_dataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBigrams\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'europarl.txt'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvocab_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSimpleTrigramNeuralLanguageModel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvocab_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0membed_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDataLoader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_dataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m2000\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0moptimizer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptim\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAdam\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/tmp/ipykernel_16179/1892442743.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, vocabulary_size, embedding_size)\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mclass\u001b[0m \u001b[0mSimpleTrigramNeuralLanguageModel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mModule\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvocabulary_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0membedding_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSimpleBigramNeuralLanguageModel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0membedding\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mEmbedding\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvocabulary_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0membedding_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlinear\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLinear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0membedding_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvocabulary_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'SimpleBigramNeuralLanguageModel' is not defined"
]
}
],
"source": [
"!$KENLM_BUILD_PATH/bin/build_binary model.arpa model.binary"
]
},
-{
-"cell_type": "code",
-"execution_count": 7,
-"id": "2087eb80",
-"metadata": {},
-"outputs": [],
-"source": [
-"!rm processed_train.txt"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 8,
-"id": "4ba1e592",
-"metadata": {},
-"outputs": [],
-"source": [
-"!rm model.arpa"
-]
-},
-{
-"cell_type": "markdown",
-"id": "e41f7951",
-"metadata": {},
-"source": [
-"### Predictions"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "6865301b",
-"metadata": {},
-"outputs": [],
-"source": [
-"import kenlm"
-]
-},
{
"cell_type": "code",
"execution_count": null,
"id": "e32de662",
"metadata": {},
"outputs": [],
"source": [
"test_str = 'really good'\n",
"from torch.utils.data import DataLoader\n",
"\n",
"model = kenlm.Model('model.binary')\n",
"print(model.score(test_str, bos = True, eos = True))"
"device = 'cuda'\n",
"train_dataset = Bigrams('europarl.txt', vocab_size)\n",
"model = SimpleTrigramNeuralLanguageModel(vocab_size, embed_size).to(device)\n",
"data = DataLoader(train_dataset, batch_size=2000)\n",
"optimizer = torch.optim.Adam(model.parameters())\n",
"criterion = torch.nn.NLLLoss()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a18b6ebd",
"id": "474194ae",
"metadata": {},
"outputs": [],
"source": [
"for i in model.full_scores(test_str):\n",
" print(i)"
"for epoch in range(1):\n",
" model.train()\n",
" for x, y in data:\n",
" x = x.to(device)\n",
" y = y.to(device)\n",
" optimizer.zero_grad()\n",
" outputs = model(x)\n",
" loss = criterion(torch.log(outputs), y)\n",
" if step % 100 == 0:\n",
" print(step, loss)\n",
" step += 1\n",
" loss.backward()\n",
" optimizer.step()\n",
" \n",
"torch.save(model.state_dict(), 'model/model1.bin')"
]
}
],