434766 neural

commit d51cbccf29 (parent da9a7ccd36)

dev-0/out.tsv | 21038 lines (file diff suppressed because it is too large)
nn-1.ipynb | 428 lines (new file)
@@ -0,0 +1,428 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from torchtext.vocab import build_vocab_from_iterator\n",
"from torch.utils.data import DataLoader\n",
"import torch\n",
"from torch import nn\n",
"import pandas as pd\n",
"import nltk\n",
"import regex as re\n",
"import csv\n",
"import itertools\n",
"from nltk import word_tokenize\n",
"from os.path import exists\n",
"\n",
"\n",
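"# clean(): lowercase the text, strip a fixed set of punctuation, and expand common contractions\n",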
"def clean(text):\n",
|
||||||
|
" text = str(text).strip().lower()\n",
|
||||||
|
" text = re.sub(\"’|>|<|\\.|\\\\|\\\"|”|-|,|\\*|:|\\/\", \"\", text)\n",
|
||||||
|
" text = text.replace('\\\\n', \" \").replace(\"'t\", \" not\").replace(\"'s\", \" is\").replace(\"'ll\", \" will\").replace(\"'m\", \" am\").replace(\"'ve\", \" have\")\n",
|
||||||
|
" text = text.replace(\"'\", \"\")\n",
|
||||||
|
" return text\n",
|
||||||
|
"\n",
|
||||||
|
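"# get_words_from_line(): tokenise one line into word and punctuation runs with a Unicode regex,\n",
"# optionally wrapped in <s> ... </s> sentence markers\n",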
"def get_words_from_line(line, specials = True):\n",
|
||||||
|
" line = line.rstrip()\n",
|
||||||
|
" if specials:\n",
|
||||||
|
" yield '<s>'\n",
|
||||||
|
" for m in re.finditer(r'[\\p{L}0-9\\*]+|\\p{P}+', line):\n",
|
||||||
|
" yield m.group(0).lower()\n",
|
||||||
|
" if specials:\n",
|
||||||
|
" yield '</s>'\n",
|
||||||
|
"\n",
|
||||||
|
"def get_word_lines_from_data(d):\n",
|
||||||
|
" for line in d:\n",
|
||||||
|
" yield get_words_from_line(line)\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
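"# bigram language model: embed the previous word, project to vocabulary size,\n",
"# and softmax over the next word\n",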
"class SimpleBigramNeuralLanguageModel(torch.nn.Module):\n",
|
||||||
|
" \n",
|
||||||
|
" def __init__(self, vocabulary_size, embedding_size):\n",
|
||||||
|
" super(SimpleBigramNeuralLanguageModel, self).__init__()\n",
|
||||||
|
" self.model = nn.Sequential(\n",
|
||||||
|
" nn.Embedding(vocabulary_size, embedding_size),\n",
|
||||||
|
" nn.Linear(embedding_size, vocabulary_size),\n",
|
||||||
|
" nn.Softmax()\n",
|
||||||
|
" )\n",
|
||||||
|
"\n",
|
||||||
|
" def forward(self, x):\n",
|
||||||
|
" return self.model(x)\n",
|
||||||
|
"\n",
|
||||||
|
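"# look_ahead_iterator(): turn a stream of tokens into consecutive (previous, next) training pairs\n",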
"def look_ahead_iterator(gen):\n",
|
||||||
|
" w1 = None\n",
|
||||||
|
" for item in gen:\n",
|
||||||
|
" if w1 is not None:\n",
|
||||||
|
" yield (w1, item)\n",
|
||||||
|
" w1 = item\n",
|
||||||
|
" \n",
|
||||||
|
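"# Bigrams: an IterableDataset that builds the vocabulary once, then streams (w1, w2) index pairs\n",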
"class Bigrams(torch.utils.data.IterableDataset):\n",
|
||||||
|
" def __init__(self, data, vocabulary_size):\n",
|
||||||
|
" self.vocab = build_vocab_from_iterator(\n",
|
||||||
|
" get_word_lines_from_data(data),\n",
|
||||||
|
" max_tokens = vocabulary_size,\n",
|
||||||
|
" specials = ['<unk>'])\n",
|
||||||
|
" self.vocab.set_default_index(self.vocab['<unk>'])\n",
|
||||||
|
" self.vocabulary_size = vocabulary_size\n",
|
||||||
|
" self.data = data\n",
|
||||||
|
"\n",
|
||||||
|
" def __iter__(self):\n",
|
||||||
|
" return look_ahead_iterator(\n",
|
||||||
|
" (self.vocab[t] for t in itertools.chain.from_iterable(get_word_lines_from_data(self.data))))\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"# ładowanie danych treningowych\n",
|
||||||
|
"in_file = 'train/in.tsv.xz'\n",
|
||||||
|
"out_file = 'train/expected.tsv'\n",
|
||||||
|
"\n",
|
||||||
|
"X_train = pd.read_csv(in_file, sep='\\t', header=None, quoting=csv.QUOTE_NONE, nrows=200000, on_bad_lines=\"skip\", encoding=\"UTF-8\")\n",
|
||||||
|
"Y_train = pd.read_csv(out_file, sep='\\t', header=None, quoting=csv.QUOTE_NONE, nrows=200000, on_bad_lines=\"skip\", encoding=\"UTF-8\")\n",
|
||||||
|
"\n",
|
||||||
|
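"# column 6 holds the left context and column 7 the right context;\n",
"# the expected word (column 0 of Y_train) is spliced between them\n",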
"X_train = X_train[[6, 7]]\n",
|
||||||
|
"X_train = pd.concat([X_train, Y_train], axis=1)\n",
|
||||||
|
"X_train = X_train[6] + X_train[0] + X_train[7]\n",
|
||||||
|
"X_train = X_train.apply(clean)\n",
|
||||||
|
"vocab_size = 30000\n",
|
||||||
|
"embed_size = 150\n",
|
||||||
|
"Dataset = Bigrams(X_train, vocab_size)\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
" Epoka 0--------------------------------------------------------\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "stderr",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"/home/przemek/anaconda3/envs/env/lib/python3.9/site-packages/torch/nn/modules/container.py:141: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
|
||||||
|
" input = module(input)\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"0 tensor(10.4640, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"100 tensor(8.8699, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"200 tensor(7.8760, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"300 tensor(7.3941, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"400 tensor(6.9599, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"500 tensor(6.7027, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"600 tensor(6.5332, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"700 tensor(6.4762, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"800 tensor(6.2756, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"900 tensor(6.2160, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"1000 tensor(6.2766, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"1100 tensor(6.2922, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"1200 tensor(6.0532, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"1300 tensor(6.0914, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"1400 tensor(5.9667, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"1500 tensor(6.1284, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"1600 tensor(6.1015, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"1700 tensor(6.1512, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"1800 tensor(6.1428, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"1900 tensor(6.1808, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"2000 tensor(6.3026, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"2100 tensor(5.9979, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"2200 tensor(6.1723, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"2300 tensor(6.0850, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"2400 tensor(5.9631, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"2500 tensor(6.0300, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"2600 tensor(5.9996, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"2700 tensor(5.9015, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"2800 tensor(5.9195, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"2900 tensor(5.8945, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"3000 tensor(6.1416, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"3100 tensor(6.1716, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"3200 tensor(6.1329, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"3300 tensor(6.0073, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"3400 tensor(6.0445, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"3500 tensor(6.0357, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"3600 tensor(5.9790, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"3700 tensor(5.8562, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"3800 tensor(5.8810, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"3900 tensor(5.9466, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"4000 tensor(6.0413, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"4100 tensor(5.8879, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"4200 tensor(5.9470, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"4300 tensor(5.9991, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"4400 tensor(6.1229, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"4500 tensor(5.8253, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"4600 tensor(5.8551, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"4700 tensor(5.8695, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"4800 tensor(5.8018, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"4900 tensor(5.9809, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"5000 tensor(5.8554, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"5100 tensor(5.9074, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"5200 tensor(5.8030, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"5300 tensor(5.8432, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"5400 tensor(5.8057, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"5500 tensor(5.9464, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"5600 tensor(6.0155, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"5700 tensor(5.8322, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"5800 tensor(5.8041, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"5900 tensor(5.9783, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"6000 tensor(6.0641, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"6100 tensor(5.8326, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"6200 tensor(5.9006, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"6300 tensor(5.8767, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"6400 tensor(5.8549, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"6500 tensor(5.9623, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"6600 tensor(5.7852, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"6700 tensor(5.9007, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"6800 tensor(6.0006, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"6900 tensor(5.8717, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"7000 tensor(5.8211, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"7100 tensor(6.0302, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"7200 tensor(5.8377, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"7300 tensor(6.0008, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"7400 tensor(5.9733, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"7500 tensor(6.0819, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"7600 tensor(5.8545, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"7700 tensor(5.8242, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"7800 tensor(5.8449, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"7900 tensor(5.9512, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"8000 tensor(5.6949, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"8100 tensor(5.8212, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"8200 tensor(6.2209, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
" Epoka 1--------------------------------------------------------\n",
|
||||||
|
"8300 tensor(5.9703, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"8400 tensor(5.8215, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"8500 tensor(5.8680, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"8600 tensor(5.6376, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"8700 tensor(5.8291, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"8800 tensor(5.8815, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"8900 tensor(5.7486, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"9000 tensor(5.8889, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"9100 tensor(5.8058, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"9200 tensor(5.8526, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"9300 tensor(5.8363, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"9400 tensor(5.7206, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"9500 tensor(5.7525, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"9600 tensor(5.8370, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"9700 tensor(5.8589, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"9800 tensor(5.7505, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"9900 tensor(5.7570, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"10000 tensor(5.9025, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"10100 tensor(5.7193, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"10200 tensor(5.8267, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"10300 tensor(5.9407, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"10400 tensor(5.8414, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"10500 tensor(5.9946, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"10600 tensor(5.8745, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"10700 tensor(5.7626, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"10800 tensor(5.7495, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"10900 tensor(5.8720, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"11000 tensor(5.8455, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"11100 tensor(5.7123, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"11200 tensor(5.7896, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"11300 tensor(5.8969, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"11400 tensor(5.7743, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"11500 tensor(5.6588, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"11600 tensor(5.8743, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"11700 tensor(5.8964, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"11800 tensor(5.7968, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"11900 tensor(5.8222, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"12000 tensor(5.7421, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"12100 tensor(5.8565, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"12200 tensor(5.7788, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"12300 tensor(5.7469, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"12400 tensor(5.8372, device='cuda:0', grad_fn=<NllLossBackward0>)\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"12500 tensor(5.7905, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"12600 tensor(5.8497, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"12700 tensor(5.7814, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"12800 tensor(5.7847, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"12900 tensor(5.6603, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"13000 tensor(5.7659, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"13100 tensor(5.8337, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"13200 tensor(5.7703, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"13300 tensor(5.8301, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"13400 tensor(5.6971, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"13500 tensor(5.8216, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"13600 tensor(5.7899, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"13700 tensor(5.7258, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"13800 tensor(5.9402, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"13900 tensor(5.8674, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"14000 tensor(5.7627, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"14100 tensor(5.8849, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"14200 tensor(5.7721, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"14300 tensor(5.7737, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"14400 tensor(5.7790, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"14500 tensor(5.8570, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"14600 tensor(5.8281, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"14700 tensor(5.7613, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"14800 tensor(5.8226, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"14900 tensor(5.7584, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"15000 tensor(5.7686, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"15100 tensor(5.8094, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"15200 tensor(5.7397, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"15300 tensor(5.7407, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"15400 tensor(5.5733, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"15500 tensor(5.5254, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"15600 tensor(5.7856, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"15700 tensor(5.6769, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"15800 tensor(5.5810, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"15900 tensor(5.8195, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"16000 tensor(5.8086, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"16100 tensor(5.8340, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"16200 tensor(5.8087, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"16300 tensor(5.8688, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"16400 tensor(5.6974, device='cuda:0', grad_fn=<NllLossBackward0>)\n",
|
||||||
|
"16500 tensor(5.8742, device='cuda:0', grad_fn=<NllLossBackward0>)\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
|
||||||
|
"model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)\n",
|
||||||
|
"\n",
|
||||||
|
"if(not exists('nn_model2.bin')):\n",
|
||||||
|
" data = DataLoader(Dataset, batch_size=8000)\n",
|
||||||
|
" optimizer = torch.optim.Adam(model.parameters())\n",
|
||||||
|
" criterion = torch.nn.NLLLoss()\n",
|
||||||
|
"\n",
|
||||||
|
" model.train()\n",
|
||||||
|
" step = 0\n",
|
||||||
|
" for i in range(2):\n",
|
||||||
|
" print(f\" Epoka {i}--------------------------------------------------------\")\n",
|
||||||
|
" for x, y in data:\n",
|
||||||
|
" x = x.to(device)\n",
|
||||||
|
" y = y.to(device)\n",
|
||||||
|
" optimizer.zero_grad()\n",
|
||||||
|
" ypredicted = model(x)\n",
|
||||||
|
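"            # NLLLoss expects log-probabilities, so take the log of the softmax output\n",
"            # (LogSoftmax + NLLLoss, or CrossEntropyLoss on raw logits, is the numerically safer equivalent)\n",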
" loss = criterion(torch.log(ypredicted), y)\n",
|
||||||
|
" if step % 100 == 0:\n",
|
||||||
|
" print(step, loss)\n",
|
||||||
|
" step += 1\n",
|
||||||
|
" loss.backward()\n",
|
||||||
|
" optimizer.step()\n",
|
||||||
|
"\n",
|
||||||
|
" torch.save(model.state_dict(), 'nn_model2.bin')\n",
|
||||||
|
"else:\n",
|
||||||
|
" model.load_state_dict(torch.load('nn_model2.bin')) \n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"vocab = Dataset.vocab\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"# nltk.download('punkt')\n",
|
||||||
|
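"# predict_word(): look up the context token(s), run the model, and format the\n",
"# top-8 next-word candidates as 'word:probability' pairs\n",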
"def predict_word(ws):\n",
|
||||||
|
" ixs = torch.tensor(vocab.forward(ws)).to(device)\n",
|
||||||
|
" out = model(ixs)\n",
|
||||||
|
" top = torch.topk(out[0], 8)\n",
|
||||||
|
" top_indices = top.indices.tolist()\n",
|
||||||
|
" top_probs = top.values.tolist()\n",
|
||||||
|
" top_words = vocab.lookup_tokens(top_indices)\n",
|
||||||
|
" pred_str = \"\"\n",
|
||||||
|
" for word, prob in list(zip(top_words, top_probs)):\n",
|
||||||
|
" pred_str += f\"{word}:{prob} \"\n",
|
||||||
|
"# pred_str += f':0.01'\n",
|
||||||
|
" return pred_str\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
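"# word_gap_prediction(): for each row of in.tsv.xz, take the last token of the left\n",
"# context (column 6) and write the top predictions for the gap word to out.tsv\n",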
"def word_gap_prediction(file):\n",
|
||||||
|
" X_test = pd.read_csv(f'{file}/in.tsv.xz', sep='\\t', header=None, quoting=csv.QUOTE_NONE, on_bad_lines='skip', encoding=\"UTF-8\")[6]\n",
|
||||||
|
" X_test = X_test.apply(clean)\n",
|
||||||
|
" with open(f'{file}/out.tsv', \"w+\", encoding=\"UTF-8\") as f:\n",
|
||||||
|
" for row in X_test:\n",
|
||||||
|
" result = {}\n",
|
||||||
|
" before = None\n",
|
||||||
|
" for before in get_words_from_line(clean(str(row)), False):\n",
|
||||||
|
" pass\n",
|
||||||
|
" before = [before]\n",
|
||||||
|
" if(len(before) < 1):\n",
|
||||||
|
" pred_str = \"a:0.2 the:0.2 to:0.2 of:0.1 and:0.1 of:0.1 :0.1\"\n",
|
||||||
|
" else:\n",
|
||||||
|
" pred_str = predict_word(before)\n",
|
||||||
|
" pred_str = pred_str.strip()\n",
|
||||||
|
" f.write(pred_str + \"\\n\")\n"
|
||||||
|
]
|
||||||
|
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/przemek/anaconda3/envs/env/lib/python3.9/site-packages/torch/nn/modules/container.py:141: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
"  input = module(input)\n"
]
}
],
"source": [
"word_gap_prediction(\"dev-0/\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/przemek/anaconda3/envs/env/lib/python3.9/site-packages/torch/nn/modules/container.py:141: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
"  input = module(input)\n"
]
}
],
"source": [
"word_gap_prediction(\"test-A/\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.12"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
nn_model2.bin | BIN (new file, binary file not shown)

test-A/out.tsv | 14828 lines (file diff suppressed because it is too large)