jupyter for colab
parent
b22b9c3534
commit
be308d0c3c
168
run_bigram.ipynb
Normal file
@@ -0,0 +1,168 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import csv\n",
"import itertools\n",
"\n",
"import pandas as pd\n",
"import regex as re\n",
"import torch\n",
"from torch import nn\n",
"from torch.utils.data import DataLoader, IterableDataset\n",
"from torchtext.vocab import build_vocab_from_iterator\n",
"\n",
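"# --- preprocessing and tokenisation ---\n",
"# data_preprocessing lowercases, drops literal '\\n' markers and hyphens,\n",
"# expands common contractions and strips punctuation (\\p{P}).\n",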
"def data_preprocessing(text):\n",
"    return re.sub(r'\\p{P}', '', text.lower().replace('-\\\\n', '').replace('\\\\n', ' ').replace(\"'ll\", \" will\").replace(\"-\", \"\").replace(\"'ve\", \" have\").replace(\"'s\", \" is\"))\n",
"\n",
"# Tokenise a line into lowercase words/punctuation runs, wrapped in sentence\n",
"# markers <s> ... </s>.\n",
"def get_words_from_line(line):\n",
"    line = line.rstrip()\n",
"    yield '<s>'\n",
"    for m in re.finditer(r'[\\p{L}0-9\\*]+|\\p{P}+', line):\n",
"        yield m.group(0).lower()\n",
"    yield '</s>'\n",
"\n",
"\n",
"def get_word_lines_from_file(data):\n",
"    for line in data:\n",
"        yield get_words_from_line(line)\n",
"\n",
"\n",
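"# A bigram language model: embed the previous word, project the embedding to\n",
"# vocabulary size, and apply softmax to get P(next word | previous word).\n",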
"class SimpleBigramNeuralLanguageModel(nn.Module):\n",
"    def __init__(self, vocabulary_size, embedding_size):\n",
"        super(SimpleBigramNeuralLanguageModel, self).__init__()\n",
"        self.model = nn.Sequential(\n",
"            nn.Embedding(vocabulary_size, embedding_size),\n",
"            nn.Linear(embedding_size, vocabulary_size),\n",
"            nn.Softmax(dim=1)  # explicit dim: input (batch,) -> output (batch, vocab)\n",
"        )\n",
"\n",
"    def forward(self, x):\n",
"        return self.model(x)\n",
"\n",
"\n",
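"# Pair up consecutive items of a stream, e.g.\n",
"# list(look_ahead_iterator(iter('abc'))) == [('a', 'b'), ('b', 'c')]\n",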
"def look_ahead_iterator(gen):\n",
"    prev = None\n",
"    for item in gen:\n",
"        if prev is not None:\n",
"            yield (prev, item)\n",
"        prev = item\n",
"\n",
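"# Iterable dataset of (previous id, next id) bigrams; the vocabulary is built\n",
"# once over the corpus, with '<unk>' as the default for out-of-vocabulary words.\n",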
"class Bigrams(IterableDataset):\n",
"    def __init__(self, text_file, vocabulary_size):\n",
"        self.vocab = build_vocab_from_iterator(\n",
"            get_word_lines_from_file(text_file),\n",
"            max_tokens=vocabulary_size,\n",
"            specials=['<unk>'])\n",
"        self.vocab.set_default_index(self.vocab['<unk>'])\n",
"        self.vocabulary_size = vocabulary_size\n",
"        self.text_file = text_file\n",
"\n",
"    def __iter__(self):\n",
"        return look_ahead_iterator(\n",
"            (self.vocab[t] for t in itertools.chain.from_iterable(get_word_lines_from_file(self.text_file))))\n",
"\n",
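"# --- training data: contexts from in.tsv.xz, gap words from expected.tsv ---\n",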
"in_file = 'train/in.tsv.xz'\n",
"out_file = 'train/expected.tsv'\n",
"\n",
"train_set = pd.read_csv(\n",
"    in_file,\n",
"    sep='\\t',\n",
"    header=None,\n",
"    quoting=csv.QUOTE_NONE,\n",
"    nrows=35000)\n",
"\n",
"train_labels = pd.read_csv(\n",
"    out_file,\n",
"    sep='\\t',\n",
"    header=None,\n",
"    quoting=csv.QUOTE_NONE,\n",
"    nrows=35000)\n",
"\n",
"# Rebuild full passages as left context + expected gap word + right context,\n",
"# assuming column 6 is the left and column 7 the right context.\n",
"data = train_set[6] + ' ' + train_labels[0] + ' ' + train_set[7]\n",
"data = data.apply(data_preprocessing)\n",
"\n",
"vocab_size = 30000\n",
"embed_size = 150\n",
"\n",
"bigram_data = Bigrams(data, vocab_size)\n",
"\n",
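"# Training: the model emits probabilities, so torch.log is applied before\n",
"# NLLLoss (which expects log-probabilities); log-softmax would be the more\n",
"# numerically stable choice.\n",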
"device = 'cpu'\n",
"model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)\n",
"train_loader = DataLoader(bigram_data, batch_size=5000)\n",
"optimizer = torch.optim.Adam(model.parameters())\n",
"criterion = torch.nn.NLLLoss()\n",
"\n",
"model.train()\n",
"step = 0\n",
"for x, y in train_loader:\n",
"    x = x.to(device)\n",
"    y = y.to(device)\n",
"    optimizer.zero_grad()\n",
"    ypredicted = model(x)\n",
"    loss = criterion(torch.log(ypredicted), y)\n",
"    if step % 100 == 0:\n",
"        print(step, loss.item())\n",
"    step += 1\n",
"    loss.backward()\n",
"    optimizer.step()\n",
"\n",
"torch.save(model.state_dict(), 'model1.bin')\n",
"\n",
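"# Fixed fallback distribution, used when a row yields no previous word.\n",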
"vocab = bigram_data.vocab\n",
"prediction = 'the:0.03 be:0.03 to:0.03 of:0.025 and:0.025 a:0.025 in:0.020 that:0.020 have:0.015 I:0.010 it:0.010 for:0.010 not:0.010 on:0.010 with:0.010 he:0.010 as:0.010 you:0.010 do:0.010 at:0.010 :0.77'\n",
"\n",
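"# Format the top-8 next-word candidates for a given previous word as the\n",
"# 'word:prob' pairs used in the fallback string above.\n",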
"def predict_word(w):\n",
"    ixs = torch.tensor(vocab.forward(w)).to(device)\n",
"    out = model(ixs)\n",
"    top = torch.topk(out[0], 8)\n",
"    top_indices = top.indices.tolist()\n",
"    top_probs = top.values.tolist()\n",
"    top_words = vocab.lookup_tokens(top_indices)\n",
"    pred_str = \"\"\n",
"    for word, prob in zip(top_words, top_probs):\n",
"        pred_str += f\"{word}:{prob} \"\n",
"    return pred_str\n",
"\n",
"\n",
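"# For each dev/test row, take the last token of the preprocessed context\n",
"# (column 6), predict the next word, and write one line per row to out.tsv.\n",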
"def predict(folder):\n",
"    x = pd.read_csv(f'{folder}/in.tsv.xz', sep='\\t', header=None, quoting=csv.QUOTE_NONE, on_bad_lines='skip', encoding=\"UTF-8\")[6]\n",
"    x = x.apply(data_preprocessing)\n",
"\n",
"    with open(f'{folder}/out.tsv', \"w+\", encoding=\"UTF-8\") as f:\n",
"        for row in x:\n",
"            before = None\n",
"            # exhaust the generator; 'before' ends up as the last token\n",
"            for before in get_words_from_line(str(row)):\n",
"                pass\n",
"            if before is None:\n",
"                pred_str = prediction\n",
"            else:\n",
"                pred_str = predict_word([before])\n",
"\n",
"            pred_str = pred_str.strip()\n",
"            f.write(pred_str + \"\\n\")\n",
"\n",
"predict(\"dev-0\")\n",
"predict(\"test-A\")"
]
}
],
"metadata": {
"language_info": {
"name": "python"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}