{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "d42ddd87",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from torch import nn"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "dc73124c",
"metadata": {},
"outputs": [],
"source": [
"vocab_size = 20000\n",
"embed_size = 100\n",
"\n",
"# Note: despite the class name, the network conditions on a single previous word,\n",
"# so it is effectively a bigram language model (cf. the Bigrams dataset below).\n",
"class SimpleTrigramNeuralLanguageModel(nn.Module):\n",
"    def __init__(self, vocabulary_size, embedding_size):\n",
"        super(SimpleTrigramNeuralLanguageModel, self).__init__()\n",
"        # embed the previous word, then project the embedding back onto the vocabulary\n",
"        self.embedding = nn.Embedding(vocabulary_size, embedding_size)\n",
"        self.linear = nn.Linear(embedding_size, vocabulary_size)\n",
"\n",
"    def forward(self, x):\n",
"        x = self.embedding(x)\n",
"        x = self.linear(x)\n",
"        # probability distribution over the next word\n",
"        x = torch.softmax(x, dim=1)\n",
"        return x"
]
},
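{
"cell_type": "code",
"execution_count": null,
"id": "a1b2c3d4",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sanity check (a sketch, not part of the training pipeline): run the\n",
"# model above on a few random word indices and confirm that every row of the output\n",
"# is a probability distribution over the vocabulary.\n",
"_check_model = SimpleTrigramNeuralLanguageModel(vocab_size, embed_size)\n",
"_dummy_batch = torch.randint(0, vocab_size, (4,))\n",
"_probs = _check_model(_dummy_batch)\n",
"print(_probs.shape)        # expected: torch.Size([4, 20000])\n",
"print(_probs.sum(dim=1))   # each entry should be approximately 1.0"
]
},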
{
"cell_type": "code",
"execution_count": 3,
"id": "569b4c88",
2022-04-25 10:10:22 +02:00
"metadata": {},
"outputs": [],
"source": [
"import regex as re\n",
"from itertools import islice, chain\n",
"from torchtext.vocab import build_vocab_from_iterator\n",
"from torch.utils.data import IterableDataset\n",
"\n",
"def get_words_from_line(line):\n",
"    line = line.rstrip()\n",
"    yield '<s>'\n",
"    for m in re.finditer(r'[\\p{L}0-9\\*]+|\\p{P}+', line):\n",
"        yield m.group(0).lower()\n",
"    yield '</s>'\n",
"\n",
"def get_word_lines_from_file(file_name):\n",
"    with open(file_name, 'r') as fh:\n",
"        for line in fh:\n",
"            yield get_words_from_line(line)\n",
"\n",
"def look_ahead_iterator(gen):\n",
"    # turn a flat token stream into consecutive (previous word, current word) pairs\n",
"    prev = None\n",
"    for item in gen:\n",
"        if prev is not None:\n",
"            yield (prev, item)\n",
"        prev = item"
]
},
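{
"cell_type": "code",
"execution_count": null,
"id": "b2c3d4e5",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative check of the helpers above on a made-up sentence: get_words_from_line\n",
"# wraps the tokens in <s>/</s> markers, and look_ahead_iterator turns the stream into\n",
"# the (previous word, current word) pairs the model is trained on.\n",
"_sample = 'The quick brown fox jumped over the lazy dog.'\n",
"print(list(get_words_from_line(_sample)))\n",
"print(list(look_ahead_iterator(get_words_from_line(_sample))))"
]
},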
{
"cell_type": "code",
"execution_count": 4,
"id": "f95cb913",
2022-04-25 10:10:22 +02:00
"metadata": {},
"outputs": [],
2022-04-25 10:10:22 +02:00
"source": [
"class Bigrams(IterableDataset):\n",
"    def __init__(self, text_file, vocabulary_size):\n",
"        # build the vocabulary from the corpus, reserving an <unk> token for rare words\n",
"        self.vocab = build_vocab_from_iterator(\n",
"            get_word_lines_from_file(text_file),\n",
"            max_tokens = vocabulary_size,\n",
"            specials = ['<unk>']\n",
"        )\n",
"        self.vocab.set_default_index(self.vocab['<unk>'])\n",
"        self.vocabulary_size = vocabulary_size\n",
"        self.text_file = text_file\n",
"\n",
"    def __iter__(self):\n",
"        # stream (previous word, current word) index pairs over the whole corpus\n",
"        return look_ahead_iterator((self.vocab[t] for t in chain.from_iterable(get_word_lines_from_file(self.text_file))))"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "7a51f2b1",
2022-04-25 10:10:22 +02:00
"metadata": {},
"outputs": [
{
2022-05-01 12:06:02 +02:00
"ename": "NameError",
"evalue": "name 'SimpleBigramNeuralLanguageModel' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/tmp/ipykernel_16179/3272155308.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mdevice\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'cuda'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mtrain_dataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBigrams\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'europarl.txt'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvocab_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSimpleTrigramNeuralLanguageModel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvocab_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0membed_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDataLoader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_dataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m2000\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0moptimizer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptim\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAdam\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/tmp/ipykernel_16179/1892442743.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, vocabulary_size, embedding_size)\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mclass\u001b[0m \u001b[0mSimpleTrigramNeuralLanguageModel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mModule\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvocabulary_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0membedding_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSimpleBigramNeuralLanguageModel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0membedding\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mEmbedding\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvocabulary_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0membedding_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlinear\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLinear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0membedding_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvocabulary_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'SimpleBigramNeuralLanguageModel' is not defined"
2022-04-25 10:10:22 +02:00
]
}
],
"source": [
"from torch.utils.data import DataLoader\n",
"\n",
"device = 'cuda'\n",
"train_dataset = Bigrams('europarl.txt', vocab_size)\n",
"model = SimpleTrigramNeuralLanguageModel(vocab_size, embed_size).to(device)\n",
"data = DataLoader(train_dataset, batch_size=2000)\n",
"optimizer = torch.optim.Adam(model.parameters())\n",
"criterion = torch.nn.NLLLoss()"
]
},
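{
"cell_type": "code",
"execution_count": null,
"id": "c3d4e5f6",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative check (assumes 'europarl.txt' is present, as in the cell above): pull a\n",
"# single batch from the DataLoader to confirm it yields two tensors of word indices,\n",
"# the previous words and the words to be predicted.\n",
"_x, _y = next(iter(data))\n",
"print(_x.shape, _y.shape)   # expected: torch.Size([2000]) torch.Size([2000])\n",
"print(_x[:5], _y[:5])"
]
},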
{
"cell_type": "code",
"execution_count": null,
"id": "474194ae",
2022-04-25 10:10:22 +02:00
"metadata": {},
"outputs": [],
"source": [
"step = 0\n",
"for epoch in range(1):\n",
"    model.train()\n",
"    for x, y in data:\n",
"        x = x.to(device)\n",
"        y = y.to(device)\n",
"        optimizer.zero_grad()\n",
"        outputs = model(x)\n",
"        # NLLLoss expects log-probabilities, hence torch.log on the softmax output\n",
"        loss = criterion(torch.log(outputs), y)\n",
"        if step % 100 == 0:\n",
"            print(step, loss)\n",
"        step += 1\n",
"        loss.backward()\n",
"        optimizer.step()\n",
"\n",
"torch.save(model.state_dict(), 'model/model1.bin')"
]
},
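{
"cell_type": "code",
"execution_count": null,
"id": "d4e5f6a7",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative use of the trained model (assumes the training cell above has been run\n",
"# and that 'the' is in the vocabulary): given a previous word, list the most probable\n",
"# next words using the torchtext Vocab lookup.\n",
"vocab = train_dataset.vocab\n",
"model.eval()\n",
"with torch.no_grad():\n",
"    ixs = torch.tensor([vocab['the']]).to(device)\n",
"    probs = model(ixs)\n",
"    top = torch.topk(probs[0], 10)\n",
"print(list(zip(vocab.lookup_tokens(top.indices.tolist()), top.values.tolist())))"
]
}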
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}