add code neural-network-simple
parent 6d7e6b5733
commit 4aa1dc1858
gonito.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
description: neural network, bigram
tags:
  - neural-network
  - bigram
params:
  epochs: 1
  vocab_size: 10000
  embed_size: 250
  batch_size: 5000
  num_of_top: 10
neural_network_simple_colab.ipynb (new file, 334 lines)
@@ -0,0 +1,334 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "PAM8swqfl3YC"
   },
   "outputs": [],
   "source": [
    "import itertools\n",
    "import lzma\n",
    "import regex as re\n",
    "import torch\n",
    "from torch import nn\n",
    "from torch.utils.data import IterableDataset, DataLoader\n",
    "from torchtext.vocab import build_vocab_from_iterator\n",
    "import pickle\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "52BQle50l92y",
    "outputId": "1f98398d-f385-4711-c2b7-3abe7418fbdb"
   },
   "outputs": [],
   "source": [
    "from google.colab import drive\n",
    "drive.mount('/content/drive')"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {
    "id": "PNb3_zqUl3YD"
   },
   "source": [
    "### Definitions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "b_6d7n2al3YE"
   },
   "outputs": [],
   "source": [
    "def clean_line(line: str):\n",
    "    # fields 6 and 7 of the TSV hold the text before and after the gap\n",
    "    separated = line.split('\\t')\n",
    "    prefix = separated[6].replace(r'\\n', ' ').strip()\n",
    "    suffix = separated[7].replace(r'\\n', ' ').strip()\n",
    "    return prefix + ' ' + suffix\n",
    "\n",
    "def get_words_from_line(line):\n",
    "    line = clean_line(line)\n",
    "    for word in line.split():\n",
    "        yield word\n",
    "\n",
    "def get_word_lines_from_file(file_name):\n",
    "    # yields one generator of words per line of the compressed file\n",
    "    with lzma.open(file_name, mode='rt', encoding='utf-8') as fid:\n",
    "        for line in fid:\n",
    "            yield get_words_from_line(line)\n",
    "\n",
    "\n",
    "def look_ahead_iterator(gen):\n",
    "    # turn a token stream into consecutive (previous, current) bigram pairs\n",
    "    prev = None\n",
    "    for item in gen:\n",
    "        if prev is not None:\n",
    "            yield (prev, item)\n",
    "        prev = item\n",
    "\n",
    "def predict(word: str, num_of_top: int) -> str:\n",
    "    ixs = torch.tensor(vocab.forward([word])).to(device)\n",
    "    out = model(ixs)\n",
    "    top = torch.topk(out[0], num_of_top)\n",
    "    top_indices = top.indices.tolist()\n",
    "    top_probs = top.values.tolist()\n",
    "    top_words = vocab.lookup_tokens(top_indices)\n",
    "    zipped = list(zip(top_words, top_probs))\n",
    "    # map '<unk>' to the empty token and reserve the last slot for the remaining mass\n",
    "    zipped = [(w, p) if w != '<unk>' else ('', p) for w, p in zipped]\n",
    "    zipped[-1] = ('', zipped[-1][1])\n",
    "    return ' '.join([f'{element[0]}:{element[1]}' for element in zipped])\n",
    "\n",
    "def execute(path):\n",
    "    # predict a continuation for each line of in.tsv.xz and write it to out.tsv\n",
    "    with lzma.open(f'{path}/in.tsv.xz', 'rt', encoding='utf-8') as f, \\\n",
    "            open(f'{path}/out.tsv', 'w', encoding='utf-8') as out:\n",
    "        for line in f:\n",
    "            prefix = line.split('\\t')[6]\n",
    "            left = prefix.replace(r'\\n', ' ').split()[-1]\n",
    "            result = predict(left, num_of_top)\n",
    "            out.write(f\"{result}\\n\")\n"
   ]
  },
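  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A minimal sanity check of `look_ahead_iterator` (illustrative, not part of the original pipeline): it should turn a token stream into consecutive `(previous, current)` bigram pairs."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# toy stream; the real input is the token-id stream produced by Bigrams below\n",
    "list(look_ahead_iterator(['in', 'the', 'united', 'states']))\n",
    "# expected: [('in', 'the'), ('the', 'united'), ('united', 'states')]"
   ]
  },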
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ZfV8fDhyl3YF"
   },
   "outputs": [],
   "source": [
    "class Bigrams(IterableDataset):\n",
    "    def __init__(self, text_file, vocabulary_size):\n",
    "        # relies on the globally built `vocab` (see the vocabulary section below)\n",
    "        self.vocab = vocab\n",
    "        self.vocab.set_default_index(self.vocab['<unk>'])\n",
    "        self.vocabulary_size = vocabulary_size\n",
    "        self.text_file = text_file\n",
    "\n",
    "    def __iter__(self):\n",
    "        return look_ahead_iterator(\n",
    "            (self.vocab[t] for t in itertools.chain.from_iterable(get_word_lines_from_file(self.text_file))))\n",
    "\n",
    "\n",
    "class SimpleBigramNeuralLanguageModel(nn.Module):\n",
    "    def __init__(self, vocabulary_size, embedding_size):\n",
    "        super(SimpleBigramNeuralLanguageModel, self).__init__()\n",
    "        self.model = nn.Sequential(\n",
    "            nn.Embedding(vocabulary_size, embedding_size),\n",
    "            nn.Linear(embedding_size, vocabulary_size),\n",
    "            nn.Softmax(dim=1)\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.model(x)"
   ]
  },
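  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick shape check of the model on a dummy batch (an illustrative sketch, not in the original notebook; tiny stand-in sizes are used because the real parameters are only set in the next section)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# illustrative only: a (4,)-shaped batch of token ids should map to (4, vocab) probabilities\n",
    "tiny_model = SimpleBigramNeuralLanguageModel(100, 8)\n",
    "dummy = torch.randint(0, 100, (4,))\n",
    "out = tiny_model(dummy)\n",
    "out.shape, float(out[0].sum())  # expected: torch.Size([4, 100]) and a row sum of ~1.0"
   ]
  },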
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "W0O6U62El3YG"
   },
   "source": [
    "### Parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "eUS-U3_6l3YG"
   },
   "outputs": [],
   "source": [
    "vocab_size = 10000\n",
    "embed_size = 250\n",
    "batch_size = 5000\n",
    "num_of_top = 10"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "CPeVRcYZl3YG"
   },
   "source": [
    "### Vocabulary building"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "4wBx0OTal3YH"
   },
   "outputs": [],
   "source": [
    "# build the vocabulary once from the training data and cache it with pickle\n",
    "if os.path.exists('./vocabulary.pickle'):\n",
    "    with open('vocabulary.pickle', 'rb') as handle:\n",
    "        vocab = pickle.load(handle)\n",
    "else:\n",
    "    vocab = build_vocab_from_iterator(\n",
    "        get_word_lines_from_file('./drive/MyDrive/ColabNotebooks/america/train/in.tsv.xz'),\n",
    "        max_tokens=vocab_size,\n",
    "        specials=['<unk>'])\n",
    "\n",
    "    with open('vocabulary.pickle', 'wb') as handle:\n",
    "        pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "oVMVipnhl3YH",
    "outputId": "e588d083-be33-4dce-e61c-b26e664b2c5f"
   },
   "outputs": [],
   "source": [
    "# sanity check: the special token, the most frequent words, and one lower-frequency token\n",
    "vocab.lookup_tokens([0, 1, 2, 3, 4, 4500])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "1gJscHJUl3YI"
   },
   "source": [
    "### Training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "W_OjBUInl3YI"
   },
   "outputs": [],
   "source": [
    "model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size)\n",
    "vocab.set_default_index(vocab['<unk>'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "f373Kduzl3YI",
    "outputId": "0e6100df-d00e-4be3-83c4-cd2f671041e6"
   },
   "outputs": [],
   "source": [
    "# training\n",
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "train_dataset = Bigrams('./drive/MyDrive/ColabNotebooks/america/train/in.tsv.xz', vocab_size)\n",
    "model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)\n",
    "data = DataLoader(train_dataset, batch_size=batch_size)\n",
    "optimizer = torch.optim.Adam(model.parameters())\n",
    "\n",
    "# loss function; NLLLoss on log(softmax(x)) is equivalent to CrossEntropyLoss on raw logits\n",
    "criterion = torch.nn.NLLLoss()\n",
    "\n",
    "model.train()\n",
    "step = 0\n",
    "for x, y in data:\n",
    "    x = x.to(device)\n",
    "    y = y.to(device)\n",
    "    optimizer.zero_grad()\n",
    "    ypredicted = model(x)\n",
    "    loss = criterion(torch.log(ypredicted), y)\n",
    "    if step % 100 == 0:\n",
    "        print(step, loss.item())\n",
    "    step += 1\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "torch.save(model.state_dict(), 'model1.bin')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "IHrqskyXl3YI"
   },
   "source": [
    "### Evaluation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "yK3oK65fl3YI",
    "outputId": "ee3e64a6-c361-4e96-cad0-82f2950827ca"
   },
   "outputs": [],
   "source": [
    "model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)\n",
    "model.load_state_dict(torch.load('model1.bin'))\n",
    "model.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ihz4Px0bl3YJ"
   },
   "outputs": [],
   "source": [
    "execute('./drive/MyDrive/ColabNotebooks/america/dev-0')\n",
    "execute('./drive/MyDrive/ColabNotebooks/america/test-A')"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "provenance": []
  },
  "gpuClass": "standard",
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 0
}