Added neural network classifiers

parent 7f75f2e2e2
commit f5fa1779c9

bert_classifier.ipynb (new file, +0 lines)
keras_classifier.ipynb (new file, +152 lines)
keras_classifier.ipynb
@@ -0,0 +1,152 @@
+{
+"metadata": {
+"language_info": {
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+},
+"file_extension": ".py",
+"mimetype": "text/x-python",
+"name": "python",
+"nbconvert_exporter": "python",
+"pygments_lexer": "ipython3",
+"version": "3.9.5-final"
+},
+"orig_nbformat": 2,
+"kernelspec": {
+"name": "python3",
+"display_name": "Python 3.9.5 64-bit",
+"metadata": {
+"interpreter": {
+"hash": "ac59ebe37160ed0dfa835113d9b8498d9f09ceb179beaac4002f036b9467c963"
+}
+}
+}
+},
+"nbformat": 4,
+"nbformat_minor": 2,
+"cells": [
+{
+"cell_type": "code",
+"execution_count": 2,
+"metadata": {},
+"outputs": [],
+"source": [
"# https://gonito.net/challenge/paranormal-or-skeptic\n",
|
||||
"# dane + wyniki -> https://git.wmi.amu.edu.pl/s444380/paranormal-or-skeptic-ISI-public"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 3,
+"metadata": {},
+"outputs": [],
+"source": [
+"import lzma\n",
+"from keras.models import Sequential\n",
+"from keras.layers import Dense\n",
+"import tensorflow as tf\n",
+"import numpy as np\n",
+"from gensim import downloader"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 4,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Read train files\n",
+"with lzma.open(\"train/in.tsv.xz\", \"rt\", encoding=\"utf-8\") as train_file:\n",
+"    x_train = [x.strip().lower() for x in train_file.readlines()]\n",
+"\n",
+"with open(\"train/expected.tsv\", \"r\", encoding=\"utf-8\") as train_file:\n",
+"    y_train = np.array([int(x.strip()) for x in train_file.readlines()])\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 5,
+"metadata": {},
+"outputs": [],
+"source": [
+"word2vec = downloader.load(\"glove-twitter-200\")"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 7,
+"metadata": {},
+"outputs": [],
+"source": [
+"x_train_w2v = [np.mean([word2vec[word.lower()] for word in doc.split() if word.lower() in word2vec]\n",
+" or [np.zeros(200)], axis=0) for doc in x_train]"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 24,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Read dev files\n",
+"with lzma.open(\"dev-0/in.tsv.xz\", \"rt\", encoding=\"utf-8\") as dev_file:\n",
+"    x_dev = [x.strip().lower() for x in dev_file.readlines()]\n",
+"\n",
+"with open(\"dev-0/expected.tsv\", \"r\", encoding=\"utf-8\") as train_file:\n",
+"    y_dev = np.array([int(x.strip()) for x in train_file.readlines()])\n",
+"\n",
+"x_dev_w2v = [np.mean([word2vec[word.lower()] for word in doc.split() if word.lower() in word2vec]\n",
+" or [np.zeros(200)], axis=0) for doc in x_dev]"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 11,
+"metadata": {},
+"outputs": [],
+"source": [
+"# y_train = y_train.reshape(-1, 1)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 22,
+"metadata": {},
+"outputs": [],
+"source": [
+"model = Sequential()\n",
+"model.add(Dense(1000, activation='relu', input_dim=200))\n",
+"model.add(Dense(500, activation='relu'))\n",
+"model.add(Dense(1, activation='sigmoid'))\n",
+"model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 25,
+"metadata": {},
+"outputs": [
+{
+"output_type": "stream",
+"name": "stdout",
+"text": [
+"Epoch 1/5\n",
+"9050/9050 [==============================] - 48s 5ms/step - loss: 0.5244 - accuracy: 0.7303 - val_loss: 0.5536 - val_accuracy: 0.6910\n",
+"Epoch 2/5\n",
+"9050/9050 [==============================] - 47s 5ms/step - loss: 0.5132 - accuracy: 0.7367 - val_loss: 0.5052 - val_accuracy: 0.7475\n",
+"Epoch 3/5\n",
+"9050/9050 [==============================] - 47s 5ms/step - loss: 0.5067 - accuracy: 0.7396 - val_loss: 0.5091 - val_accuracy: 0.7320\n",
+"Epoch 4/5\n",
+"9050/9050 [==============================] - 47s 5ms/step - loss: 0.5025 - accuracy: 0.7429 - val_loss: 0.5343 - val_accuracy: 0.7071\n",
+"Epoch 5/5\n",
+"9050/9050 [==============================] - 47s 5ms/step - loss: 0.4992 - accuracy: 0.7447 - val_loss: 0.5143 - val_accuracy: 0.7381\n"
+]
+}
+],
+"source": [
+"history = model.fit(tf.stack(x_train_w2v), tf.stack(y_train), epochs=5, validation_data=(tf.stack(x_dev_w2v), tf.stack(y_dev)))"
+]
+}
+]
+}
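A note on the pooling idiom above: `np.mean([...] or [np.zeros(200)], axis=0)` relies on an empty list being falsy, so a document containing no in-vocabulary words falls back to a single 200-dimensional zero vector instead of raising an error. A minimal standalone sketch of the same step (the `doc_vector` helper and its `dim` parameter are illustrative names, not from the notebook):

import numpy as np

def doc_vector(doc, word2vec, dim=200):
    # gensim KeyedVectors support `word in word2vec` and `word2vec[word]`.
    vectors = [word2vec[w] for w in doc.lower().split() if w in word2vec]
    # An empty list is falsy, so `or` substitutes one zero vector,
    # making the mean well-defined for fully out-of-vocabulary documents.
    return np.mean(vectors or [np.zeros(dim)], axis=0)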
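keras_classifier.ipynb stops after `model.fit` and writes no prediction files, unlike the torch-based notebook whose diff follows. A hedged sketch of how that step might look for the dev set, assuming `model` and `x_dev_w2v` from the cells above (the 0.5 threshold mirrors the other notebook; the output path follows the challenge layout):

import tensorflow as tf

# Sigmoid outputs of shape (n_docs, 1), binarized at 0.5.
labels = (model.predict(tf.stack(x_dev_w2v)) > 0.5).astype(int).reshape(-1)

with open("dev-0/out.tsv", "w", encoding="utf-8") as f:
    f.writelines(str(label) + "\n" for label in labels)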
@@ -26,6 +26,16 @@
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
"# https://gonito.net/challenge/paranormal-or-skeptic\n",
|
||||
"# dane + wyniki -> https://git.wmi.amu.edu.pl/s444380/paranormal-or-skeptic-ISI-public"
+]
+},
 {
 "cell_type": "code",
 "execution_count": 2,
@@ -189,17 +199,17 @@
 },
 {
 "cell_type": "code",
-"execution_count": 27,
+"execution_count": 39,
 "metadata": {},
 "outputs": [],
 "source": [
 "x_dev_w2v = [np.mean([word2vec[word.lower()] for word in doc.split() if word.lower() in word2vec]\n",
-" or [np.zeros(FEATURES)], axis=0) for doc in x_train]"
+" or [np.zeros(FEATURES)], axis=0) for doc in x_dev]"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 40,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -209,11 +219,70 @@
 "        x = x_dev_w2v[i:i+BATCH_SIZE]\n",
 "        x = torch.tensor(np.array(x).astype(np.float32))\n",
 "        \n",
-"        outputs = model(x\n",
+"        outputs = model(x)\n",
 "        \n",
 "        y = (outputs > 0.5)\n",
 "        y_dev.extend(y)"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": 42,
+"metadata": {},
+"outputs": [],
+"source": [
+"with open(\"dev-0/out.tsv\", \"w\", encoding=\"utf-8\") as f:\n",
+"    f.writelines([str(y.int()[0].item()) + \"\\n\" for y in y_dev])"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 43,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Read test files\n",
+"with lzma.open(\"test-A/in.tsv.xz\", \"rt\", encoding=\"utf-8\") as test_file:\n",
+"    x_test = [x.strip().lower() for x in test_file.readlines()]"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 44,
+"metadata": {},
+"outputs": [],
+"source": [
+"x_test_w2v = [np.mean([word2vec[word.lower()] for word in doc.split() if word.lower() in word2vec]\n",
+" or [np.zeros(FEATURES)], axis=0) for doc in x_test]"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 45,
+"metadata": {},
+"outputs": [],
+"source": [
+"y_test = []\n",
+"with torch.no_grad():\n",
+"    for i in range(0, len(x_test_w2v), BATCH_SIZE):\n",
+"        x = x_test_w2v[i:i+BATCH_SIZE]\n",
+"        x = torch.tensor(np.array(x).astype(np.float32))\n",
+"        \n",
+"        outputs = model(x)\n",
+"        \n",
+"        y = (outputs > 0.5)\n",
+"        y_test.extend(y)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 46,
+"metadata": {},
+"outputs": [],
+"source": [
+"with open(\"test-A/out.tsv\", \"w\", encoding=\"utf-8\") as f:\n",
+"    f.writelines([str(y.int()[0].item()) + \"\\n\" for y in y_test])"
+]
+}
 ]
 }
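For reference, the dev and test inference loops in this notebook are identical apart from their inputs. A compact sketch of the shared pattern, assuming `model`, `BATCH_SIZE`, and the `*_w2v` feature lists defined in that notebook (the `predict_labels` helper is an illustrative name, and the model definition itself sits outside the hunks shown here):

import numpy as np
import torch

def predict_labels(model, docs_w2v, batch_size):
    labels = []
    with torch.no_grad():  # inference only, no gradients needed
        for i in range(0, len(docs_w2v), batch_size):
            x = torch.tensor(np.array(docs_w2v[i:i + batch_size]).astype(np.float32))
            outputs = model(x)  # sigmoid outputs in [0, 1]
            labels.extend((outputs > 0.5).int().reshape(-1).tolist())
    return labels

# Usage, e.g. for the test set:
# with open("test-A/out.tsv", "w", encoding="utf-8") as f:
#     f.writelines(str(label) + "\n" for label in predict_labels(model, x_test_w2v, BATCH_SIZE))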