{
 "metadata": {
  "language_info": {
   "codemirror_mode": { "name": "ipython", "version": 3 },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.5-final"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.9.5 64-bit",
   "metadata": {
    "interpreter": {
     "hash": "ac59ebe37160ed0dfa835113d9b8498d9f09ceb179beaac4002f036b9467c963"
    }
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Paranormal or skeptic \u2014 binary text classification\n",
    "\n",
    "Challenge: https://gonito.net/challenge/paranormal-or-skeptic\n",
    "\n",
    "Data and results: https://git.wmi.amu.edu.pl/s444380/paranormal-or-skeptic-ISI-public\n",
    "\n",
    "Each document is embedded as the mean of its GloVe (`glove-twitter-200`) word vectors\n",
    "and classified with a small dense network."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import lzma\n",
    "\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from gensim import downloader\n",
    "from keras.layers import Dense\n",
    "from keras.models import Sequential"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read train files: in.tsv.xz holds one lowercased document per line,\n",
    "# expected.tsv the matching 0/1 labels.\n",
    "with lzma.open(\"train/in.tsv.xz\", \"rt\", encoding=\"utf-8\") as train_file:\n",
    "    x_train = [x.strip().lower() for x in train_file.readlines()]\n",
    "\n",
    "with open(\"train/expected.tsv\", \"r\", encoding=\"utf-8\") as train_file:\n",
    "    y_train = np.array([int(x.strip()) for x in train_file.readlines()])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Downloads the pretrained embeddings on first run (cached by gensim afterwards).\n",
    "word2vec = downloader.load(\"glove-twitter-200\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "EMBED_DIM = 200  # dimensionality of the glove-twitter-200 vectors\n",
    "\n",
    "\n",
    "def document_vector(doc):\n",
    "    \"\"\"Embed a document as the mean of its in-vocabulary word vectors.\n",
    "\n",
    "    Words missing from the word2vec vocabulary are skipped; if no word of\n",
    "    the document is known, a zero vector is returned so every document\n",
    "    still maps to an EMBED_DIM-dimensional vector.\n",
    "    \"\"\"\n",
    "    vectors = [word2vec[word.lower()] for word in doc.split() if word.lower() in word2vec]\n",
    "    return np.mean(vectors or [np.zeros(EMBED_DIM)], axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "x_train_w2v = [document_vector(doc) for doc in x_train]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read dev files and embed them with the same helper as the train set.\n",
    "with lzma.open(\"dev-0/in.tsv.xz\", \"rt\", encoding=\"utf-8\") as dev_file:\n",
    "    x_dev = [x.strip().lower() for x in dev_file.readlines()]\n",
    "\n",
    "with open(\"dev-0/expected.tsv\", \"r\", encoding=\"utf-8\") as dev_file:\n",
    "    y_dev = np.array([int(x.strip()) for x in dev_file.readlines()])\n",
    "\n",
    "x_dev_w2v = [document_vector(doc) for doc in x_dev]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Simple feed-forward classifier on top of the mean document embeddings;\n",
    "# sigmoid output + binary cross-entropy for the 0/1 label.\n",
    "model = Sequential()\n",
    "model.add(Dense(1000, activation='relu', input_dim=EMBED_DIM))\n",
    "model.add(Dense(500, activation='relu'))\n",
    "model.add(Dense(1, activation='sigmoid'))\n",
    "model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Epoch 1/5\n",
      "9050/9050 [==============================] - 48s 5ms/step - loss: 0.5244 - accuracy: 0.7303 - val_loss: 0.5536 - val_accuracy: 0.6910\n",
      "Epoch 2/5\n",
      "9050/9050 [==============================] - 47s 5ms/step - loss: 0.5132 - accuracy: 0.7367 - val_loss: 0.5052 - val_accuracy: 0.7475\n",
      "Epoch 3/5\n",
      "9050/9050 [==============================] - 47s 5ms/step - loss: 0.5067 - accuracy: 0.7396 - val_loss: 0.5091 - val_accuracy: 0.7320\n",
      "Epoch 4/5\n",
      "9050/9050 [==============================] - 47s 5ms/step - loss: 0.5025 - accuracy: 0.7429 - val_loss: 0.5343 - val_accuracy: 0.7071\n",
      "Epoch 5/5\n",
      "9050/9050 [==============================] - 47s 5ms/step - loss: 0.4992 - accuracy: 0.7447 - val_loss: 0.5143 - val_accuracy: 0.7381\n"
     ]
    }
   ],
   "source": [
    "history = model.fit(tf.stack(x_train_w2v), tf.stack(y_train), epochs=5, validation_data=(tf.stack(x_dev_w2v), tf.stack(y_dev)))"
   ]
  }
 ]
}