{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "#!/usr/bin/env python\n",
    "# coding: utf-8\n",
    "import lzma\n",
    "from gensim.models import Word2Vec\n",
    "import gensim.downloader\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "X_train = lzma.open(\"train/in.tsv.xz\", mode='rt', encoding='utf-8').readlines()\n",
    "y_train = np.array(open('train/expected.tsv').readlines())\n",
    "X_dev0 = lzma.open(\"dev-0/in.tsv.xz\", mode='rt', encoding='utf-8').readlines()\n",
    "y_expected_dev0 = np.array(open(\"dev-0/expected.tsv\", \"r\").readlines())\n",
    "X_test = lzma.open(\"test-A/in.tsv.xz\", mode='rt', encoding='utf-8').readlines()"
   ]
  },
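  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Added sketch (not part of the original pipeline): a quick sanity check that every\n",
    "# split loaded and that the number of documents matches the number of labels.\n",
    "print(len(X_train), len(y_train))\n",
    "print(len(X_dev0), len(y_expected_dev0))\n",
    "print(len(X_test))"
   ]
  },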
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_train = [line.split() for line in X_train]\n",
    "X_dev0 = [line.split() for line in X_dev0]\n",
    "X_test = [line.split() for line in X_test]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_w2v = Word2Vec(X_train, vector_size=100, window=5, min_count=1, workers=4)"
   ]
  },
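  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Added sketch: inspect the trained embedding space before using it downstream.\n",
    "# key_to_index / index_to_key are the gensim 4.x vocabulary accessors.\n",
    "print('vocabulary size:', len(model_w2v.wv.key_to_index))\n",
    "print('example tokens:', model_w2v.wv.index_to_key[:10])"
   ]
  },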
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [],
   "source": [
    "def vectorize(model, data):\n",
    "    # Represent each document as the mean of its word vectors;\n",
    "    # out-of-vocabulary words contribute zero vectors of the model's dimensionality.\n",
    "    dim = model.vector_size\n",
    "    return np.array([np.mean([model.wv[word] if word in model.wv.key_to_index else np.zeros(dim, dtype=np.float32) for word in doc], axis=0) for doc in data], dtype=np.float32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_train_w2v = vectorize(model_w2v, X_train)\n",
    "X_dev0_w2v = vectorize(model_w2v, X_dev0)\n",
    "X_test_w2v = vectorize(model_w2v, X_test)"
   ]
  },
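  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Added sketch: confirm the document-embedding matrices have the expected\n",
    "# shape (n_documents, 100), matching the Word2Vec vector_size above.\n",
    "print(X_train_w2v.shape, X_dev0_w2v.shape, X_test_w2v.shape)"
   ]
  },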
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "FEATURES = 100\n",
    "\n",
    "class NeuralNetworkModel(torch.nn.Module):\n",
    "\n",
    "    def __init__(self):\n",
    "        super(NeuralNetworkModel, self).__init__()\n",
    "        self.fc1 = torch.nn.Linear(FEATURES, 500)\n",
    "        self.fc2 = torch.nn.Linear(500, 1)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.fc1(x)\n",
    "        x = torch.relu(x)\n",
    "        x = self.fc2(x)\n",
    "        x = torch.sigmoid(x)\n",
    "        return x"
   ]
  },
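  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Added sketch: verify the layer shapes with a dummy batch; the network maps a\n",
    "# FEATURES-dimensional document vector to a single sigmoid probability.\n",
    "with torch.no_grad():\n",
    "    print(NeuralNetworkModel()(torch.zeros(3, FEATURES)).shape)  # expected: torch.Size([3, 1])"
   ]
  },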
  {
   "cell_type": "code",
   "execution_count": 145,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "nn_model = NeuralNetworkModel()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 146,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "BATCH_SIZE = 42"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 147,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "criterion = torch.nn.BCELoss()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 148,
   "metadata": {
    "pycharm": {
     "is_executing": true,
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "optimizer = torch.optim.SGD(nn_model.parameters(), lr=0.1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 149,
   "metadata": {
    "pycharm": {
     "is_executing": true,
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "def get_loss_acc(model, X_dataset, Y_dataset):\n",
    "    # Evaluate mean BCE loss and accuracy over a dataset in mini-batches.\n",
    "    loss_score = 0\n",
    "    acc_score = 0\n",
    "    items_total = 0\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        for i in range(0, Y_dataset.shape[0], BATCH_SIZE):\n",
    "            X = np.array(X_dataset[i:i+BATCH_SIZE]).astype(np.float32)\n",
    "            X = torch.tensor(X)\n",
    "            Y = Y_dataset[i:i+BATCH_SIZE]\n",
    "            Y = torch.tensor(Y.astype(np.float32)).reshape(-1, 1)\n",
    "            Y_predictions = model(X)\n",
    "            acc_score += torch.sum((Y_predictions > 0.5) == Y).item()\n",
    "            items_total += Y.shape[0]\n",
    "\n",
    "            loss = criterion(Y_predictions, Y)\n",
    "            loss_score += loss.item() * Y.shape[0]\n",
    "    return (loss_score / items_total), (acc_score / items_total)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 150,
   "metadata": {},
   "outputs": [],
   "source": [
    "def predict(model, data):\n",
    "    model.eval()\n",
    "    predictions = []\n",
    "    for x in data:\n",
    "        X = torch.tensor(np.array(x).astype(np.float32))\n",
    "        Y_predictions = model(X)\n",
    "        if Y_predictions[0] > 0.5:\n",
    "            predictions.append(\"1\")\n",
    "        else:\n",
    "            predictions.append(\"0\")\n",
    "    return predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 151,
   "metadata": {
    "pycharm": {
     "is_executing": true,
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.49161445487174543, 0.7499197110287693)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.4990149180719994, 0.7420333839150227)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "1"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.486242138754709, 0.7533833599812141)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.4960476360955079, 0.7448786039453718)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "2"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.48170865143118824, 0.7566018254086104)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.49339661830880754, 0.7448786039453718)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "3"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.47863767532834156, 0.7587877573995352)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.49210414077877457, 0.7503793626707133)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "4"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.4755889592268004, 0.7613466446116604)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.49055553189223017, 0.753793626707132)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "5"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.47395927866325194, 0.7623273787118541)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.4905445413022374, 0.7541729893778453)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "6"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.4721670034531442, 0.7639055318237855)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.4896522785377249, 0.7522761760242792)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "7"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.4713666787153674, 0.7644166186083936)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.4897225151384003, 0.7532245827010622)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "8"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.4687599671611641, 0.7661674361745845)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.4882916720620779, 0.7524658573596358)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "9"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.4669961705231401, 0.767617817590364)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(0.48753329053272426, 0.7534142640364189)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "for epoch in range(10):\n",
    "    loss_score = 0\n",
    "    acc_score = 0\n",
    "    items_total = 0\n",
    "    nn_model.train()\n",
    "    for i in range(0, y_train.shape[0], BATCH_SIZE):\n",
    "        X = X_train_w2v[i:i+BATCH_SIZE]\n",
    "        X = torch.tensor(X)\n",
    "        Y = y_train[i:i+BATCH_SIZE]\n",
    "        Y = torch.tensor(Y.astype(np.float32)).reshape(-1, 1)\n",
    "        Y_predictions = nn_model(X)\n",
    "        acc_score += torch.sum((Y_predictions > 0.5) == Y).item()\n",
    "        items_total += Y.shape[0]\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        loss = criterion(Y_predictions, Y)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        loss_score += loss.item() * Y.shape[0]\n",
    "\n",
    "    display(epoch)\n",
    "    display(get_loss_acc(nn_model, X_train_w2v, y_train))\n",
    "    display(get_loss_acc(nn_model, X_dev0_w2v, y_expected_dev0))"
   ]
  },
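  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Added sketch: persist the trained weights so predictions can be reproduced later\n",
    "# without retraining; the file name is an arbitrary choice, not part of the original notebook.\n",
    "torch.save(nn_model.state_dict(), 'nn_model_w2v.pt')"
   ]
  },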
  {
   "cell_type": "code",
   "execution_count": 152,
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "y_pred_dev0 = predict(nn_model, X_dev0_w2v)\n",
    "y_pred_test = predict(nn_model, X_test_w2v)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 158,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('dev-0/out.tsv', 'w') as f:\n",
    "    f.writelines([i + '\\n' for i in y_pred_dev0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 159,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('test-A/out.tsv', 'w') as f:\n",
    "    f.writelines([i + '\\n' for i in y_pred_test])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}