Compare commits
5 Commits: a8125bba9d, f5fa1779c9, 7f75f2e2e2, b217d37450, 9b68bb67c7
0  bert_classifier.ipynb  Normal file
41  classifier.py  Normal file
@@ -0,0 +1,41 @@
import lzma

from naivebayes import NaiveBayesTextClassifier
import nltk
from nltk.corpus import stopwords

nltk.download("stopwords")

# Read train files
with lzma.open("train/in.tsv.xz", "rt", encoding="utf-8") as train_file:
    x_train = [x.strip().lower() for x in train_file.readlines()]

with open("train/expected.tsv", "r", encoding="utf-8") as train_file:
    y_train = [int(x.strip()) for x in train_file.readlines()]

nbc = NaiveBayesTextClassifier(
    categories=[0, 1],
    stop_words=stopwords.words("english"),
    min_df=1
)
step = 15000
for i in range(0, len(x_train), step):
    nbc.train(x_train[i:i+step], y_train[i:i+step])

# Read dev files
with lzma.open("dev-0/in.tsv.xz", "rt", encoding="utf-8") as dev_file:
    x_dev = [x.strip().lower() for x in dev_file.readlines()]

# Read test file
with lzma.open("test-A/in.tsv.xz", "rt", encoding="utf-8") as test_file:
    x_test = [x.strip().lower() for x in test_file.readlines()]

# Predict dev
pred_dev = [str(x) + "\n" for x in nbc.classify(x_dev)]
with open("dev-0/out.tsv", "w", encoding="utf-8") as dev_out_file:
    dev_out_file.writelines(pred_dev)

# Predict test
pred_test = [str(x) + "\n" for x in nbc.classify(x_test)]
with open("test-A/out.tsv", "w", encoding="utf-8") as test_out_file:
    test_out_file.writelines(pred_test)
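For a quick sanity check of the Naive Bayes run, the predictions written to dev-0/out.tsv can be scored against the gold labels in dev-0/expected.tsv (the same file the notebooks below read). A minimal sketch, not part of the commit:

# Hedged sketch: dev-set accuracy for classifier.py's predictions.
with open("dev-0/expected.tsv", encoding="utf-8") as f:
    gold = [int(x.strip()) for x in f.readlines()]
with open("dev-0/out.tsv", encoding="utf-8") as f:
    pred = [int(x.strip()) for x in f.readlines()]
accuracy = sum(g == p for g, p in zip(gold, pred)) / len(gold)
print(f"Dev accuracy: {accuracy:.4f}")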
5272  dev-0/out.tsv  Normal file
File diff suppressed because it is too large
152  keras_classifier.ipynb  Normal file
@@ -0,0 +1,152 @@
{
  "metadata": {
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.9.5-final"
    },
    "orig_nbformat": 2,
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3.9.5 64-bit",
      "metadata": {
        "interpreter": {
          "hash": "ac59ebe37160ed0dfa835113d9b8498d9f09ceb179beaac4002f036b9467c963"
        }
      }
    }
  },
  "nbformat": 4,
  "nbformat_minor": 2,
  "cells": [
    {
      "cell_type": "code",
      "execution_count": 2,
      "metadata": {},
      "outputs": [],
      "source": [
        "# https://gonito.net/challenge/paranormal-or-skeptic\n",
        "# data + results -> https://git.wmi.amu.edu.pl/s444380/paranormal-or-skeptic-ISI-public"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 3,
      "metadata": {},
      "outputs": [],
      "source": [
        "import lzma\n",
        "from keras.models import Sequential\n",
        "from keras.layers import Dense\n",
        "import tensorflow as tf\n",
        "import numpy as np\n",
        "from gensim import downloader"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 4,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Read train files\n",
        "with lzma.open(\"train/in.tsv.xz\", \"rt\", encoding=\"utf-8\") as train_file:\n",
        "    x_train = [x.strip().lower() for x in train_file.readlines()]\n",
        "\n",
        "with open(\"train/expected.tsv\", \"r\", encoding=\"utf-8\") as train_file:\n",
        "    y_train = np.array([int(x.strip()) for x in train_file.readlines()])\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 5,
      "metadata": {},
      "outputs": [],
      "source": [
        "word2vec = downloader.load(\"glove-twitter-200\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 7,
      "metadata": {},
      "outputs": [],
      "source": [
        "x_train_w2v = [np.mean([word2vec[word.lower()] for word in doc.split() if word.lower() in word2vec]\n",
        "               or [np.zeros(200)], axis=0) for doc in x_train]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 24,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Read dev files\n",
        "with lzma.open(\"dev-0/in.tsv.xz\", \"rt\", encoding=\"utf-8\") as dev_file:\n",
        "    x_dev = [x.strip().lower() for x in dev_file.readlines()]\n",
        "\n",
        "with open(\"dev-0/expected.tsv\", \"r\", encoding=\"utf-8\") as train_file:\n",
        "    y_dev = np.array([int(x.strip()) for x in train_file.readlines()])\n",
        "\n",
        "x_dev_w2v = [np.mean([word2vec[word.lower()] for word in doc.split() if word.lower() in word2vec]\n",
        "             or [np.zeros(200)], axis=0) for doc in x_dev]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 11,
      "metadata": {},
      "outputs": [],
      "source": [
        "# y_train = y_train.reshape(-1, 1)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 22,
      "metadata": {},
      "outputs": [],
      "source": [
        "model = Sequential()\n",
        "model.add(Dense(1000, activation='relu', input_dim=200))\n",
        "model.add(Dense(500, activation='relu'))\n",
        "model.add(Dense(1, activation='sigmoid'))\n",
        "model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 25,
      "metadata": {},
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Epoch 1/5\n",
            "9050/9050 [==============================] - 48s 5ms/step - loss: 0.5244 - accuracy: 0.7303 - val_loss: 0.5536 - val_accuracy: 0.6910\n",
            "Epoch 2/5\n",
            "9050/9050 [==============================] - 47s 5ms/step - loss: 0.5132 - accuracy: 0.7367 - val_loss: 0.5052 - val_accuracy: 0.7475\n",
            "Epoch 3/5\n",
            "9050/9050 [==============================] - 47s 5ms/step - loss: 0.5067 - accuracy: 0.7396 - val_loss: 0.5091 - val_accuracy: 0.7320\n",
            "Epoch 4/5\n",
            "9050/9050 [==============================] - 47s 5ms/step - loss: 0.5025 - accuracy: 0.7429 - val_loss: 0.5343 - val_accuracy: 0.7071\n",
            "Epoch 5/5\n",
            "9050/9050 [==============================] - 47s 5ms/step - loss: 0.4992 - accuracy: 0.7447 - val_loss: 0.5143 - val_accuracy: 0.7381\n"
          ]
        }
      ],
      "source": [
        "history = model.fit(tf.stack(x_train_w2v), tf.stack(y_train), epochs=5, validation_data=(tf.stack(x_dev_w2v), tf.stack(y_dev)))"
      ]
    }
  ]
}
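keras_classifier.ipynb represents each document as the mean of its GloVe-Twitter-200 word vectors (falling back to a zero vector when no word is in vocabulary) and fits a small dense network on top. The notebook stops at model.fit; a hedged sketch, not in the commit, of how its dev predictions could be written in the same one-label-per-line format the other scripts use (x_dev_w2v as defined in the notebook):

# Hedged sketch: threshold the Keras sigmoid outputs at 0.5 and write
# one label per line, matching the dev-0/out.tsv format.
y_pred_dev = (model.predict(tf.stack(x_dev_w2v)) > 0.5).astype(int).flatten()
with open("dev-0/out.tsv", "w", encoding="utf-8") as f:
    f.writelines(str(y) + "\n" for y in y_pred_dev)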
288  pytorch_classifier.ipynb  Normal file
@@ -0,0 +1,288 @@
{
  "metadata": {
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.9.5-final"
    },
    "orig_nbformat": 2,
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3.9.5 64-bit",
      "metadata": {
        "interpreter": {
          "hash": "ac59ebe37160ed0dfa835113d9b8498d9f09ceb179beaac4002f036b9467c963"
        }
      }
    }
  },
  "nbformat": 4,
  "nbformat_minor": 2,
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# https://gonito.net/challenge/paranormal-or-skeptic\n",
        "# data + results -> https://git.wmi.amu.edu.pl/s444380/paranormal-or-skeptic-ISI-public"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 2,
      "metadata": {},
      "outputs": [],
      "source": [
        "import lzma\n",
        "import torch\n",
        "import numpy as np\n",
        "from gensim import downloader"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 3,
      "metadata": {},
      "outputs": [],
      "source": [
        "BATCH_SIZE = 10\n",
        "EPOCHS = 10\n",
        "FEATURES = 200"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 23,
      "metadata": {},
      "outputs": [],
      "source": [
        "class NeuralNetworkModel(torch.nn.Module):\n",
        "\n",
        "    def __init__(self):\n",
        "        super(NeuralNetworkModel, self).__init__()\n",
        "        self.fc1 = torch.nn.Linear(FEATURES, 1000)\n",
        "        self.fc2 = torch.nn.Linear(1000, 500)\n",
        "        self.fc3 = torch.nn.Linear(500, 1)\n",
        "\n",
        "    def forward(self, x):\n",
        "        x = self.fc1(x)\n",
        "        x = torch.relu(x)\n",
        "        x = self.fc2(x)\n",
        "        x = torch.relu(x)\n",
        "        x = self.fc3(x)\n",
        "        x = torch.sigmoid(x)\n",
        "        return x"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 6,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Read train files\n",
        "with lzma.open(\"train/in.tsv.xz\", \"rt\", encoding=\"utf-8\") as train_file:\n",
        "    x_train = [x.strip().lower() for x in train_file.readlines()]\n",
        "\n",
        "with open(\"train/expected.tsv\", \"r\", encoding=\"utf-8\") as train_file:\n",
        "    y_train = np.array([int(x.strip()) for x in train_file.readlines()])\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 7,
      "metadata": {},
      "outputs": [],
      "source": [
        "word2vec = downloader.load(\"glove-twitter-200\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 8,
      "metadata": {},
      "outputs": [],
      "source": [
        "x_train_w2v = [np.mean([word2vec[word.lower()] for word in doc.split() if word.lower() in word2vec]\n",
        "               or [np.zeros(FEATURES)], axis=0) for doc in x_train]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 24,
      "metadata": {},
      "outputs": [],
      "source": [
        "model = NeuralNetworkModel()\n",
        "\n",
        "criterion = torch.nn.BCELoss()\n",
        "optimizer = torch.optim.ASGD(model.parameters(), lr=0.05)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 25,
      "metadata": {},
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "0\n",
            "0.5444966091123856 0.7128072132302411\n",
            "1\n",
            "0.5187017436751196 0.7303153888921503\n",
            "2\n",
            "0.5117590330604093 0.7348944502191112\n",
            "3\n",
            "0.5075270808198805 0.7376916143781145\n",
            "4\n",
            "0.5043017516287736 0.7403230206610286\n",
            "5\n",
            "0.5016950109024928 0.7418977204838748\n",
            "6\n",
            "0.49942716640870777 0.7432134236253319\n",
            "7\n",
            "0.49766424133924386 0.7448606425189672\n",
            "8\n",
            "0.49617289846816215 0.745534033890579\n",
            "9\n",
            "0.49471875689137873 0.7467116054686286\n"
          ]
        }
      ],
      "source": [
        "for epoch in range(EPOCHS):\n",
        "    print(epoch)\n",
        "    loss_score = 0\n",
        "    acc_score = 0\n",
        "    items_total = 0\n",
        "    for i in range(0, y_train.shape[0], BATCH_SIZE):\n",
        "        x = x_train_w2v[i:i+BATCH_SIZE]\n",
        "        x = torch.tensor(np.array(x).astype(np.float32))\n",
        "        y = y_train[i:i+BATCH_SIZE]\n",
        "        y = torch.tensor(y.astype(np.float32)).reshape(-1, 1)\n",
        "        y_pred = model(x)\n",
        "        acc_score += torch.sum((y_pred > 0.5) == y).item()\n",
        "        items_total += y.shape[0]\n",
        "\n",
        "        optimizer.zero_grad()\n",
        "        loss = criterion(y_pred, y)\n",
        "        loss.backward()\n",
        "        optimizer.step()\n",
        "\n",
        "        loss_score += loss.item() * y.shape[0]\n",
        "\n",
        "    print((loss_score / items_total), (acc_score / items_total))"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 26,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Read dev files\n",
        "with lzma.open(\"dev-0/in.tsv.xz\", \"rt\", encoding=\"utf-8\") as dev_file:\n",
        "    x_dev = [x.strip().lower() for x in dev_file.readlines()]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 39,
      "metadata": {},
      "outputs": [],
      "source": [
        "x_dev_w2v = [np.mean([word2vec[word.lower()] for word in doc.split() if word.lower() in word2vec]\n",
        "             or [np.zeros(FEATURES)], axis=0) for doc in x_dev]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 40,
      "metadata": {},
      "outputs": [],
      "source": [
        "y_dev = []\n",
        "with torch.no_grad():\n",
        "    for i in range(0, len(x_dev_w2v), BATCH_SIZE):\n",
        "        x = x_dev_w2v[i:i+BATCH_SIZE]\n",
        "        x = torch.tensor(np.array(x).astype(np.float32))\n",
        "\n",
        "        outputs = model(x)\n",
        "\n",
        "        y = (outputs > 0.5)\n",
        "        y_dev.extend(y)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 42,
      "metadata": {},
      "outputs": [],
      "source": [
        "with open(\"dev-0/out.tsv\", \"w\", encoding=\"utf-8\") as f:\n",
        "    f.writelines([str(y.int()[0].item()) + \"\\n\" for y in y_dev])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 43,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Read test files\n",
        "with lzma.open(\"test-A/in.tsv.xz\", \"rt\", encoding=\"utf-8\") as test_file:\n",
        "    x_test = [x.strip().lower() for x in test_file.readlines()]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 44,
      "metadata": {},
      "outputs": [],
      "source": [
        "x_test_w2v = [np.mean([word2vec[word.lower()] for word in doc.split() if word.lower() in word2vec]\n",
        "              or [np.zeros(FEATURES)], axis=0) for doc in x_test]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 45,
      "metadata": {},
      "outputs": [],
      "source": [
        "y_test = []\n",
        "with torch.no_grad():\n",
        "    for i in range(0, len(x_test_w2v), BATCH_SIZE):\n",
        "        x = x_test_w2v[i:i+BATCH_SIZE]\n",
        "        x = torch.tensor(np.array(x).astype(np.float32))\n",
        "\n",
        "        outputs = model(x)\n",
        "\n",
        "        y = (outputs > 0.5)\n",
        "        y_test.extend(y)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 46,
      "metadata": {},
      "outputs": [],
      "source": [
        "with open(\"test-A/out.tsv\", \"w\", encoding=\"utf-8\") as f:\n",
        "    f.writelines([str(y.int()[0].item()) + \"\\n\" for y in y_test])"
      ]
    }
  ]
}
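The inference loops in pytorch_classifier.ipynb run under torch.no_grad() but never call model.eval(); with only Linear layers and elementwise activations this is harmless, though it would matter if dropout or batch norm were added. A hedged sketch of the conventional pattern, not part of the commit:

# Hedged sketch: standard PyTorch inference idiom. model.eval() is a no-op
# for this pure-Linear network but becomes necessary with dropout/batch norm.
model.eval()
with torch.no_grad():
    x = torch.tensor(np.array(x_dev_w2v).astype(np.float32))
    y_dev = (model(x) > 0.5).int().flatten().tolist()
model.train()  # restore training mode before any further epochs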
34  simple_transformers.py  Normal file
@@ -0,0 +1,34 @@
from simpletransformers.classification import ClassificationModel, ClassificationArgs
import pandas as pd
import logging
import torch

logging.basicConfig(level=logging.INFO)
transformer_logger = logging.getLogger("transformers")
transformer_logger.setLevel(logging.WARNING)


train_df = pd.read_csv("train/train.tsv", sep="\t")
print(train_df)

dev_df = pd.read_csv("dev-0/dev.tsv", sep="\t")
print(dev_df)


args = {
    'train_batch_size': 32,
    'learning_rate': 2e-5,
    'evaluate_during_training': True,
    'save_steps': 1000,
    'evaluate_during_training_steps': 1000,
    'evaluate_during_training_verbose': True,
    'overwrite_output_dir': True,
    'save_eval_checkpoints': True,
    'use_early_stopping': True,
    'early_stopping_patience': 5,
    'num_train_epochs': 3
}

model = ClassificationModel("deberta", "microsoft/deberta-base", use_cuda=True, args=args)

model.train_model(train_df, eval_df=dev_df)
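simple_transformers.py imports ClassificationArgs but passes a plain dict; both forms are accepted, but the dataclass makes a misspelled option fail loudly instead of being silently ignored. A hedged sketch of the equivalent typed form (subset of the keys above shown; the train_model call stays the same):

# Hedged sketch: typed equivalent of the args dict above.
model_args = ClassificationArgs(
    train_batch_size=32,
    learning_rate=2e-5,
    evaluate_during_training=True,
    num_train_epochs=3,
)
model = ClassificationModel("deberta", "microsoft/deberta-base", use_cuda=True, args=model_args)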
19  simple_transformers_eval.py  Normal file
@@ -0,0 +1,19 @@
from simpletransformers.classification import ClassificationModel
import pandas as pd

model = ClassificationModel("deberta", "outputs/best_model")

dev_df = pd.read_csv("dev-0/dev.tsv", sep="\t")

result, model_outputs, wrong_predictions = model.eval_model(dev_df)
print(result)
tp = result["tp"]
fp = result["fp"]
tn = result["tn"]
fn = result["fn"]
print(f"Accuracy: {(tp+tn)/(tp+fp+tn+fn)}")
precision = tp/(tp+fp)
print(f"Precision: {precision}")
recall = tp/(tp+fn)
print(f"Recall: {recall}")
print(f"F1-score: {2*precision*recall/(precision+recall)}")
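simple_transformers_eval.py only reports metrics; regenerating an out.tsv from the fine-tuned checkpoint would go through ClassificationModel.predict, which takes a list of raw texts and returns (predictions, raw_outputs). A hedged sketch, assuming dev.tsv's text column is named "text":

# Hedged sketch: write dev predictions from the best checkpoint.
# The "text" column name is an assumption about dev.tsv's header.
predictions, raw_outputs = model.predict(dev_df["text"].tolist())
with open("dev-0/out.tsv", "w", encoding="utf-8") as f:
    f.writelines(str(p) + "\n" for p in predictions)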
5152  test-A/out.tsv  Normal file
File diff suppressed because it is too large