ulaniuk 2022-05-05 19:32:53 +02:00
commit da1a89450a
6 changed files with 11166 additions and 0 deletions

25 README.md Normal file

@ -0,0 +1,25 @@
Sport Texts Classification Challenge - Ball
======================
Given a Polish sports article, predict whether the described sport involves a ball. Evaluation metrics: Accuracy and Likelihood (a local sketch of both is given at the end of this README).
Classes
-------
* `1` — ball
* `0` — no-ball
Directory structure
-------------------
* `README.md` — this file
* `config.txt` — configuration file
* `train/` — directory with training data
* `train/train.tsv` — sample train set
* `dev-0/` — directory with dev (test) data
* `dev-0/in.tsv` — input data for the dev set
* `dev-0/expected.tsv` — expected (reference) data for the dev set
* `test-A/` — directory with test data
* `test-A/in.tsv` — input data for the test set
* `test-A/expected.tsv` — expected (reference) data for the test set
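Evaluation example
------------------
geval is the reference evaluation tool for this challenge. As a minimal local
sketch, the two configured metrics can be approximated in Python, assuming that
`out.tsv` holds one hard label or one probability of class `1` per line,
aligned with `expected.tsv`, and that Likelihood means the geometric mean of
the probability assigned to the true class (an assumption of this sketch, not a
statement about geval's implementation):

```python
import numpy as np
import pandas as pd

# dev-0 reference labels and predictions, one value per line.
expected = pd.read_csv('dev-0/expected.tsv', sep='\t', header=None)[0].to_numpy()
predicted = pd.read_csv('dev-0/out.tsv', sep='\t', header=None)[0].to_numpy(dtype=float)

# Accuracy: fraction of correct hard decisions (probabilities thresholded at 0.5).
accuracy = float(np.mean((predicted >= 0.5).astype(int) == expected))

# Assumed Likelihood: geometric mean of the probability given to the true class,
# clipped so that hard 0/1 outputs do not collapse the mean to zero.
p_true = np.clip(np.where(expected == 1, predicted, 1.0 - predicted), 1e-6, 1.0)
likelihood = float(np.exp(np.log(p_true).mean()))

print(f'Accuracy:   {accuracy:.5f}')   # --precision 5 in config.txt
print(f'Likelihood: {likelihood:.5f}')
```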

1 config.txt Normal file

@ -0,0 +1 @@
--metric Likelihood --metric Accuracy --precision 5

5452 dev-0/out.tsv Normal file
File diff suppressed because it is too large.

182 run.ipynb Normal file

@ -0,0 +1,182 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"stworzyć klasyfikator bazujący na naiwnym bayessie (może być gotowa biblioteka), może też korzystać z gotowych implementacji tfidf\n",
"\n",
"stworzyć predykcje w plikach dev-0/out.tsv oraz test-A/out.tsv\n",
"\n",
"wynik accuracy sprawdzony za pomocą narzędzia geval (patrz poprzednie zadanie) powinien wynosić conajmniej 0.67\n",
"\n",
"proszę umieścić predykcję oraz skrypty generujące (w postaci tekstowej a nie jupyter) w repo, zadanie oddajemy w gonito, termin 10.05, 40 punktów\n",
"\n",
"Output label is the probability of a paranormal subreddit."
]
},
{
"cell_type": "code",
"execution_count": 135,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"\n",
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
"from sklearn.naive_bayes import MultinomialNB\n",
"from sklearn.pipeline import make_pipeline"
]
},
{
"cell_type": "code",
"execution_count": 136,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\micha\\AppData\\Local\\Temp\\ipykernel_1208\\74507919.py:1: FutureWarning: The error_bad_lines argument has been deprecated and will be removed in a future version. Use on_bad_lines in the future.\n",
"\n",
"\n",
" data = pd.read_csv('train/train.tsv', sep='\\t', header=None, error_bad_lines=False)\n",
"b'Skipping line 25706: expected 2 fields, saw 3\\nSkipping line 58881: expected 2 fields, saw 3\\nSkipping line 73761: expected 2 fields, saw 3\\n'\n"
]
}
],
"source": [
"data = pd.read_csv('train/train.tsv', sep='\\t', header=None, error_bad_lines=False)"
]
},
{
"cell_type": "code",
"execution_count": 137,
"metadata": {},
"outputs": [],
"source": [
"X = data[1]\n",
"\n",
"with open('dev-0/in.tsv', 'r', encoding='utf8') as f:\n",
" Xdev = f.readlines()\n",
"Xdev = pd.Series(Xdev)\n",
"\n",
"with open('test-A/in.tsv', 'r', encoding='utf8') as f:\n",
" Xtest = f.readlines()\n",
"Xtest = pd.Series(Xtest)"
]
},
{
"cell_type": "code",
"execution_count": 138,
"metadata": {},
"outputs": [],
"source": [
"y = data[0].astype('string')\n",
"\n",
"ydev = pd.read_csv('dev-0/expected.tsv', sep='\\t', header=None)\n",
"ydev = ydev.squeeze()"
]
},
{
"cell_type": "code",
"execution_count": 139,
"metadata": {},
"outputs": [],
"source": [
"model = make_pipeline(TfidfVectorizer(), MultinomialNB())"
]
},
{
"cell_type": "code",
"execution_count": 140,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Pipeline(steps=[('tfidfvectorizer', TfidfVectorizer()),\n",
" ('multinomialnb', MultinomialNB())])"
]
},
"execution_count": 140,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model.fit(X, y)"
]
},
{
"cell_type": "code",
"execution_count": 141,
"metadata": {},
"outputs": [],
"source": [
"predictions_dev0 = model.predict(Xdev)\n",
"predictions_dev0 = pd.Series(predictions_dev0)\n",
"predictions_dev0 = predictions_dev0.astype('int')"
]
},
{
"cell_type": "code",
"execution_count": 142,
"metadata": {},
"outputs": [],
"source": [
"with open('dev-0/out.tsv', 'wt') as f:\n",
" for pred in predictions_dev0:\n",
" f.write(str(pred)+'\\n')"
]
},
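{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hedged local sanity check (not part of the original submission): accuracy on dev-0\n",
"# against expected.tsv, since the assignment asks for at least 0.67 as measured by geval.\n",
"from sklearn.metrics import accuracy_score\n",
"\n",
"accuracy_score(ydev.astype('int'), predictions_dev0)"
]
},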
{
"cell_type": "code",
"execution_count": 143,
"metadata": {},
"outputs": [],
"source": [
"predictions_testA = model.predict(Xtest)\n",
"predictions_testA = pd.Series(predictions_testA)\n",
"predictions_testA = predictions_testA.astype('int')"
]
},
{
"cell_type": "code",
"execution_count": 144,
"metadata": {},
"outputs": [],
"source": [
"with open('test-A/out.tsv', 'wt') as f:\n",
" for pred in predictions_testA:\n",
" f.write(str(pred)+'\\n')"
]
}
],
"metadata": {
"interpreter": {
"hash": "df93b008b708122b991044997d8941ca5d5845b048d54848454a010a3b0bd41a"
},
"kernelspec": {
"display_name": "Python 3.8.13 ('eks')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.13"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}

59 run.py Normal file

@ -0,0 +1,59 @@
import pandas as pd
import numpy as np

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

# Training data: column 0 is the label (1 = ball, 0 = no-ball), column 1 is the article text.
# NOTE: error_bad_lines is deprecated in newer pandas; on_bad_lines='skip' is its replacement.
data = pd.read_csv('train/train.tsv', sep='\t', header=None, error_bad_lines=False)
X = data[1]
y = data[0].astype('string')

# Dev and test inputs: one article per line.
with open('dev-0/in.tsv', 'r', encoding='utf8') as f:
    Xdev = f.readlines()
Xdev = pd.Series(Xdev)

with open('test-A/in.tsv', 'r', encoding='utf8') as f:
    Xtest = f.readlines()
Xtest = pd.Series(Xtest)

# Dev reference labels (loaded for optional local evaluation; not used for training).
ydev = pd.read_csv('dev-0/expected.tsv', sep='\t', header=None)
ydev = ydev.squeeze()

# TF-IDF features fed into a multinomial naive Bayes classifier.
model = make_pipeline(TfidfVectorizer(), MultinomialNB())
model.fit(X, y)

# Predict hard labels for dev-0 and write one label per line.
predictions_dev0 = pd.Series(model.predict(Xdev)).astype('int')
with open('dev-0/out.tsv', 'wt') as f:
    for pred in predictions_dev0:
        f.write(str(pred) + '\n')

# Predict hard labels for test-A and write one label per line.
predictions_testA = pd.Series(model.predict(Xtest)).astype('int')
with open('test-A/out.tsv', 'wt') as f:
    for pred in predictions_testA:
        f.write(str(pred) + '\n')

5447 test-A/out.tsv Normal file
File diff suppressed because it is too large.