final
This commit is contained in:
parent
97421a97ee
commit
5b4c7405cc
6
.ipynb_checkpoints/run-checkpoint.ipynb
Normal file
6
.ipynb_checkpoints/run-checkpoint.ipynb
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"cells": [],
|
||||
"metadata": {},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
88
.ipynb_checkpoints/run-checkpoint.py
Normal file
88
.ipynb_checkpoints/run-checkpoint.py
Normal file
@ -0,0 +1,88 @@
|
||||
import re

import pandas as pd
import vowpalwabbit


def to_vw_format(row, map_dict):
    """Convert one dataframe row into a Vowpal Wabbit example string.

    Returns "<label> | year:<year> text:<text>\n". The label is the 1-based
    class id from *map_dict*; rows without a known category (prediction-time
    input has no 'category' column) produce an unlabeled example.
    """
    text = row['text'].replace('\n', ' ').lower().strip()
    # Keep only letters, spaces, apostrophes and hyphens.
    # BUGFIX: the original pattern "[^a-zA-Z -']" made " -'" a character
    # RANGE (space..apostrophe, which also kept !"#$%&); placing '-' last
    # makes it a literal hyphen.
    text = re.sub(r"[^a-zA-Z '-]", '', text)
    year = row['year']
    try:
        category = map_dict[row['category']]
    except KeyError:
        # Missing 'category' column (dev/test rows) -> unlabeled example.
        category = ''
    return f"{category} | year:{year} text:{text}\n"


def _load_split(path):
    """Read one in.tsv split: drop the unused middle column, name the rest."""
    df = pd.read_csv(path, header=None, sep='\t')
    df = df.drop(1, axis=1)  # column 1 is unused in this pipeline
    df.columns = ['year', 'text']
    return df


def _write_predictions(model, df, inv_map, out_path):
    """Predict every VW example in df['train_input'], one label per line."""
    with open(out_path, 'w', encoding='utf-8') as file:
        for example in df['train_input']:
            prediction = model.predict(example)
            # inv_map maps VW class id back to the original category name;
            # .get() keeps the original behavior of writing "None" on a miss.
            file.write(str(inv_map.get(prediction)) + '\n')


### Read training data
x_train = _load_split('train/in.tsv')
y_train = pd.read_csv('train/expected.tsv', header=None, sep='\t')
y_train.columns = ['category']
data = pd.concat([x_train, y_train], axis=1)

# Map each category name to a 1-based VW class id.
map_dict = {}
for i, x in enumerate(data['category'].unique()):
    map_dict[x] = i + 1

# NOTE(review): --oaa 7 hard-codes seven classes; confirm len(map_dict) == 7.
model = vowpalwabbit.Workspace('--oaa 7')

# BUGFIX: in the original flat script this apply() ran BEFORE the
# `def to_vw_format` statement, raising NameError; definitions now come first.
data['train_input'] = data.apply(lambda row: to_vw_format(row, map_dict), axis=1)
for example in data['train_input']:
    model.learn(example)

### Read evaluation data
data_dev = _load_split('dev-0/in.tsv')
data_dev['train_input'] = data_dev.apply(lambda row: to_vw_format(row, map_dict), axis=1)

data_A = _load_split('test-A/in.tsv')
data_A['train_input'] = data_A.apply(lambda row: to_vw_format(row, map_dict), axis=1)

data_B = _load_split('test-B/in.tsv')
data_B['train_input'] = data_B.apply(lambda row: to_vw_format(row, map_dict), axis=1)

### Write predictions
# Build the reverse mapping ONCE (the original rebuilt it per prediction).
inv_map = {value: key for key, value in map_dict.items()}
_write_predictions(model, data_dev, inv_map, 'dev-0/out.tsv')
_write_predictions(model, data_A, inv_map, 'test-A/out.tsv')
_write_predictions(model, data_B, inv_map, 'test-B/out.tsv')
149134
dev-0/out.tsv
Normal file
149134
dev-0/out.tsv
Normal file
File diff suppressed because it is too large
Load Diff
122
run.ipynb
Normal file
122
run.ipynb
Normal file
@ -0,0 +1,122 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "26d23c74-6b2a-469d-9b26-3cc3f2ab32ba",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import vowpalwabbit\n",
|
||||
"import pandas as pd\n",
|
||||
"import re\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"x_train = pd.read_csv('train/in.tsv', header=None, sep='\\t')\n",
|
||||
"y_train = pd.read_csv('train/expected.tsv', header=None, sep='\\t')\n",
|
||||
"\n",
|
||||
"x_train = x_train.drop(1, axis=1)\n",
|
||||
"x_train.columns = ['year', 'text']\n",
|
||||
"y_train.columns = ['category']\n",
|
||||
"\n",
|
||||
"data = pd.concat([x_train, y_train], axis=1)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"model = vowpalwabbit.Workspace('--oaa 7')\n",
|
||||
"\n",
|
||||
"map_dict = {}\n",
|
||||
"\n",
|
||||
"for i, x in enumerate(data['category'].unique()):\n",
|
||||
" map_dict[x] = i+1 \n",
|
||||
" \n",
|
||||
"data['train_input'] = data.apply(lambda row: to_vw_format(row, map_dict), axis=1)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"for example in data['train_input']:\n",
|
||||
" model.learn(example)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def to_vw_format(row, map_dict):\n",
|
||||
" text = row['text'].replace('\\n', ' ').lower().strip()\n",
|
||||
" text = re.sub(\"[^a-zA-Z -']\", '', text)\n",
|
||||
" year = row['year']\n",
|
||||
" try:\n",
|
||||
" category = map_dict[row['category']]\n",
|
||||
" except KeyError:\n",
|
||||
" category = ''\n",
|
||||
"\n",
|
||||
" vw_input = f\"{category} | year:{year} text:{text}\\n\"\n",
|
||||
"\n",
|
||||
" return vw_input\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Read data \n",
|
||||
"\n",
|
||||
"data_dev = pd.read_csv('dev-0/in.tsv', header=None, sep='\\t')\n",
|
||||
"data_dev = data_dev.drop(1, axis=1)\n",
|
||||
"data_dev.columns = ['year', 'text']\n",
|
||||
"data_dev['train_input'] = data_dev.apply(lambda row: to_vw_format(row, map_dict), axis=1)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"data_A = pd.read_csv('test-A/in.tsv', header=None, sep='\\t')\n",
|
||||
"data_A = data_A.drop(1, axis=1)\n",
|
||||
"data_A.columns = ['year', 'text']\n",
|
||||
"data_A['train_input'] = data_A.apply(lambda row: to_vw_format(row, map_dict), axis=1)\n",
|
||||
"\n",
|
||||
"data_B = pd.read_csv('test-B/in.tsv', header=None, sep='\\t')\n",
|
||||
"data_B = data_B.drop(1, axis=1)\n",
|
||||
"data_B.columns = ['year', 'text']\n",
|
||||
"data_B['train_input'] = data_B.apply(lambda row: to_vw_format(row, map_dict), axis=1)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Write predictions \n",
|
||||
"\n",
|
||||
"with open(\"dev-0/out.tsv\", 'w', encoding='utf-8') as file:\n",
|
||||
" for test_example in data_dev['train_input']:\n",
|
||||
" prediction_dev = model.predict(test_example)\n",
|
||||
" text_prediction_dev = dict((value, key) for key, value in map_dict.items()).get(prediction_dev)\n",
|
||||
" file.write(str(text_prediction_dev) + '\\n')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"with open(\"test-A/out.tsv\", 'w', encoding='utf-8') as file:\n",
|
||||
" for test_example in data_A['train_input']:\n",
|
||||
" prediction_A = model.predict(test_example)\n",
|
||||
" text_prediction_A = dict((value, key) for key, value in map_dict.items()).get(prediction_A)\n",
|
||||
" file.write(str(text_prediction_A) + '\\n')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"with open(\"test-B/out.tsv\", 'w', encoding='utf-8') as file:\n",
|
||||
" for test_example in data_B['train_input']:\n",
|
||||
" prediction_B = model.predict(test_example)\n",
|
||||
" text_prediction_B = dict((value, key) for key, value in map_dict.items()).get(prediction_B)\n",
|
||||
" file.write(str(text_prediction_B) + '\\n')\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
88
run.py
Normal file
88
run.py
Normal file
@ -0,0 +1,88 @@
|
||||
import re

import pandas as pd
import vowpalwabbit


def to_vw_format(row, map_dict):
    """Convert one dataframe row into a Vowpal Wabbit example string.

    Returns "<label> | year:<year> text:<text>\n". The label is the 1-based
    class id from *map_dict*; rows without a known category (prediction-time
    input has no 'category' column) produce an unlabeled example.
    """
    text = row['text'].replace('\n', ' ').lower().strip()
    # Keep only letters, spaces, apostrophes and hyphens.
    # BUGFIX: the original pattern "[^a-zA-Z -']" made " -'" a character
    # RANGE (space..apostrophe, which also kept !"#$%&); placing '-' last
    # makes it a literal hyphen.
    text = re.sub(r"[^a-zA-Z '-]", '', text)
    year = row['year']
    try:
        category = map_dict[row['category']]
    except KeyError:
        # Missing 'category' column (dev/test rows) -> unlabeled example.
        category = ''
    return f"{category} | year:{year} text:{text}\n"


def _load_split(path):
    """Read one in.tsv split: drop the unused middle column, name the rest."""
    df = pd.read_csv(path, header=None, sep='\t')
    df = df.drop(1, axis=1)  # column 1 is unused in this pipeline
    df.columns = ['year', 'text']
    return df


def _write_predictions(model, df, inv_map, out_path):
    """Predict every VW example in df['train_input'], one label per line."""
    with open(out_path, 'w', encoding='utf-8') as file:
        for example in df['train_input']:
            prediction = model.predict(example)
            # inv_map maps VW class id back to the original category name;
            # .get() keeps the original behavior of writing "None" on a miss.
            file.write(str(inv_map.get(prediction)) + '\n')


### Read training data
x_train = _load_split('train/in.tsv')
y_train = pd.read_csv('train/expected.tsv', header=None, sep='\t')
y_train.columns = ['category']
data = pd.concat([x_train, y_train], axis=1)

# Map each category name to a 1-based VW class id.
map_dict = {}
for i, x in enumerate(data['category'].unique()):
    map_dict[x] = i + 1

# NOTE(review): --oaa 7 hard-codes seven classes; confirm len(map_dict) == 7.
model = vowpalwabbit.Workspace('--oaa 7')

# BUGFIX: in the original flat script this apply() ran BEFORE the
# `def to_vw_format` statement, raising NameError; definitions now come first.
data['train_input'] = data.apply(lambda row: to_vw_format(row, map_dict), axis=1)
for example in data['train_input']:
    model.learn(example)

### Read evaluation data
data_dev = _load_split('dev-0/in.tsv')
data_dev['train_input'] = data_dev.apply(lambda row: to_vw_format(row, map_dict), axis=1)

data_A = _load_split('test-A/in.tsv')
data_A['train_input'] = data_A.apply(lambda row: to_vw_format(row, map_dict), axis=1)

data_B = _load_split('test-B/in.tsv')
data_B['train_input'] = data_B.apply(lambda row: to_vw_format(row, map_dict), axis=1)

### Write predictions
# Build the reverse mapping ONCE (the original rebuilt it per prediction).
inv_map = {value: key for key, value in map_dict.items()}
_write_predictions(model, data_dev, inv_map, 'dev-0/out.tsv')
_write_predictions(model, data_A, inv_map, 'test-A/out.tsv')
_write_predictions(model, data_B, inv_map, 'test-B/out.tsv')
148308
test-A/out.tsv
Normal file
148308
test-A/out.tsv
Normal file
File diff suppressed because it is too large
Load Diff
79119
test-B/out.tsv
Normal file
79119
test-B/out.tsv
Normal file
File diff suppressed because it is too large
Load Diff
1186898
train/expected.tsv
Normal file
1186898
train/expected.tsv
Normal file
File diff suppressed because it is too large
Load Diff
1186898
train/in.tsv
Normal file
1186898
train/in.tsv
Normal file
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue
Block a user