s478839
This commit is contained in:
parent
97421a97ee
commit
565e5dfb90
123
.ipynb_checkpoints/run-checkpoint.ipynb
Normal file
123
.ipynb_checkpoints/run-checkpoint.ipynb
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"id": "4206eb3f",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import vowpalwabbit\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"import re"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"id": "fde46276",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def prediction(path_in, path_out, model, categories):\n",
|
||||||
|
" data = pd.read_csv(path_in, header=None, sep='\\t')\n",
|
||||||
|
" data = data.drop(1, axis=1)\n",
|
||||||
|
" data.columns = ['year', 'text']\n",
|
||||||
|
"\n",
|
||||||
|
" data['train_input'] = data.apply(lambda row: to_vowpalwabbit(row, categories), axis=1)\n",
|
||||||
|
"\n",
|
||||||
|
" with open(path_out, 'w', encoding='utf-8') as file:\n",
|
||||||
|
" for example in data['train_input']:\n",
|
||||||
|
" predicted = model.predict(example)\n",
|
||||||
|
" text_predicted = dict((value, key) for key, value in map_dict.items()).get(predicted)\n",
|
||||||
|
" file.write(str(text_predicted) + '\\n')\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"id": "27e69709",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def to_vowpalwabbit(row, categories):\n",
|
||||||
|
" text = row['text'].replace('\\n', ' ').lower().strip()\n",
|
||||||
|
" text = re.sub(\"[^a-zA-Z -']\", '', text)\n",
|
||||||
|
" text = re.sub(\" +\", ' ', text)\n",
|
||||||
|
" year = row['year']\n",
|
||||||
|
" try:\n",
|
||||||
|
" category = categories[row['category']]\n",
|
||||||
|
" except KeyError:\n",
|
||||||
|
" category = ''\n",
|
||||||
|
"\n",
|
||||||
|
" vw = f\"{category} | year:{year} text:{text}\\n\"\n",
|
||||||
|
"\n",
|
||||||
|
" return vw"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"id": "c406b425",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"{'news': 1, 'sport': 2, 'opinion': 3, 'business': 4, 'culture': 5, 'lifestyle': 6, 'removed': 7}\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"x_train = pd.read_csv('train/in.tsv', header=None, sep='\\t')\n",
|
||||||
|
"x_train = x_train.drop(1, axis=1)\n",
|
||||||
|
"x_train.columns = ['year', 'text']\n",
|
||||||
|
"\n",
|
||||||
|
"y_train = pd.read_csv('train/expected.tsv', header=None, sep='\\t')\n",
|
||||||
|
"y_train.columns = ['category']\n",
|
||||||
|
"\n",
|
||||||
|
"data = pd.concat([x_train, y_train], axis=1)\n",
|
||||||
|
"\n",
|
||||||
|
"categories = {}\n",
|
||||||
|
"\n",
|
||||||
|
"for i, x in enumerate(data['category'].unique()):\n",
|
||||||
|
" categories[x] = i+1\n",
|
||||||
|
"\n",
|
||||||
|
"print(categories)\n",
|
||||||
|
" \n",
|
||||||
|
"data['train_input'] = data.apply(lambda row: to_vowpalwabbit(row, categories), axis=1)\n",
|
||||||
|
"\n",
|
||||||
|
"model = vowpalwabbit.Workspace('--oaa 3 --quiet')\n",
|
||||||
|
"\n",
|
||||||
|
"for example in data['train_input']:\n",
|
||||||
|
" model.learn(example)\n",
|
||||||
|
"\n",
|
||||||
|
"prediction('dev-0/in.tsv', 'dev-0/out.tsv', model, categories)\n",
|
||||||
|
"prediction('test-A/in.tsv', 'test-A/out.tsv', model, categories)\n",
|
||||||
|
"prediction('test-B/in.tsv', 'test-B/out.tsv', model, categories)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.7"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
149134
dev-0/.ipynb_checkpoints/out-checkpoint.tsv
Normal file
149134
dev-0/.ipynb_checkpoints/out-checkpoint.tsv
Normal file
File diff suppressed because it is too large
Load Diff
149134
dev-0/out.tsv
Normal file
149134
dev-0/out.tsv
Normal file
File diff suppressed because it is too large
Load Diff
123
run.ipynb
Normal file
123
run.ipynb
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"id": "4206eb3f",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import vowpalwabbit\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"import re"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"id": "fde46276",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def prediction(path_in, path_out, model, categories):\n",
|
||||||
|
" data = pd.read_csv(path_in, header=None, sep='\\t')\n",
|
||||||
|
" data = data.drop(1, axis=1)\n",
|
||||||
|
" data.columns = ['year', 'text']\n",
|
||||||
|
"\n",
|
||||||
|
" data['train_input'] = data.apply(lambda row: to_vowpalwabbit(row, categories), axis=1)\n",
|
||||||
|
"\n",
|
||||||
|
" with open(path_out, 'w', encoding='utf-8') as file:\n",
|
||||||
|
" for example in data['train_input']:\n",
|
||||||
|
" predicted = model.predict(example)\n",
|
||||||
|
" text_predicted = dict((value, key) for key, value in map_dict.items()).get(predicted)\n",
|
||||||
|
" file.write(str(text_predicted) + '\\n')\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"id": "27e69709",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def to_vowpalwabbit(row, categories):\n",
|
||||||
|
" text = row['text'].replace('\\n', ' ').lower().strip()\n",
|
||||||
|
" text = re.sub(\"[^a-zA-Z -']\", '', text)\n",
|
||||||
|
" text = re.sub(\" +\", ' ', text)\n",
|
||||||
|
" year = row['year']\n",
|
||||||
|
" try:\n",
|
||||||
|
" category = categories[row['category']]\n",
|
||||||
|
" except KeyError:\n",
|
||||||
|
" category = ''\n",
|
||||||
|
"\n",
|
||||||
|
" vw = f\"{category} | year:{year} text:{text}\\n\"\n",
|
||||||
|
"\n",
|
||||||
|
" return vw"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"id": "c406b425",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"{'news': 1, 'sport': 2, 'opinion': 3, 'business': 4, 'culture': 5, 'lifestyle': 6, 'removed': 7}\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"x_train = pd.read_csv('train/in.tsv', header=None, sep='\\t')\n",
|
||||||
|
"x_train = x_train.drop(1, axis=1)\n",
|
||||||
|
"x_train.columns = ['year', 'text']\n",
|
||||||
|
"\n",
|
||||||
|
"y_train = pd.read_csv('train/expected.tsv', header=None, sep='\\t')\n",
|
||||||
|
"y_train.columns = ['category']\n",
|
||||||
|
"\n",
|
||||||
|
"data = pd.concat([x_train, y_train], axis=1)\n",
|
||||||
|
"\n",
|
||||||
|
"categories = {}\n",
|
||||||
|
"\n",
|
||||||
|
"for i, x in enumerate(data['category'].unique()):\n",
|
||||||
|
" categories[x] = i+1\n",
|
||||||
|
"\n",
|
||||||
|
"print(categories)\n",
|
||||||
|
" \n",
|
||||||
|
"data['train_input'] = data.apply(lambda row: to_vowpalwabbit(row, categories), axis=1)\n",
|
||||||
|
"\n",
|
||||||
|
"model = vowpalwabbit.Workspace('--oaa 3 --quiet')\n",
|
||||||
|
"\n",
|
||||||
|
"for example in data['train_input']:\n",
|
||||||
|
" model.learn(example)\n",
|
||||||
|
"\n",
|
||||||
|
"prediction('dev-0/in.tsv', 'dev-0/out.tsv', model, categories)\n",
|
||||||
|
"prediction('test-A/in.tsv', 'test-A/out.tsv', model, categories)\n",
|
||||||
|
"prediction('test-B/in.tsv', 'test-B/out.tsv', model, categories)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.7"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
76
run.py
Normal file
76
run.py
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
# In[6]:
|
||||||
|
|
||||||
|
|
||||||
|
import vowpalwabbit
|
||||||
|
import pandas as pd
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
# In[7]:
|
||||||
|
|
||||||
|
|
||||||
|
def prediction(path_in, path_out, model, categories):
    """Predict a category for every row of *path_in*; write one label name per line to *path_out*.

    Parameters
    ----------
    path_in : str
        Tab-separated input file whose columns are (year, <dropped>, text).
    path_out : str
        Destination file; receives one predicted category name per input row.
    model : vowpalwabbit.Workspace
        A trained VW one-against-all model.
    categories : dict[str, int]
        Mapping from category name to the 1-based VW label used at training time.
    """
    data = pd.read_csv(path_in, header=None, sep='\t')
    data = data.drop(1, axis=1)
    data.columns = ['year', 'text']

    data['train_input'] = data.apply(lambda row: to_vowpalwabbit(row, categories), axis=1)

    # BUG FIX: the original referenced an undefined global `map_dict`
    # (NameError at runtime); the intended mapping is the `categories`
    # argument.  Invert it once (label -> name) instead of rebuilding the
    # reverse dict for every example inside the loop.
    label_to_name = {label: name for name, label in categories.items()}

    with open(path_out, 'w', encoding='utf-8') as file:
        for example in data['train_input']:
            predicted = model.predict(example)
            # .get() yields None for a label never seen at training time;
            # str() preserves the original "None"-on-unknown behavior.
            text_predicted = label_to_name.get(predicted)
            file.write(str(text_predicted) + '\n')
|
||||||
|
|
||||||
|
|
||||||
|
# In[8]:
|
||||||
|
|
||||||
|
|
||||||
|
def to_vowpalwabbit(row, categories):
    """Convert one data row into a Vowpal Wabbit example line.

    Parameters
    ----------
    row : mapping
        Must provide 'text' (str) and 'year'; 'category' is optional
        (absent at prediction time).
    categories : dict[str, int]
        Category name -> 1-based VW label.

    Returns
    -------
    str
        "<label> | year:<year> text:<text>\\n"; the label part is empty
        when the row has no (known) category.
    """
    text = row['text'].replace('\n', ' ').lower().strip()
    # Keep only letters, spaces, apostrophes and hyphens.
    # BUG FIX: the original class "[^a-zA-Z -']" made " -'" a character
    # *range* (space..apostrophe), accidentally preserving !"#$%& and
    # stripping literal hyphens.  Placing '-' last makes it literal.
    text = re.sub(r"[^a-zA-Z '-]", '', text)
    # Collapse runs of spaces left behind by character removal.
    text = re.sub(r" +", ' ', text)
    year = row['year']
    try:
        category = categories[row['category']]
    except KeyError:
        # Missing/unknown category (e.g. at prediction time) -> no label.
        category = ''

    return f"{category} | year:{year} text:{text}\n"
|
||||||
|
|
||||||
|
|
||||||
|
# In[9]:
|
||||||
|
|
||||||
|
|
||||||
|
# Train a VW one-against-all classifier on train/{in,expected}.tsv and
# emit predictions for the dev-0 / test-A / test-B splits.
x_train = pd.read_csv('train/in.tsv', header=None, sep='\t')
x_train = x_train.drop(1, axis=1)
x_train.columns = ['year', 'text']

y_train = pd.read_csv('train/expected.tsv', header=None, sep='\t')
y_train.columns = ['category']

data = pd.concat([x_train, y_train], axis=1)

# Assign a 1-based label to each distinct category (VW OAA labels start at 1).
categories = {x: i + 1 for i, x in enumerate(data['category'].unique())}

print(categories)

data['train_input'] = data.apply(lambda row: to_vowpalwabbit(row, categories), axis=1)

# BUG FIX: --oaa must equal the number of classes.  The data has 7
# categories (see the printed mapping) but the original hard-coded 3, so
# labels 4-7 could never be predicted.  Derive the count from the data.
model = vowpalwabbit.Workspace(f'--oaa {len(categories)} --quiet')

for example in data['train_input']:
    model.learn(example)

prediction('dev-0/in.tsv', 'dev-0/out.tsv', model, categories)
prediction('test-A/in.tsv', 'test-A/out.tsv', model, categories)
prediction('test-B/in.tsv', 'test-B/out.tsv', model, categories)
|
||||||
|
|
148308
test-A/out.tsv
Normal file
148308
test-A/out.tsv
Normal file
File diff suppressed because it is too large
Load Diff
79119
test-B/out.tsv
Normal file
79119
test-B/out.tsv
Normal file
File diff suppressed because it is too large
Load Diff
1186898
train/expected.tsv
Normal file
1186898
train/expected.tsv
Normal file
File diff suppressed because it is too large
Load Diff
1186898
train/in.tsv
Normal file
1186898
train/in.tsv
Normal file
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue
Block a user