2nd attempt

Sebastian 2022-05-18 00:39:36 +02:00
parent f130909428
commit c3d349bf38
5 changed files with 34260 additions and 34285 deletions


@@ -13,7 +13,8 @@
 "import sklearn\n",
 "from sklearn.feature_extraction.text import TfidfVectorizer\n",
 "from sklearn.linear_model import LinearRegression\n",
-"from sklearn.metrics import mean_squared_error"
+"from sklearn.metrics import mean_squared_error\n",
+"from sklearn.pipeline import make_pipeline"
 ]
 },
 {
@@ -43,7 +44,7 @@
 "source": [
 "train = pd.read_csv('train/train.tsv', header=None, sep='\\t', error_bad_lines=False)\n",
 "print(len(train))\n",
-"train = train.head(2000)"
+"train = train.head(30000)"
 ]
 },
 {
@@ -74,77 +75,34 @@
 {
 "cell_type": "code",
 "execution_count": 5,
-"id": "79099730-c5bd-4c5c-a0b0-788512d44226",
-"metadata": {},
-"outputs": [],
-"source": [
-"vectorizer = TfidfVectorizer()"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 6,
 "id": "0a1cce75-86a1-4f76-9416-e876e01699e3",
 "metadata": {},
-"outputs": [],
-"source": [
-"x_train = vectorizer.fit_transform(x_train)\n",
-"x_dev = vectorizer.transform(x_dev)"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 7,
-"id": "ef405093-6b4c-4558-add4-40bd0ced244e",
-"metadata": {},
-"outputs": [],
-"source": [
-"model = LinearRegression()"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 8,
-"id": "4354553c-6143-43c7-8845-3b2327819481",
-"metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"LinearRegression()"
+"Pipeline(steps=[('tfidfvectorizer', TfidfVectorizer()),\n",
+" ('linearregression', LinearRegression())])"
 ]
 },
-"execution_count": 8,
+"execution_count": 5,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
-"model.fit(x_train.toarray(), y_train)"
+"model = make_pipeline(TfidfVectorizer(), LinearRegression())\n",
+"model.fit(x_train, y_train)"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 6,
 "id": "cc1270d5-29dc-4f03-82c1-dc03f3e4fa00",
 "metadata": {},
-"outputs": [
-{
-"ename": "MemoryError",
-"evalue": "Unable to allocate 32.2 GiB for an array with shape (20000, 216394) and data type float64",
-"output_type": "error",
-"traceback": [
-"MemoryError: Unable to allocate 32.2 GiB for an array with shape (20000, 216394) and data type float64 (ANSI-coloured traceback through model.predict(x_dev.toarray()) and scipy.sparse toarray() omitted)"
-]
-}
-],
+"outputs": [],
 "source": [
-"dev_predicted = model.predict(x_dev.toarray())\n",
+"dev_predicted = model.predict(x_dev)\n",
 "\n",
 "with open('dev-0/out.tsv', 'wt') as f:\n",
 "    for i in dev_predicted:\n",
@@ -156,17 +114,25 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 7,
 "id": "223de995-5e91-4254-9214-4fc871c985e9",
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"4261.093474053155\n"
+]
+}
+],
 "source": [
 "print(mean_squared_error(dev_out, dev_expected))"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 8,
 "id": "3bc8418b-64f1-4163-a0ec-8e3293032341",
 "metadata": {},
 "outputs": [],
@@ -174,10 +140,10 @@
 "with open('test-A/in.tsv', 'r', encoding = 'utf-8') as f:\n",
 "    x_test = f.readlines()\n",
 "    \n",
-"x_test = pd.Series(x_test)\n",
-"x_test = vectorizer.transform(x_test)\n",
+"# x_test = pd.Series(x_test)\n",
+"# x_test = vectorizer.transform(x_test)\n",
 "\n",
-"test_predicted = model.predict(x_test.toarray())\n",
+"test_predicted = model.predict(x_test)\n",
 "\n",
 "with open('test-A/out.tsv', 'wt') as f:\n",
 "    for i in test_predicted:\n",
@@ -186,10 +152,19 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 9,
 "id": "a18aea56-7fa1-40bd-8aa3-bbaf9d66d6b7",
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"[NbConvertApp] Converting notebook run.ipynb to script\n",
+"[NbConvertApp] Writing 1607 bytes to run.py\n"
+]
+}
+],
 "source": [
 "!jupyter nbconvert --to script run.ipynb"
 ]

File diff suppressed because it is too large


@@ -44,7 +44,7 @@
 "source": [
 "train = pd.read_csv('train/train.tsv', header=None, sep='\\t', error_bad_lines=False)\n",
 "print(len(train))\n",
-"train = train.head(20000)"
+"train = train.head(30000)"
 ]
 },
 {
@@ -122,7 +122,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"4214.6524419302405\n"
+"4261.093474053155\n"
 ]
 }
 ],
@@ -161,7 +161,7 @@
 "output_type": "stream",
 "text": [
 "[NbConvertApp] Converting notebook run.ipynb to script\n",
-"[NbConvertApp] Writing 1608 bytes to run.py\n"
+"[NbConvertApp] Writing 1607 bytes to run.py\n"
 ]
 }
 ],

run.py

@@ -19,7 +19,7 @@ from sklearn.pipeline import make_pipeline
 train = pd.read_csv('train/train.tsv', header=None, sep='\t', error_bad_lines=False)
 print(len(train))
-train = train.head(100000)
+train = train.head(30000)
 
 
 # In[3]:

File diff suppressed because it is too large