commit f6163eb890
parent 75174effea
Author: Sebastian
Date:   2022-05-18 01:06:51 +02:00

5 changed files with 34285 additions and 34318 deletions

run.ipynb

@@ -44,7 +44,7 @@
 "source": [
 "train = pd.read_csv('train/train.tsv', header=None, sep='\\t', error_bad_lines=False)\n",
 "print(len(train))\n",
-"train = train[:10000]"
+"train = train[:30000]"
 ]
 },
 {
@@ -58,20 +58,6 @@
 "y_train = train[0]"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "dd454ce5-a06e-4fbd-a546-83fb94ad0390",
-"metadata": {},
-"outputs": [],
-"source": [
-"x_dev_data = pd.read_csv('dev-0/in.tsv', header=None, sep='\\t')\n",
-"x_dev = x_dev_data[0]\n",
-"x_dev[19999] = \"to jest tekst testowy\"\n",
-"x_dev[20000] = \"a ten tekst jest najbardziej testowy\"\n",
-"y_dev = pd.read_csv('dev-0/expected.tsv', header=None, sep='\\t')"
-]
-},
 {
 "cell_type": "code",
 "execution_count": 4,
@@ -95,23 +81,6 @@
 "model.fit(x_train, y_train)"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "cc1270d5-29dc-4f03-82c1-dc03f3e4fa00",
-"metadata": {},
-"outputs": [],
-"source": [
-"dev_predicted = model.predict(x_dev)\n",
-"\n",
-"with open('dev-0/out.tsv', 'wt') as f:\n",
-" for i in dev_predicted:\n",
-" f.write(str(i)+'\\n')\n",
-"\n",
-"dev_out = pd.read_csv('dev-0/out.tsv', header=None, sep='\\t')\n",
-"dev_expected = pd.read_csv('dev-0/expected.tsv', header=None, sep='\\t')\n"
-]
-},
 {
 "cell_type": "code",
 "execution_count": 5,
@@ -135,8 +104,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"x_dev = readFile('dev-0/in.tsv')\n",
-"dev_predicted = model.predict(x_dev)\n",
+"x_dev0 = readFile('dev-0/in.tsv')\n",
+"dev_predicted = model.predict(x_dev0)\n",
 "with open('dev-0/out.tsv', 'wt') as f:\n",
 " for i in dev_predicted:\n",
 " f.write(str(i)+'\\n')"
@@ -144,17 +113,21 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
-"id": "223de995-5e91-4254-9214-4fc871c985e9",
+"execution_count": 7,
+"id": "66e4c057-6a76-4d05-ad60-faa09381fdb1",
 "metadata": {},
 "outputs": [],
 "source": [
-"print(mean_squared_error(dev_out, dev_expected))"
+"x_dev1 = readFile('dev-0/in.tsv')\n",
+"dev_predicted = model.predict(x_dev1)\n",
+"with open('dev-0/out.tsv', 'wt') as f:\n",
+" for i in dev_predicted:\n",
+" f.write(str(i)+'\\n')"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 8,
 "id": "3bc8418b-64f1-4163-a0ec-8e3293032341",
 "metadata": {},
 "outputs": [],
@@ -174,10 +147,19 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 9,
 "id": "a18aea56-7fa1-40bd-8aa3-bbaf9d66d6b7",
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"[NbConvertApp] Converting notebook run.ipynb to script\n",
+"[NbConvertApp] Writing 1597 bytes to run.py\n"
+]
+}
+],
 "source": [
 "!jupyter nbconvert --to script run.ipynb"
 ]
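
For orientation, a rough standalone sketch of what the notebook does after this commit: cap the training data at 30000 rows, fit a TF-IDF + linear regression pipeline, and write dev-0 predictions from the readFile-style input. The column indices and on_bad_lines='skip' (standing in for the deprecated error_bad_lines=False used in the notebook) are assumptions, not taken from the diff.

# Sketch of the updated run.ipynb pipeline (assumptions noted in comments).
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline

train = pd.read_csv('train/train.tsv', header=None, sep='\t', on_bad_lines='skip')
train = train[:30000]                  # cap raised from 10000 to 30000 in this commit
x_train, y_train = train[1], train[0]  # column 1 assumed to hold the text, column 0 the target

model = make_pipeline(TfidfVectorizer(), LinearRegression())
model.fit(x_train, y_train)

def read_file(filename):
    # Mirrors the readFile helper used in run.py: keep only the first tab-separated field.
    with open(filename, 'r', encoding='utf-8') as file:
        return [line.split('\t')[0].strip() for line in file]

dev_predicted = model.predict(read_file('dev-0/in.tsv'))
with open('dev-0/out.tsv', 'wt') as f:
    for value in dev_predicted:
        f.write(str(value) + '\n')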

File diff suppressed because it is too large


run.py (39 lines changed)

@@ -19,7 +19,7 @@ from sklearn.pipeline import make_pipeline
 train = pd.read_csv('train/train.tsv', header=None, sep='\t', error_bad_lines=False)
 print(len(train))
-train = train.head(40000)
+train = train[:30000]
 # In[3]:
@@ -32,40 +32,43 @@ y_train = train[0]
# In[4]:
x_dev_data = pd.read_csv('dev-0/in.tsv', header=None, sep='\t')
x_dev = x_dev_data[0]
x_dev[19999] = "to jest tekst testowy"
x_dev[20000] = "a ten tekst jest najbardziej testowy"
y_dev = pd.read_csv('dev-0/expected.tsv', header=None, sep='\t')
model = make_pipeline(TfidfVectorizer(), LinearRegression())
model.fit(x_train, y_train)
# In[5]:
model = make_pipeline(TfidfVectorizer(), LinearRegression())
model.fit(x_train, y_train)
def readFile(filename):
result = []
with open(filename, 'r', encoding="utf-8") as file:
for line in file:
text = line.split("\t")[0].strip()
result.append(text)
return result
# In[6]:
dev_predicted = model.predict(x_dev)
x_dev0 = readFile('dev-0/in.tsv')
dev_predicted = model.predict(x_dev0)
with open('dev-0/out.tsv', 'wt') as f:
for i in dev_predicted:
f.write(str(i)+'\n')
dev_out = pd.read_csv('dev-0/out.tsv', header=None, sep='\t')
dev_expected = pd.read_csv('dev-0/expected.tsv', header=None, sep='\t')
# In[ ]:
# In[7]:
x_dev1 = readFile('dev-0/in.tsv')
dev_predicted = model.predict(x_dev1)
with open('dev-0/out.tsv', 'wt') as f:
for i in dev_predicted:
f.write(str(i)+'\n')
print(mean_squared_error(dev_out, dev_expected))
# In[8]:
# In[ ]:
with open('test-A/in.tsv', 'r', encoding = 'utf-8') as f:
@@ -81,7 +84,7 @@ with open('test-A/out.tsv', 'wt') as f:
 f.write(str(i)+'\n')
-# In[9]:
+# In[ ]:
 get_ipython().system('jupyter nbconvert --to script run.ipynb')
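
The cell that printed mean_squared_error on dev-0 was dropped in this commit; a minimal sketch of an equivalent offline check, along the lines of the removed code, assuming scikit-learn's mean_squared_error and the dev-0/out.tsv written above:

# Compare written predictions against the expected dev-0 labels.
import pandas as pd
from sklearn.metrics import mean_squared_error

dev_out = pd.read_csv('dev-0/out.tsv', header=None, sep='\t')
dev_expected = pd.read_csv('dev-0/expected.tsv', header=None, sep='\t')
print(mean_squared_error(dev_expected, dev_out))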

File diff suppressed because it is too large