#!/usr/bin/env python
# coding: utf-8

# In[1]:


import os
import pandas as pd
import numpy as np
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error


# In[2]:


# Load the training data, skipping malformed rows (on_bad_lines='skip' is the
# current pandas spelling of the deprecated error_bad_lines=False argument).
train = pd.read_csv('train/train.tsv', header=None, sep='\t', on_bad_lines='skip')
print(len(train))

# Keep only the first 1000 rows to speed up fitting.
train = train.head(1000)


# In[3]:


# Column 4 holds the raw text, column 0 the regression target.
x_train = train[4]
y_train = train[0]


# In[4]:


x_dev_data = pd.read_csv('dev-0/in.tsv', header=None, sep='\t')
x_dev = x_dev_data[0]

# Inject two hand-written Polish sentences into the dev texts at indices
# 19999 and 20000 ("this is a test text" / "and this text is the most test-like").
x_dev[19999] = "to jest tekst testowy"
x_dev[20000] = "a ten tekst jest najbardziej testowy"

y_dev = pd.read_csv('dev-0/expected.tsv', header=None, sep='\t')


# In[5]:


# TF-IDF features over the raw texts.
vectorizer = TfidfVectorizer()


# In[6]:


# Fit the vectorizer on the training texts and reuse it for the dev texts.
x_train = vectorizer.fit_transform(x_train)
x_dev = vectorizer.transform(x_dev)


# In[7]:


model = LinearRegression()


# In[8]:


model.fit(x_train.toarray(), y_train)


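# A minimal alternative sketch, not part of the original flow: LinearRegression
# also accepts scipy sparse input, so the TF-IDF matrix can be used directly,
# avoiding the memory cost of .toarray(); `sparse_model` is an illustrative name.
sparse_model = LinearRegression()
sparse_model.fit(x_train, y_train)

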
# In[9]:


dev_predicted = model.predict(x_dev.toarray())

# Write one prediction per line, matching the layout of expected.tsv.
with open('dev-0/out.tsv', 'wt') as f:
    for i in dev_predicted:
        f.write(str(i) + '\n')

# Read the predictions back alongside the reference values for evaluation.
dev_out = pd.read_csv('dev-0/out.tsv', header=None, sep='\t')
dev_expected = pd.read_csv('dev-0/expected.tsv', header=None, sep='\t')


# In[10]:


# Mean squared error of the dev predictions against the reference values.
print(mean_squared_error(dev_expected, dev_out))


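# A compact sketch of the same approach packaged as an sklearn Pipeline, fitted
# here on the raw training columns still available in `train`; the names
# `tfidf_lr` and `pipeline_dev_predicted` are illustrative only.
from sklearn.pipeline import make_pipeline

tfidf_lr = make_pipeline(TfidfVectorizer(), LinearRegression())
tfidf_lr.fit(train[4], train[0])
pipeline_dev_predicted = tfidf_lr.predict(x_dev_data[0])

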
# In[ ]:


# Read the raw test texts (readlines keeps the trailing newlines, which the
# tokenizer simply ignores).
with open('test-A/in.tsv', 'r', encoding='utf-8') as f:
    x_test = f.readlines()

x_test = pd.Series(x_test)
x_test = vectorizer.transform(x_test)

test_predicted = model.predict(x_test.toarray())

with open('test-A/out.tsv', 'wt') as f:
    for i in test_predicted:
        f.write(str(i) + '\n')