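# Baseline for the retroc2 date-prediction task: predict a Polish text's
# publication date as the midpoint of its start/end date interval, using
# TF-IDF features and ordinary linear regression.
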
import gensim
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression
from stop_words import get_stop_words


def read_train_file(in_path):
    # The training file is tab-separated: the publication interval
    # (start/end date), two title columns, and the raw text.
    colnames = ['start_date', 'end_date', 'title', 'sort_title', 'data']
    df_train = pd.read_csv(in_path, sep='\t', names=colnames)
    # Cap the corpus at 5000 rows to keep training fast.
    return df_train[:5000]


def read_evaluate_file(in_path):
    # Evaluation files contain only the raw text column.
    df_eval = pd.read_csv(in_path, sep='\t', names=['data'])
    return df_eval[:5000]


def train_date_mean(df):
    # Regression target: the midpoint of each text's publication interval.
    return (df['start_date'] + df['end_date']) / 2


def prepare_data(df):
    # Tokenize each document and drop Polish stop words.
    # Note: this helper is not used by the TF-IDF pipeline below.
    stop_words = set(get_stop_words('polish'))  # hoisted out of the loop
    document_list = []
    for line in df:
        tokens = gensim.utils.simple_preprocess(line, min_len=2, max_len=15)
        filtered_sentence = [word for word in tokens if word not in stop_words]
        document_list.append(filtered_sentence)
    return document_list

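
# --- Train: TF-IDF features, linear regression on the interval midpoint ---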
df = read_train_file('train/train.tsv')
date_mean_df = train_date_mean(df)  # df is already capped at 5000 rows
vectorizer = TfidfVectorizer(stop_words=get_stop_words('polish'))
train_vectorized_corpus = vectorizer.fit_transform(df['data'])
reg = LinearRegression().fit(train_vectorized_corpus, date_mean_df)

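# --- Predict dates for the dev-0 set, one value per output line ---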
df_evaluate = read_evaluate_file('dev-0/in.tsv')
evaluate_vectorized_corpus = vectorizer.transform(df_evaluate['data'])
evaluate = reg.predict(evaluate_vectorized_corpus)

with open('dev-0/out.tsv', 'w') as file:
    for e in evaluate:
        # %i truncates the predicted date to a whole year.
        file.write("%i\n" % e)