forked from kubapok/retroc2
First stage
parent 647c099815
commit 2ec127e514
20000 dev-0/meta.tsv/meta.tsv Normal file
File diff suppressed because it is too large
5000 dev-0/out.tsv Normal file
File diff suppressed because it is too large
47 mian.py Normal file
@@ -0,0 +1,47 @@
import gensim
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression
from stop_words import get_stop_words

# Imported in this first stage but not used by the pipeline below yet.
import os
import smart_open
from sklearn.cluster import KMeans
from gensim.models.doc2vec import Doc2Vec


def read_train_file(inDirectory):
    # train.tsv is tab-separated with no header; only the first 5000 rows are used.
    colnames = ['start_date', 'end_date', 'title', 'sort_title', 'data']
    df_train = pd.read_csv(inDirectory, sep="\t", names=colnames)
    return df_train[:5000]


def read_evaluate_file(inDirectory):
    # in.tsv holds a single text column with no header; only the first 5000 rows are used.
    colnames = ['data']
    df_eval = pd.read_csv(inDirectory, sep="\t", names=colnames)
    return df_eval[:5000]


def train_date_mean(df):
    # Regression target: the midpoint of each document's publication date interval.
    return (df['start_date'] + df['end_date']) / 2


def preper_data(df):
    # Tokenize and drop Polish stop words; helper not yet wired into the pipeline below.
    stop_words = get_stop_words('polish')
    document_list = []
    for line in df:
        tokens = gensim.utils.simple_preprocess(line, min_len=2, max_len=15)
        document_list.append([word for word in tokens if word not in stop_words])
    return document_list


# Train a TF-IDF + linear regression baseline on the first 5000 training rows.
df = read_train_file('train/train.tsv')
date_mean_df = train_date_mean(df)
vectorizer = TfidfVectorizer(stop_words=get_stop_words('polish'))
train_vectorized_corpus = vectorizer.fit_transform(df['data'])
reg = LinearRegression().fit(train_vectorized_corpus, date_mean_df)

# Predict dates for the dev-0 set and write them out, truncated to integers.
df_evaluate = read_evaluate_file('dev-0/in.tsv')
evaluate_vectorized_corpus = vectorizer.transform(df_evaluate['data'])
evaluate = reg.predict(evaluate_vectorized_corpus)
with open("dev-0/out.tsv", 'w') as file:
    for e in evaluate:
        file.write("%i\n" % e)
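The unused Doc2Vec import and the preper_data helper hint at a planned document-embedding variant of this baseline. A minimal sketch of how the tokenized documents could feed a Doc2Vec model, continuing from the variables defined in mian.py above; the TaggedDocument wrapping, vector size, and epoch count are my assumptions, not part of this commit:

# Sketch only: Doc2Vec features in place of TF-IDF (assumed hyperparameters).
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

tokenized = preper_data(df['data'])
tagged = [TaggedDocument(words=tokens, tags=[i]) for i, tokens in enumerate(tokenized)]
d2v = Doc2Vec(vector_size=100, min_count=2, epochs=20)
d2v.build_vocab(tagged)
d2v.train(tagged, total_examples=d2v.corpus_count, epochs=d2v.epochs)
train_vectors = [d2v.infer_vector(tokens) for tokens in tokenized]
reg_d2v = LinearRegression().fit(train_vectors, date_mean_df)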
107471 train/train.tsv Normal file
File diff suppressed because one or more lines are too long
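Since the commit writes predictions to dev-0/out.tsv, a quick sanity check is possible if the fork also carries a dev-0/expected.tsv reference file, as gonito-style challenges usually do; that path and the RMSE metric are assumptions on my part, not something this commit shows.

# Sketch only: compare out.tsv against an assumed dev-0/expected.tsv reference.
import pandas as pd

expected = pd.read_csv('dev-0/expected.tsv', sep='\t', names=['year'])['year']
predicted = pd.read_csv('dev-0/out.tsv', sep='\t', names=['year'])['year']
n = min(len(expected), len(predicted))  # out.tsv holds only the first 5000 rows
rmse = ((expected[:n] - predicted[:n]) ** 2).mean() ** 0.5
print(f"dev-0 RMSE: {rmse:.2f}")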