forked from kubapok/retroc2
Second stage
This commit is contained in:
parent
44375fa02b
commit
fa5e5e599e
dev-0/out.tsv: 24696 changed lines (file diff suppressed because it is too large)
mian.py: 24 changed lines
@@ -1,3 +1,5 @@
+import csv
+
 import gensim as gensim
 import smart_open
 from sklearn.feature_extraction.text import TfidfVectorizer
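The csv import added here is used in the next hunk, where read_evaluate_file gains quoting=csv.QUOTE_NONE. A minimal sketch of what that flag changes when reading this kind of tab-separated text (the inline sample string and variable names are illustrative, not part of the commit):

import csv
import io

import pandas as pd

# Two text lines that happen to be wrapped in double quotes, plus a third plain line.
raw = '"pierwszy wiersz\ndrugi wiersz"\ntrzeci wiersz\n'

default_df = pd.read_csv(io.StringIO(raw), sep="\t", names=["data"])
strict_df = pd.read_csv(io.StringIO(raw), sep="\t", names=["data"], quoting=csv.QUOTE_NONE)

# With default quoting the quoted field absorbs the embedded newline, so the
# first two lines collapse into one row; QUOTE_NONE treats quotes as plain
# characters and every physical line stays a separate record.
print(len(default_df), len(strict_df))  # 2 vs 3

The companion flag error_bad_lines=False drops lines with the wrong number of fields; in newer pandas releases that flag has been replaced by on_bad_lines='skip'.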
@@ -11,30 +13,21 @@ import pandas as pd
 def read_train_file(inDirectory):
     colnames = ['start_date', 'end_date', 'title', 'sort_title', 'data']
     df_train = pd.read_csv(inDirectory, sep="\t", names=colnames)
-    return df_train[:5000]
+    return df_train[:tain_set]
 
 def read_evaluate_file(inDirectory):
     colnames = ['data']
-    df_train = pd.read_csv(inDirectory, sep="\t", names=colnames)
-    return df_train[:5000]
+    df_train = pd.read_csv(inDirectory, sep="\t", names=colnames, quoting=csv.QUOTE_NONE, error_bad_lines=False)
+    return df_train
 
 def train_date_mean(df):
     date_mean = (df['start_date'] + df['end_date']) / 2
     return date_mean
 
-def preper_data(df):
-    document_list = list()
-    for line in df:
-        tokens = gensim.utils.simple_preprocess(line, min_len=2, max_len=15)
-        filtered_sentence = []
-        for word in tokens:
-            if word not in get_stop_words('polish'):
-                filtered_sentence.append(word)
-        document_list.append(filtered_sentence)
-    return document_list
+tain_set = 50000
 
 df = read_train_file('train/train.tsv')
-date_mean_df = train_date_mean(df)[:5000]
+date_mean_df = train_date_mean(df)
 vectorizer = TfidfVectorizer(stop_words=get_stop_words('polish'))
 train_vectorized_corpus = vectorizer.fit_transform(df['data'])
 reg = LinearRegression().fit(train_vectorized_corpus, date_mean_df)
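Put together, the pipeline this hunk leaves behind is: read the training TSV, take the midpoint of start_date and end_date as the target year, build TF-IDF features over the raw text with Polish stop words removed, and fit an ordinary least-squares regression. A self-contained sketch of that flow, with the stop_words import, the dev-0/in.tsv path, and the intermediate variable names assumed here rather than shown in the diff:

import csv

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression
from stop_words import get_stop_words  # assumed import; the diff only shows the call site

tain_set = 50000  # training slice size, name kept as in the commit

# train.tsv: publication interval, titles and raw text, tab-separated
colnames = ['start_date', 'end_date', 'title', 'sort_title', 'data']
df = pd.read_csv('train/train.tsv', sep="\t", names=colnames)[:tain_set]

# Regression target: the midpoint of the known publication interval
date_mean = (df['start_date'] + df['end_date']) / 2

# Features: TF-IDF weights over the raw text, Polish stop words removed
vectorizer = TfidfVectorizer(stop_words=get_stop_words('polish'))
train_vectors = vectorizer.fit_transform(df['data'])

# Ordinary least squares maps the sparse TF-IDF matrix to the target year
reg = LinearRegression().fit(train_vectors, date_mean)

# Dev-set prediction reuses the fitted vocabulary (transform, not fit_transform);
# the input path is assumed, it is not visible in this diff
dev = pd.read_csv('dev-0/in.tsv', sep="\t", names=['data'], quoting=csv.QUOTE_NONE)
evaluate = reg.predict(vectorizer.transform(dev['data']))

Plain linear regression on TF-IDF weights is a deliberate baseline; with a vocabulary much larger than tain_set, a regularised variant such as Ridge is a common next step.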
@@ -45,3 +38,6 @@ evaluate = reg.predict(evaluate_vectorized_corpus)
 with open("dev-0/out.tsv", 'w') as file:
     for e in evaluate:
         file.write("%i\n" % e)
+
+
+os.system("./geval -t dev-0")
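The closing step writes one integer per line to dev-0/out.tsv and then shells out to geval. A small sketch of the same step, assuming evaluate is the 1-D array of predicted years; it rounds rather than truncates and calls geval through subprocess, both choices of this sketch rather than of the commit:

import subprocess

def write_predictions(evaluate, path="dev-0/out.tsv"):
    # evaluate: predicted publication years as floats from the regression
    with open(path, "w") as out_file:
        for e in evaluate:
            # round() maps 1999.7 to 2000, while "%i" on the raw float truncates it to 1999
            out_file.write("%i\n" % round(e))

# Equivalent of os.system("./geval -t dev-0"): score dev-0/out.tsv against the
# expected file with the challenge's GEval binary; check=True raises on a non-zero exit
subprocess.run(["./geval", "-t", "dev-0"], check=True)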