Final main

This commit is contained in:
kociuba 2021-04-27 21:23:49 +02:00
parent b82539de84
commit a4edb0ed99
2 changed files with 20024 additions and 20021 deletions

File diff suppressed because it is too large Load Diff

45
mian.py
View File

@@ -1,43 +1,46 @@
import csv import csv
import gensim as gensim
import smart_open
from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression
from stop_words import get_stop_words from stop_words import get_stop_words
from sklearn.cluster import KMeans
from gensim.models.doc2vec import Doc2Vec
import os import os
import pandas as pd import pandas as pd
def read_train_file(inDirectory):
    """Read the tab-separated training file into a DataFrame.

    inDirectory: path to a header-less TSV file with five columns per row.
    Returns a DataFrame whose columns are named positionally as below.
    """
    # The file carries no header row, so column names are assigned here.
    colnames = ['start_date', 'end_date', 'title', 'sort_title', 'data']
    df_train = pd.read_csv(inDirectory, sep="\t", names=colnames)
    # Return the full frame — the old `df_train[:tain_set]` truncation
    # (removed by this commit) depended on a global defined after use.
    return df_train
def read_evaluate_file(inDirectory):
    """Read an evaluation file with one raw text document per line.

    inDirectory: path to a header-less, single-column TSV file.
    Returns a DataFrame with one column, 'data'.
    """
    colnames = ['data']
    # quoting=csv.QUOTE_NONE keeps quote characters as literal text.
    # on_bad_lines="skip" is the pandas>=1.3 spelling of the removed
    # error_bad_lines=False keyword: malformed rows (extra tab-separated
    # fields) are silently dropped instead of raising.
    df_evaluate = pd.read_csv(inDirectory, sep="\t", names=colnames,
                              quoting=csv.QUOTE_NONE, on_bad_lines="skip")
    return df_evaluate
def train_date_mean(df):
    """Return the midpoint of each row's 'start_date' and 'end_date'.

    df: DataFrame with numeric 'start_date' and 'end_date' columns.
    Returns a Series of per-row averages (the regression target).
    """
    return (df['start_date'] + df['end_date']) / 2
def linearRegresionTrain():
    """Fit the module-level model objects on the training split.

    Reads 'train/train.tsv', uses the midpoint of each row's date range
    as the regression target, and fits the globals `vectorizer` (TF-IDF)
    and `reg` (LinearRegression) in place.
    """
    train_df = read_train_file('train/train.tsv')
    targets = train_date_mean(train_df)
    # fit_transform both learns the vocabulary and vectorizes the corpus.
    features = vectorizer.fit_transform(train_df['data'])
    reg.fit(features, targets)
def linearRegresionEvaluate(inDirectory, outDirectory):
    """Predict a date for every line of inDirectory and write the results.

    inDirectory: single-column TSV of documents to score.
    outDirectory: output path; one '%f'-formatted prediction per line.
    Uses the already-fitted module-level `vectorizer` and `reg`.
    """
    eval_df = read_evaluate_file(inDirectory)
    # transform (not fit_transform): reuse the vocabulary learned in training.
    features = vectorizer.transform(eval_df['data'])
    predictions = reg.predict(features)
    with open(outDirectory, 'w') as out_file:
        out_file.writelines("%f\n" % p for p in predictions)
# Script entry: build the shared model objects, train once, then score
# every dataset split and run the external evaluator.
vectorizer = TfidfVectorizer(stop_words=get_stop_words('polish'))
reg = LinearRegression()
linearRegresionTrain()
for in_path, out_path in (
        ('dev-0/in.tsv', 'dev-0/out.tsv'),
        ('dev-1/in.tsv', 'dev-1/out.tsv'),
        ('test-A/in.tsv', 'test-A/out.tsv')):
    linearRegresionEvaluate(in_path, out_path)
os.system("./geval -t dev-0")
os.system("./geval -t dev-1")