Final main

kociuba 2021-04-27 21:23:49 +02:00
parent b82539de84
commit a4edb0ed99
2 changed files with 20024 additions and 20021 deletions

File diff suppressed because it is too large

mian.py (43 changed lines)

@@ -1,43 +1,46 @@
import csv
import gensim as gensim
import smart_open
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression
from stop_words import get_stop_words
from sklearn.cluster import KMeans
from gensim.models.doc2vec import Doc2Vec
import os
import pandas as pd


def read_train_file(inDirectory):
    # Training data: tab-separated, with the document text in the 'data' column.
    colnames = ['start_date', 'end_date', 'title', 'sort_title', 'data']
    df_train = pd.read_csv(inDirectory, sep="\t", names=colnames)
    return df_train


def read_evaluate_file(inDirectory):
    # Evaluation input: one raw text per line; quoting is disabled so stray
    # quote characters in the text do not break parsing.
    colnames = ['data']
    df_evaluate = pd.read_csv(inDirectory, sep="\t", names=colnames,
                              quoting=csv.QUOTE_NONE, error_bad_lines=False)
    return df_evaluate


def train_date_mean(df):
    # Regression target: the midpoint of each document's date range.
    date_mean = (df['start_date'] + df['end_date']) / 2
    return date_mean


def linearRegresionTrain():
    df = read_train_file('train/train.tsv')
    date_mean_df = train_date_mean(df)
    train_vectorized_corpus = vectorizer.fit_transform(df['data'])
    reg.fit(train_vectorized_corpus, date_mean_df)


def linearRegresionEvaluate(inDirectory, outDirectory):
    df_evaluate = read_evaluate_file(inDirectory)
    evaluate_vectorized_corpus = vectorizer.transform(df_evaluate['data'])
    evaluate = reg.predict(evaluate_vectorized_corpus)
    with open(outDirectory, 'w') as file:
        for e in evaluate:
            file.write("%f\n" % e)


vectorizer = TfidfVectorizer(stop_words=get_stop_words('polish'))
reg = LinearRegression()
linearRegresionTrain()
linearRegresionEvaluate('dev-0/in.tsv', 'dev-0/out.tsv')
linearRegresionEvaluate('dev-1/in.tsv', 'dev-1/out.tsv')
linearRegresionEvaluate('test-A/in.tsv', 'test-A/out.tsv')
os.system("./geval -t dev-0")
os.system("./geval -t dev-1")
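
For reference, a minimal self-contained sketch of the same idea: TF-IDF features feeding a linear regression that predicts a year. The toy texts, target years, and variable names below are invented purely for illustration and are not part of mian.py.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression

# Toy corpus with made-up (start_date + end_date) / 2 targets.
toy_texts = ["stara gazeta o wojnie", "nowy artykul o internecie", "tekst o kolei zelaznej"]
toy_years = [1918.5, 2005.5, 1890.5]

toy_vectorizer = TfidfVectorizer()
X_train = toy_vectorizer.fit_transform(toy_texts)   # learn vocabulary and IDF on training texts only
model = LinearRegression().fit(X_train, toy_years)  # regress the year on TF-IDF features

X_new = toy_vectorizer.transform(["artykul o internecie"])  # reuse the fitted vocabulary
print("%f" % model.predict(X_new)[0])                # one predicted year per input line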