forked from kubapok/auta-public
69 lines · 1.7 KiB · Python
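"""Train a linear regression on ./train/train.tsv and write predictions
for <working-dir>/in.tsv to <working-dir>/out.tsv, one value per line."""
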
import os
import sys

import pandas
from sklearn.linear_model import LinearRegression

# File-layout constants: column names live in ./names and training data in
# ./train/train.tsv; in.tsv is read from (and out.tsv written to) the
# working directory passed on the command line.
IN_FILE_NAME = "in.tsv"
OUT_FILE_NAME = "out.tsv"
MAIN_DIR = "."
NAMES_FILE_NAME = "names"
TRAIN_DIR = "train"
TRAIN_FILE_NAME = "train.tsv"
VALUE_SEP = "\t"
LINE_SEP = "\n"


def main(dirname: str):
    names = get_names()

    X, Y = get_train_data(names)

    # Fit ordinary least squares and write one prediction per line.
    clf = LinearRegression().fit(X, Y)
    clf.predict(get_input_data(dirname, names)).tofile(os.path.join(
        dirname, OUT_FILE_NAME), sep=LINE_SEP)


def get_train_data(names: list):
    train_path = os.path.join(MAIN_DIR, TRAIN_DIR, TRAIN_FILE_NAME)
    check_file(train_path)
    train_data = process_input(pandas.read_csv(
        train_path, header=None, sep=VALUE_SEP, names=names), names)

    # Discard implausible rows: the target column (names[0]) must exceed
    # 1000 and the first feature column (names[1]) must exceed 100.
    train_data = train_data.loc[train_data[names[0]] > 1000]
    train_data = train_data.loc[train_data[names[1]] > 100]

    # The first column is the regression target; all remaining columns
    # (including the one-hot dummies) are features.
    X = train_data.loc[:, train_data.columns != names[0]]
    Y = train_data[names[0]]

    return X, Y


def get_input_data(dirname, names):
    in_path = os.path.join(dirname, IN_FILE_NAME)
    check_file(in_path)
    # in.tsv has no target column, so only names[1:] are used as headers.
    return process_input(pandas.read_csv(
        in_path, header=None, sep=VALUE_SEP, names=names[1:]), names)


def process_input(df, names):
    # Drop column names[3] and one-hot encode the categorical column
    # names[4].  NOTE: get_dummies is applied to the training and input
    # frames separately, so the dummy columns only line up if both files
    # contain the same set of category values.
    df = df.drop([names[3]], axis=1)
    return pandas.get_dummies(df, columns=[names[4]])


def get_names() -> list:
    # Column names are stored tab-separated on a single line in ./names.
    names_path = os.path.join(MAIN_DIR, NAMES_FILE_NAME)
    check_file(names_path)

    with open(names_path) as f_names:
        return f_names.read().strip().split(VALUE_SEP)


def check_file(filename: str):
    if not os.path.exists(filename):
        raise FileNotFoundError(filename)


if __name__ == "__main__":
    if len(sys.argv) < 2:
        raise Exception("Name of working dir not specified!")
    main(sys.argv[1])
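# Example invocation (the script and directory names below are only
# illustrative; the directory must contain in.tsv and receives out.tsv):
#
#   python linreg.py dev-0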