diff --git a/create_dataset.py b/create_dataset.py
index de2f76f..22d9dbf 100644
--- a/create_dataset.py
+++ b/create_dataset.py
@@ -21,9 +21,17 @@ water = pd.read_csv('waterQuality1.csv', nrows = args.CUT)
 water = water[water['is_safe'].apply(lambda x: str(x).isdigit())]
 water['is_safe'].value_counts()
 
+
+# Normalizing Dataset to [0.0, 1.0] float values
+from sklearn import preprocessing
+water_min_max = preprocessing.MinMaxScaler()
+water_min_max = water_min_max.fit_transform(water)
+water_min_max = pd.DataFrame(water_min_max, columns=water.columns)
+waterNorm = water_min_max
+
 # Splitting DataSet on train, dev, test parts
 from sklearn.model_selection import train_test_split
-water_train, water_test = train_test_split(water, train_size=0.8, random_state=1, stratify=water["is_safe"])
+water_train, water_test = train_test_split(waterNorm, train_size=0.8, random_state=1, stratify=waterNorm["is_safe"])
 water_test, water_dev = train_test_split(water_test, train_size=0.66, random_state=1, stratify=water_test["is_safe"])
 # water_train["is_safe"].value_counts()
 # water_test["is_safe"].value_counts()
@@ -41,13 +49,5 @@ water_test, water_dev = train_test_split(water_test, train_size=0.66, random_sta
 #water["is_safe"].value_counts().plot(kind="bar")
 
 
-# Normalizing Dataset to [0.0, 1.0] float values
-from sklearn import preprocessing
-water_min_max = preprocessing.MinMaxScaler()
-water_min_max = water_min_max.fit_transform(water)
-water_min_max = pd.DataFrame(water_min_max, columns=water.columns)
-waterNorm = water_min_max
-
-
 waterNorm.to_csv('waterQuality.csv', index=False)
 
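
For context, a minimal sketch of what create_dataset.py does once this patch is applied: the MinMax normalization block is moved ahead of the stratified train/dev/test split, so the split now operates on the scaled frame. This is not the verbatim script; column and file names are taken from the diff, and the nrows=args.CUT row limit from argparse is omitted for brevity.

# Sketch of the patched pipeline (assumptions noted above).
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

water = pd.read_csv('waterQuality1.csv')
# Keep only rows whose is_safe label is a plain digit (drops malformed entries).
water = water[water['is_safe'].apply(lambda x: str(x).isdigit())]

# Scale every column to [0.0, 1.0]; the scaler is fit on the full dataset.
scaler = preprocessing.MinMaxScaler()
waterNorm = pd.DataFrame(scaler.fit_transform(water), columns=water.columns)

# Stratified split: 80% train, then the remaining 20% split 66/34 into test and dev.
water_train, water_test = train_test_split(
    waterNorm, train_size=0.8, random_state=1, stratify=waterNorm["is_safe"])
water_test, water_dev = train_test_split(
    water_test, train_size=0.66, random_state=1, stratify=water_test["is_safe"])

# As in the script, only the full normalized frame is written out.
waterNorm.to_csv('waterQuality.csv', index=False)

One consequence of the reordering: the scaler's per-column min/max are now computed over the full dataset rather than the training portion alone, so the test and dev rows contribute to the scaling parameters.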