From 48175b11be006e5584c9da07f798b03724da2f40 Mon Sep 17 00:00:00 2001
From: zgolebiewska
Date: Sun, 26 May 2024 14:23:37 +0200
Subject: [PATCH] IUM_07

---
 model.py | 71 ++++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 43 insertions(+), 28 deletions(-)

diff --git a/model.py b/model.py
index f1633c8..f1df595 100644
--- a/model.py
+++ b/model.py
@@ -1,51 +1,66 @@
+from sacred import Experiment
+from sacred.observers import MongoObserver, FileStorageObserver
 import tensorflow as tf
 import pandas as pd
 from sklearn.preprocessing import LabelEncoder, StandardScaler
 from sklearn.model_selection import train_test_split
 import json
-import mlflow
 
-mlflow.set_tracking_uri("http://localhost:5000")  # Set the MLflow Tracking Server address
+ex = Experiment("s464906_experiment")
 
-df = pd.read_csv('OrangeQualityData.csv')
+mongo_url = "mongodb://admin:IUM_2021@tzietkiewicz.vm.wmi.amu.edu.pl:27017"
+ex.observers.append(MongoObserver(url=mongo_url, db_name='sacred'))
+ex.observers.append(FileStorageObserver('logs'))
 
-encoder = LabelEncoder()
-df["Color"] = encoder.fit_transform(df["Color"])
-df["Variety"] = encoder.fit_transform(df["Variety"])
-df["Blemishes"] = df["Blemishes (Y/N)"].apply(lambda x: 1 if x.startswith("Y") else 0)
+@ex.config
+def cfg():
+    epochs = 100
 
-df.drop(columns=["Blemishes (Y/N)"], inplace=True)
+@ex.automain
+def train_model(epochs):
+    df = pd.read_csv('OrangeQualityData.csv')
 
-X = df.drop(columns=["Quality (1-5)"])
-y = df["Quality (1-5)"]
+    encoder = LabelEncoder()
+    df["Color"] = encoder.fit_transform(df["Color"])
+    df["Variety"] = encoder.fit_transform(df["Variety"])
+    df["Blemishes"] = df["Blemishes (Y/N)"].apply(lambda x: 1 if x.startswith("Y") else 0)
 
-X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
+    df.drop(columns=["Blemishes (Y/N)"], inplace=True)
 
-scaler = StandardScaler()
-X_train_scaled = scaler.fit_transform(X_train)
-X_test_scaled = scaler.transform(X_test)
+    X = df.drop(columns=["Quality (1-5)"])
+    y = df["Quality (1-5)"]
 
-model = tf.keras.Sequential([
-    tf.keras.layers.Dense(64, activation='relu', input_shape=(X_train_scaled.shape[1],)),
-    tf.keras.layers.Dense(32, activation='relu'),
-    tf.keras.layers.Dense(1)
-])
+    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
 
-model.compile(optimizer='sgd', loss='mse')
+    scaler = StandardScaler()
+    X_train_scaled = scaler.fit_transform(X_train)
+    X_test_scaled = scaler.transform(X_test)
 
-with mlflow.start_run():
-    mlflow.log_param("optimizer", 'sgd')
-    mlflow.log_param("loss_function", 'mse')
-    mlflow.log_param("epochs", 100)
+    model = tf.keras.Sequential([
+        tf.keras.layers.Dense(64, activation='relu', input_shape=(X_train_scaled.shape[1],)),
+        tf.keras.layers.Dense(32, activation='relu'),
+        tf.keras.layers.Dense(1)
+    ])
 
-    history = model.fit(X_train_scaled, y_train, epochs=100, verbose=0, validation_data=(X_test_scaled, y_test))
+    model.compile(optimizer='sgd', loss='mse')
 
-    for key, value in history.history.items():
-        mlflow.log_metric(key, value[-1])  # Log the final value of each metric
+    history = model.fit(X_train_scaled, y_train, epochs=epochs, verbose=0, validation_data=(X_test_scaled, y_test))
+
+    ex.log_scalar("epochs", epochs)
+
+    ex.add_artifact(__file__)
 
     model.save('orange_quality_model_tf.h5')
+    ex.add_artifact('orange_quality_model_tf.h5')
+
+    for key, value in history.history.items():
+        ex.log_scalar(key, value[-1])
 
     predictions = model.predict(X_test_scaled)
 
-with open('predictions_tf.json', 'w') as f: json.dump(predictions.tolist(), f, indent=4)
+    with open('predictions_tf.json', 'w') as f:
+        json.dump(predictions.tolist(), f, indent=4)
+    ex.add_artifact('predictions_tf.json')
+
+    return 'Training completed successfully'
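
To rerun this experiment with a different config value, Sacred accepts `with key=value` on the command line (e.g. `python model.py with epochs=200`); the same override works in-process through `Experiment.run()`. A minimal sketch, assuming the patched model.py above is importable from the working directory:

    # Importing is safe: @ex.automain only starts a run when the file
    # is executed as a script, not on import.
    from model import ex

    # Smoke-test the pipeline with a short run; config_updates overrides
    # the `epochs` value defined in the @ex.config function.
    run = ex.run(config_updates={"epochs": 10})
    print(run.result)  # 'Training completed successfully'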
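Runs captured by the MongoObserver can be inspected afterwards: Sacred writes run documents to the `runs` collection and logged scalars to the `metrics` collection of the configured database. A rough sketch with pymongo, reusing the connection string and db name from the patch; the exact document layout is an assumption to verify against your Sacred version:

    from pymongo import MongoClient

    client = MongoClient("mongodb://admin:IUM_2021@tzietkiewicz.vm.wmi.amu.edu.pl:27017")
    db = client["sacred"]

    # Most recent run document written by the MongoObserver.
    last_run = db.runs.find_one(sort=[("_id", -1)])
    print(last_run["experiment"]["name"], last_run["status"])

    # Each metric document holds parallel `steps` and `values` arrays.
    for m in db.metrics.find({"run_id": last_run["_id"]}):
        print(m["name"], m["values"][-1])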