ium_07 sacred

This commit is contained in:
Michal Gulczynski 2024-06-11 20:07:04 +02:00
parent b88ddb3066
commit 113d505f58
2 changed files with 10 additions and 15 deletions

View File

@@ -15,9 +15,12 @@ ex = Experiment('464953')
ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@tzietkiewicz.vm.wmi.amu.edu.pl:27017')) ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@tzietkiewicz.vm.wmi.amu.edu.pl:27017'))
ex.observers.append(FileStorageObserver('my_experiment_logs')) ex.observers.append(FileStorageObserver('my_experiment_logs'))
def datasets_preparation(): @ex.capture
df_1 = pd.read_csv("datasets/spotify_songs.csv") def datasets_preparation(_run):
df_2 = pd.read_csv("datasets/Spotify_Dataset.csv", sep=";") with _run.open_resource("datasets/spotify_songs.csv") as f:
df_1 = pd.read_csv(f)
with _run.open_resource("datasets/Spotify_Dataset.csv") as f:
df_2 = pd.read_csv(f, sep=";")
df_1 = df_1.dropna() df_1 = df_1.dropna()
df_2 = df_2.dropna() df_2 = df_2.dropna()
df_2 = df_2.rename(columns={'Title': 'track_name'}) df_2 = df_2.rename(columns={'Title': 'track_name'})
@@ -63,12 +66,6 @@ def run_experiment(test_size, random_state, model_filename):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=random_state) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=random_state)
Y_train = np.ravel(Y_train) Y_train = np.ravel(Y_train)
Y_test = np.ravel(Y_test) Y_test = np.ravel(Y_test)
ex.add_resource(X_train)
ex.add_resource(X_test)
ex.add_resource(Y_train)
ex.add_resource(Y_test)
scaler = StandardScaler() scaler = StandardScaler()
numeric_columns = X_train.select_dtypes(include=['int', 'float']).columns numeric_columns = X_train.select_dtypes(include=['int', 'float']).columns
X_train_scaled = scaler.fit_transform(X_train[numeric_columns]) X_train_scaled = scaler.fit_transform(X_train[numeric_columns])

View File

@@ -17,19 +17,17 @@ def config():
test_dataset_filename = 'datasets/docker_test_dataset.csv' test_dataset_filename = 'datasets/docker_test_dataset.csv'
@ex.main @ex.main
def run_evaluation(model_filename, test_dataset_filename): def run_evaluation(_run ,model_filename, test_dataset_filename):
with open(model_filename, 'rb') as file: with open(model_filename, 'rb') as file:
model = pickle.load(file) model = pickle.load(file)
print("Model został wczytany z pliku:", model_filename) print("Model został wczytany z pliku:", model_filename)
test_df = pd.read_csv(test_dataset_filename)
with _run.open_resource(test_dataset_filename) as f:
test_df = pd.read_csv(f)
Y_test = test_df[['playlist_genre']] Y_test = test_df[['playlist_genre']]
X_test = test_df.drop(columns='playlist_genre') X_test = test_df.drop(columns='playlist_genre')
Y_test = np.ravel(Y_test) Y_test = np.ravel(Y_test)
scaler = StandardScaler() scaler = StandardScaler()
ex.add_resource(X_test)
ex.add_resource(Y_test)
numeric_columns = X_test.select_dtypes(include=['int', 'float']).columns numeric_columns = X_test.select_dtypes(include=['int', 'float']).columns
X_test_scaled = scaler.fit_transform(X_test[numeric_columns]) X_test_scaled = scaler.fit_transform(X_test[numeric_columns])
Y_pred = model.predict(X_test_scaled) Y_pred = model.predict(X_test_scaled)