update dllib-sacred.py
All checks were successful
s444356-evaluation/pipeline/head This commit looks good
s444356-training/pipeline/head This commit looks good

This commit is contained in:
Maciej Czajka 2022-05-09 21:12:46 +02:00
parent 8e85f90e0a
commit 7a0bf1af21

View File

@@ -2,7 +2,6 @@ import numpy as np
import sys
import os
import torch
import mlflow
import pandas as pd
from torch import nn
from torch.autograd import Variable
@@ -17,15 +16,13 @@ from sacred.observers import MongoObserver
# EPOCHS = int(sys.argv[1])
#ex = Experiment()
#ex.observers.append(FileStorageObserver('my_res'))
#ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))
ex = Experiment()
ex.observers.append(FileStorageObserver('my_res'))
ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))
mlflow.set_experiment("s444356")
#@ex.config
#def my_config():
# epochs = 100
@ex.config
def my_config():
epochs = 100
class Model(nn.Module):
def __init__(self, input_dim):
@@ -245,11 +242,8 @@ def remove_list(games):
# features_g = pd.DataFrame(features_g, dtype=np.float64)
# features_g = features_g.to_numpy()
epochs = int(sys.argv[1]) if len(sys.argv) > 20 else 20
#@ex.automain
#def my_main(epochs, _run):
def my_main(epochs):
@ex.automain
def my_main(epochs, _run):
platform = pd.read_csv('all_games.train.csv', sep=',', usecols=[1], header=None).values.tolist()
release_date = pd.read_csv('all_games.train.csv', sep=',', usecols=[2], header=None).values.tolist()
meta_score = pd.read_csv('all_games.train.csv', sep=',', usecols=[4], header=None).values.tolist()
@@ -301,8 +295,7 @@ def my_main(epochs):
loss_fn = nn.CrossEntropyLoss()
# epochs = 1000
# epochs = epochs
#_run.info['epochs'] = epochs
mlflow.log_param("epochs", epochs)
_run.info['epochs'] = epochs
def print_(loss):
print ("The loss calculated: ", loss)
@@ -329,15 +322,14 @@ def my_main(epochs):
pred = pred.detach().numpy()
print("The accuracy is", accuracy_score(labels_test_g, np.argmax(pred, axis=1)))
#_run.info['accuracy'] = accuracy_score(labels_test_g, np.argmax(pred, axis=1))
_run.info['accuracy'] = accuracy_score(labels_test_g, np.argmax(pred, axis=1))
_run.log_scalar("measure.accuracy", accuracy_score(labels_test_g, np.argmax(pred, axis=1)))
mlflow.log_metric("measure.accuracy", accuracy_score(labels_test_g, np.argmax(pred, axis=1)))
pred = pd.DataFrame(pred)
pred.to_csv('result.csv')
# save model
torch.save(model, "games_model.pkl")
#ex.add_artifact("games_model.pkl")
ex.add_artifact("games_model.pkl")