diff --git a/mongoObserver.py b/mongoObserver.py
index dd5f52e..d30a343 100644
--- a/mongoObserver.py
+++ b/mongoObserver.py
@@ -45,8 +45,8 @@ def readAndtrain(epochs, batch_size, _run):
     input_dim = 11
     output_dim = 1
 
-    _run.info("Batch", str(batch_size))
-    _run.info("epoch", str(epochs))
+    _run.log_scalar("Batch", str(batch_size))
+    _run.log_scalar("epoch", str(epochs))
 
     model = LogisticRegressionModel(input_dim, output_dim)
     model.load_state_dict(torch.load('DEATH_EVENT.pth'))
@@ -67,16 +67,16 @@ def readAndtrain(epochs, batch_size, _run):
             loss.backward()
             optimizer.step()
 
-            _run.info("Lost", str(loss.item()))
+            _run.log_scalar("Lost", str(loss.item()))
 
     torch.save(model.state_dict(), 'DEATH_EVENT.pth')
 
     prediction= model(xTest)
-    _run.info("accuracy_score", accuracy_score(yTest, np.argmax(prediction.detach().numpy(), axis=1)))
-    _run.info("F1", f1_score(yTest, np.argmax(prediction.detach().numpy(), axis=1), average=None))
+    _run.log_scalar("accuracy_score", accuracy_score(yTest, np.argmax(prediction.detach().numpy(), axis=1)))
+    # _run.log_scalar("F1", str(f1_score(yTest, np.argmax(prediction.detach().numpy(), axis=1), average=None)))
     print("accuracy_score", accuracy_score(yTest, np.argmax(prediction.detach().numpy(), axis=1)))
-    print("F1", f1_score(yTest, np.argmax(prediction.detach().numpy(), axis=1), average=None))
+    # print("F1", f1_score(yTest, np.argmax(prediction.detach().numpy(), axis=1), average=None))
 
 
 @ex.automain
 def my_main(epochs, batch_size, _run):
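The diff swaps the broken `_run.info(...)` calls (in Sacred, `_run.info` is a plain dict and is not callable) for `_run.log_scalar(...)`, which records metric time series to the attached MongoObserver. Below is a minimal, self-contained sketch of that logging pattern, assuming a local MongoDB on the default port and a database named "sacred"; the experiment name, metric names, and config defaults are illustrative and not taken from the original repository.

# Minimal sketch of Sacred metric logging with a MongoObserver.
# Assumptions: MongoDB at localhost:27017, database "sacred";
# names and default values below are hypothetical.
from sacred import Experiment
from sacred.observers import MongoObserver

ex = Experiment("log_scalar_demo")
ex.observers.append(MongoObserver(url="mongodb://localhost:27017", db_name="sacred"))


@ex.config
def config():
    epochs = 3        # hypothetical defaults for the sketch
    batch_size = 16


@ex.automain
def main(epochs, batch_size, _run):
    # log_scalar(name, value, step) stores one point of a numeric metric
    # series; an explicit step keeps the curve ordered in the dashboard.
    for step in range(epochs):
        fake_loss = 1.0 / (step + 1)
        _run.log_scalar("training.loss", fake_loss, step)

    # _run.info is a dict, so one-off run metadata goes in as key/value
    # pairs rather than function calls.
    _run.info["batch_size"] = batch_size

A design note on the diff itself: `log_scalar` is meant for numeric values, so passing `str(loss.item())` works but storing the raw float (and an explicit iteration step) would make the stored series easier to plot and compare across runs.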