diff --git a/zad1.py b/zad1.py
index a95332b..d674b57 100644
--- a/zad1.py
+++ b/zad1.py
@@ -7,16 +7,11 @@
 import numpy as np
 import logging
 from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
-# logging.basicConfig(level=logging.WARN)
-# logger = logging.getLogger(__name__)
+logging.basicConfig(level=logging.WARN)
+logger = logging.getLogger(__name__)
 
-# mlflow.set_tracking_uri("http://localhost:5000")
-# mlflow.set_experiment("s123456")
-# def eval_metrics(actual, pred):
-#     rmse = np.sqrt(mean_squared_error(actual, pred))
-#     mae = mean_absolute_error(actual, pred)
-#     r2 = r2_score(actual, pred)
-#     return rmse, mae, r2
+mlflow.set_tracking_uri("http://localhost:5000")
+mlflow.set_experiment("s487176")
 
 
 import requests
@@ -90,6 +85,12 @@ class TabularModel(nn.Module):
         out = self.fc2(out)
         out = self.softmax(out)
         return out
+
+    def predict(self, x):
+        with torch.no_grad():
+            output = self.forward(x)
+            _, predicted = torch.max(output, 1)
+            return predicted
 
 input_dim = wine_train.shape[1] - 1
 hidden_dim = 32
@@ -98,27 +99,31 @@
 model = TabularModel(input_dim, hidden_dim, output_dim)
 criterion = nn.CrossEntropyLoss()
 optimizer = torch.optim.Adam(model.parameters())
-
-model = TabularModel(input_dim=len(wine_train.columns)-1, hidden_dim=32, output_dim=2)
-criterion = nn.CrossEntropyLoss()
-optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
-
 num_epochs = 10
-for epoch in range(num_epochs):
-    running_loss = 0.0
-    for i, data in enumerate(train_dataloader, 0):
-        inputs, labels = data
-        labels = labels.type(torch.LongTensor)
-        optimizer.zero_grad()
-        outputs = model(inputs)
-        loss = criterion(outputs, labels)
-        loss.backward()
-        optimizer.step()
-        running_loss += loss.item()
+lr = 0.01
+alpha = 0.01
+model = TabularModel(input_dim=len(wine_train.columns)-1, hidden_dim=hidden_dim, output_dim=output_dim)
+criterion = nn.CrossEntropyLoss()
+optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=alpha)
+with mlflow.start_run():
+    mlflow.log_params({"learning rate": lr, "alpha": alpha})
 
-    # Print the loss every 1000 mini-batches
-    if (epoch%2) == 0:
-        print(f'Epoch {epoch + 1}, loss: {running_loss / len(train_dataloader):.4f}')
+
+    for epoch in range(num_epochs):
+        running_loss = 0.0
+        for i, data in enumerate(train_dataloader, 0):
+            inputs, labels = data
+            labels = labels.type(torch.LongTensor)
+            optimizer.zero_grad()
+            outputs = model(inputs)
+            loss = criterion(outputs, labels)
+            loss.backward()
+            optimizer.step()
+            running_loss += loss.item()
+
+        # Print the loss every 2 epochs
+        if (epoch%2) == 0:
+            print(f'Epoch {epoch + 1}, loss: {running_loss / len(train_dataloader):.4f}')
 
 print('Finished Training')
 
@@ -128,9 +133,12 @@
 total = 0
 with torch.no_grad():
     for data in test_dataloader:
         inputs, labels = data
-        outputs = model(inputs.float())
-        _, predicted = torch.max(outputs.data, 1)
+        predicted = model.predict(inputs.float())
         total += labels.size(0)
         correct += (predicted == labels).sum().item()
-print('Accuracy on test set: %d %%' % (100 * correct / total))
+
+accuracy = 100 * correct / total
+print('Accuracy on test set: %d %%' % accuracy)
+mlflow.log_metric("test_accuracy", accuracy)
+mlflow.pytorch.log_model(model, "model")
\ No newline at end of file
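A quick usage sketch of the tracking side of this change: once a run has been logged, the model artifact can be pulled back from the same tracking server for inference. This is a minimal sketch, not part of the patch; it assumes the tracking URI from the diff, the "model" artifact path passed to log_model above, the pytorch model flavor, and a placeholder run id (the real one comes from the MLflow UI or the run object). The 11-feature input shape is an assumption based on the wine-quality data this script appears to use.

    import mlflow
    import torch

    mlflow.set_tracking_uri("http://localhost:5000")

    run_id = "<run-id-from-mlflow-ui>"  # placeholder, not a real run id
    model = mlflow.pytorch.load_model(f"runs:/{run_id}/model")

    # The deserialized object is the original TabularModel instance, so the
    # predict() helper added in this diff is available on it.
    model.eval()
    sample = torch.rand(1, 11)  # assumed 11 input features (wine-quality data)
    print(model.predict(sample))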