From 8e88f6e6f03cc686ec739714b4b7d975aa22836c Mon Sep 17 00:00:00 2001
From: Agata
Date: Fri, 6 May 2022 09:53:37 +0200
Subject: [PATCH] Update files (sacred)

---
 Dockerfile           |   2 +
 training.Jenkinsfile |   9 ++--
 training_sacred.py   | 113 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 121 insertions(+), 3 deletions(-)
 create mode 100755 training_sacred.py

diff --git a/Dockerfile b/Dockerfile
index a8b8966..975977b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,8 +6,10 @@ RUN pip3 install seaborn
 RUN pip3 install ipython
 RUN pip3 install torch
 RUN pip3 install numpy
+RUN pip3 install sacred
 
 WORKDIR /app
 
 COPY ./training.py ./
+COPY ./training_sacred.py ./
 COPY ./evaluation.py ./

diff --git a/training.Jenkinsfile b/training.Jenkinsfile
index 00cf386..7d228b5 100644
--- a/training.Jenkinsfile
+++ b/training.Jenkinsfile
@@ -26,8 +26,12 @@
         stage('Script') {
             steps {
                 copyArtifacts filter: '*', projectName:'s444421-create-dataset', selector: buildParameter('BUILD_SELECTOR')
-                sh 'ipython ./training.py $EPOCHS'
-                archiveArtifacts artifacts: 'model.pth'
+                sh 'ipython ./training_sacred.py with "epochs=$EPOCHS"'
+                sh 'cp my_runs/1/config.json config.json'
+                sh 'cp my_runs/1/model.pth model.pth'
+                sh 'cp my_runs/_sources/training* training_sacred.py'
+                sh 'cp my_runs/1/info.json info.json'
+                archiveArtifacts artifacts: 'config.json, model.pth, training_sacred.py, info.json'
             }
         }
     }
@@ -45,5 +49,4 @@
     changed {
         emailext body: 'CHANGED', subject: 's444421-training status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
     }
-    }
 }

diff --git a/training_sacred.py b/training_sacred.py
new file mode 100755
index 0000000..8095d1a
--- /dev/null
+++ b/training_sacred.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# In[ ]:
+
+
+import numpy as np
+import pandas as pd
+from sklearn.metrics import accuracy_score
+import torch
+from torch import nn, optim
+import torch.nn.functional as F
+import sys
+from sacred import Experiment
+from sacred.observers import FileStorageObserver
+
+
+# In[ ]:
+
+
+ex = Experiment("file_observer")
+ex.observers.append(FileStorageObserver('my_runs'))
+
+@ex.config
+def my_config():
+    epochs = 400
+
+
+# In[ ]:
+
+
+def prepare_data():
+    X_train = pd.read_csv('X_train.csv')
+    y_train = pd.read_csv('y_train.csv')
+    X_train = torch.from_numpy(np.array(X_train)).float()
+    y_train = torch.squeeze(torch.from_numpy(y_train.values).float())
+    return X_train, y_train
+
+
+# In[ ]:
+
+
+class Net(nn.Module):
+    def __init__(self, n_features):
+        super(Net, self).__init__()
+        self.fc1 = nn.Linear(n_features, 5)
+        self.fc2 = nn.Linear(5, 3)
+        self.fc3 = nn.Linear(3, 1)
+    def forward(self, x):
+        x = F.relu(self.fc1(x))
+        x = F.relu(self.fc2(x))
+        return torch.sigmoid(self.fc3(x))
+
+
+# In[ ]:
+
+
+def calculate_accuracy(y_true, y_pred):
+    predicted = y_pred.ge(.5).view(-1)
+    return (y_true == predicted).sum().float() / len(y_true)
+
+
+# In[ ]:
+
+
+def round_tensor(t, decimal_places=3):
+    return round(t.item(), decimal_places)
+
+# In[ ]:
+
+
+def train_model(X_train, y_train, device, epochs):
+    net = Net(X_train.shape[1])
+    criterion = nn.BCELoss()
+    optimizer = optim.Adam(net.parameters(), lr=0.001)
+
+    X_train = X_train.to(device)
+    y_train = y_train.to(device)
+
+    net = net.to(device)
+    criterion = criterion.to(device)
+
+    for epoch in range(epochs):
+        y_pred = net(X_train)
+        y_pred = torch.squeeze(y_pred)
+        train_loss = criterion(y_pred, y_train)
+        if epoch % 100 == 0:
+            train_acc = calculate_accuracy(y_train, y_pred)
+            print(
+                f'''epoch {epoch}
+Train set - loss: {round_tensor(train_loss)}, accuracy: {round_tensor(train_acc)}
+''')
+        optimizer.zero_grad()
+        train_loss.backward()
+        optimizer.step()
+    return net, round_tensor(train_loss)
+
+
+# In[ ]:
+
+
+@ex.automain
+def my_main(epochs, _run):
+    X_train, y_train = prepare_data()
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+    model, loss = train_model(X_train, y_train, device, epochs)
+    torch.save(model, 'model.pth')
+    ex.add_artifact('model.pth')
+
+    _run.info["epochs"] = epochs
+    _run.info["loss"] = loss
+
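
Note: for reference, a minimal sketch of the Sacred wiring this patch relies on,
stripped of the PyTorch specifics. The experiment name "demo" and the toy loss
below are illustrative stand-ins, not part of the patch:

    from sacred import Experiment
    from sacred.observers import FileStorageObserver

    ex = Experiment("demo")
    # FileStorageObserver writes each run to my_runs/<run_id>/ (config.json,
    # run.json, info.json, any added artifacts) and copies the captured
    # script source into my_runs/_sources/.
    ex.observers.append(FileStorageObserver('my_runs'))

    @ex.config
    def config():
        epochs = 400  # default; "with epochs=N" on the CLI overrides it

    @ex.automain
    def main(epochs, _run):
        loss = 1.0 / epochs          # stand-in for a real training loop
        _run.info["loss"] = loss     # persisted to info.json by the observer

Run as "python demo.py with epochs=100", Sacred injects the overridden config
value into main(), which is what the Jenkinsfile's
'ipython ./training_sacred.py with "epochs=$EPOCHS"' step does. On a fresh
workspace the first run is assigned id 1 and lands in my_runs/1/, which is why
the stage copies config.json, model.pth, and info.json from that path before
archiving.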