Update files (sacred)

Agata 2022-05-06 09:53:37 +02:00
parent 4dd5c36a31
commit 8e88f6e6f0
3 changed files with 121 additions and 3 deletions


@@ -6,8 +6,10 @@ RUN pip3 install seaborn
 RUN pip3 install ipython
 RUN pip3 install torch
 RUN pip3 install numpy
+RUN pip3 install sacred
 WORKDIR /app
 COPY ./training.py ./
+COPY ./training_sacred.py ./
 COPY ./evaluation.py ./


@@ -26,8 +26,12 @@ pipeline {
         stage('Script') {
             steps {
                 copyArtifacts filter: '*', projectName:'s444421-create-dataset', selector: buildParameter('BUILD_SELECTOR')
-                sh 'ipython ./training.py $EPOCHS'
-                archiveArtifacts artifacts: 'model.pth'
+                sh 'ipython ./training_sacred.py with "epochs=$EPOCHS"'
+                sh 'cp my_runs/1/config.json config.json'
+                sh 'cp my_runs/1/model.pth model.pth'
+                sh 'cp my_runs/_sources/training* training_sacred.py'
+                sh 'cp my_runs/1/info.json info.json'
+                archiveArtifacts artifacts: 'config.json, model.pth, training_sacred.py, info.json'
             }
         }
     }
@@ -45,5 +49,4 @@ pipeline {
         changed {
             emailext body: 'CHANGED', subject: 's444421-training status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
         }
-        }
     }

training_sacred.py Executable file

@@ -0,0 +1,113 @@
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
import torch
from torch import nn, optim
import torch.nn.functional as F
import sys
from sacred import Experiment
from sacred.observers import FileStorageObserver
# In[ ]:
ex = Experiment("file_observer")
ex.observers.append(FileStorageObserver('my_runs'))
@ex.config
def my_config():
    epochs = 400
# In[ ]:
def prepare_data():
    X_train = pd.read_csv('X_train.csv')
    y_train = pd.read_csv('y_train.csv')
    X_train = torch.from_numpy(np.array(X_train)).float()
    y_train = torch.squeeze(torch.from_numpy(y_train.values).float())
    return X_train, y_train
# In[ ]:
class Net(nn.Module):
    def __init__(self, n_features):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(n_features, 5)
        self.fc2 = nn.Linear(5, 3)
        self.fc3 = nn.Linear(3, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return torch.sigmoid(self.fc3(x))
# In[ ]:
def calculate_accuracy(y_true, y_pred):
    predicted = y_pred.ge(.5).view(-1)
    return (y_true == predicted).sum().float() / len(y_true)
# In[ ]:
def round_tensor(t, decimal_places=3):
    return round(t.item(), decimal_places)
# In[ ]:
def train_model(X_train, y_train, device, epochs):
    net = Net(X_train.shape[1])
    criterion = nn.BCELoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    # move data, model and loss to the selected device (GPU if available)
    X_train = X_train.to(device)
    y_train = y_train.to(device)
    net = net.to(device)
    criterion = criterion.to(device)

    for epoch in range(epochs):
        y_pred = net(X_train)
        y_pred = torch.squeeze(y_pred)
        train_loss = criterion(y_pred, y_train)

        # log loss and accuracy every 100 epochs
        if epoch % 100 == 0:
            train_acc = calculate_accuracy(y_train, y_pred)
            print(
                f'''epoch {epoch}
Train set - loss: {round_tensor(train_loss)}, accuracy: {round_tensor(train_acc)}
''')

        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()

    return net, round_tensor(train_loss)
# In[ ]:
@ex.automain
def my_main(epochs, _run):
    X_train, y_train = prepare_data()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model, loss = train_model(X_train, y_train, device, epochs)

    # save the trained model, attach it to the Sacred run as an artifact,
    # and record the epoch count and final loss in the run's info dict
    torch.save(model, 'model.pth')
    ex.add_artifact('model.pth')
    _run.info["epochs"] = epochs
    _run.info["loss"] = loss