Added sacred.

commit 06894754f8 (parent 6a76b0713f)
CI: some checks failed. s426206-training/pipeline/head reported a failure building this commit.

.gitignore (vendored): 1 addition
@@ -4,3 +4,4 @@ venv
 metrics.tsv
 *.pt
 plot.png
+my_runs
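The new my_runs entry is ignored because the FileStorageObserver configured below in dlgssdpytorch.py writes one directory per run there. As a quick illustration (a sketch, not part of the commit; assumes Sacred 0.8.x defaults, where each run directory holds config.json, run.json, metrics.json and cout.txt):

# Hypothetical helper: show what Sacred's FileStorageObserver left behind.
import os

def list_runs(base='my_runs'):
    # One numbered subdirectory per run; print the files recorded for each.
    for run_id in sorted(os.listdir(base)):
        run_dir = os.path.join(base, run_id)
        if os.path.isdir(run_dir):
            print(run_id, '->', sorted(os.listdir(run_dir)))

list_runs()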
Dockerfile: 1 addition

@@ -12,6 +12,7 @@ RUN chmod -R 777 /.kaggle
 COPY ./requirments.txt ./
 RUN pip3 install -r requirments.txt
 RUN pip3 install torch==1.8.1+cpu torchvision==0.9.1+cpu torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
+RUN pip3 install sacred
 
 # Create the /app directory in the container (if it does not already exist) and switch to it (all subsequent RUN, CMD, ENTRYPOINT, COPY and ADD commands are executed there)
 WORKDIR /app
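torch is pinned to 1.8.1+cpu, but sacred is installed unpinned, so the Sacred version baked into the image depends on when it is built. A minimal in-container smoke test (a sketch, not part of the commit):

# Hypothetical check: print the versions that ended up in the image.
import torch
import sacred

print('torch:', torch.__version__)    # expected: 1.8.1+cpu
print('sacred:', sacred.__version__)  # unpinned, whatever pip resolved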
dlgssdpytorch.py: 187 changes
@@ -5,21 +5,12 @@ import torch.nn as nn
 import torch.optim as optim
 from torch.utils.data import Dataset, TensorDataset, DataLoader
 import argparse
+from sacred import Experiment
+from sacred.observers import MongoObserver, FileStorageObserver
 
-parser = argparse.ArgumentParser(description='Model training program')
-parser.add_argument('-l', '--lr', type=float, default=1e-3, help="Learning rate (lr)", required=False)
-parser.add_argument('-e', '--epochs', type=int, default=100, help="Number of epochs", required=False)
-args = parser.parse_args()
-
-lr = args.lr
-n_epochs = args.epochs
-
-train_dataset = torch.load('train_dataset.pt')
-#val_dataset = torch.load('val_dataset.pt')
-
-train_loader = DataLoader(dataset=train_dataset)
-#val_loader = DataLoader(dataset=val_dataset)
-
+ex = Experiment("426206", interactive=False, save_git_info=False)
+ex.observers.append(FileStorageObserver('my_runs'))
+ex.observers.append(MongoObserver(url='mongodb://mongo_user:mongo_password_IUM_2021@172.17.0.1:27017', db_name='sacred'))
 class LayerLinearRegression(nn.Module):
     def __init__(self):
         super().__init__()
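Hardcoding the Mongo credentials in the source works for a local lab setup, but a common alternative is to read the connection string from the environment so the password never lands in version control. A minimal sketch (the MONGO_URL variable name is an assumption, not part of this commit):

import os
from sacred import Experiment
from sacred.observers import MongoObserver, FileStorageObserver

ex = Experiment("426206", interactive=False, save_git_info=False)
ex.observers.append(FileStorageObserver('my_runs'))

# Hypothetical: take the URL from the environment; skip Mongo when unset,
# so local runs still get the file-based observer.
mongo_url = os.environ.get('MONGO_URL')
if mongo_url:
    ex.observers.append(MongoObserver(url=mongo_url, db_name='sacred'))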
@@ -30,80 +21,110 @@ class LayerLinearRegression(nn.Module):
         # Now it only takes a call to the layer to make predictions
         return self.linear(x)
 
-model = LayerLinearRegression()
-# Checks model's parameters
-#print(model.state_dict())
-
-loss_fn = nn.MSELoss(reduction='mean')
-optimizer = optim.SGD(model.parameters(), lr=lr)
-
-def make_train_step(model, loss_fn, optimizer):
-    # Builds function that performs a step in the train loop
-    def train_step(x, y):
-        # Sets model to TRAIN mode
-        model.train()
-        # Makes predictions
-        yhat = model(x)
-        # Computes loss
-        loss = loss_fn(y, yhat)
-        # Computes gradients
-        loss.backward()
-        # Updates parameters and zeroes gradients
-        optimizer.step()
-        optimizer.zero_grad()
-        # Returns the loss
-        return loss.item()
-    # Returns the function that will be called inside the train loop
-    return train_step
-
-# Creates the train_step function for our model, loss function and optimizer
-train_step = make_train_step(model, loss_fn, optimizer)
-training_losses = []
-validation_losses = []
-#print(model.state_dict())
-# For each epoch...
-for epoch in range(n_epochs):
-    losses = []
-    # Uses loader to fetch one mini-batch for training
-    for x_batch, y_batch in train_loader:
-        # NOW, sends the mini-batch data to the device
-        # so it matches location of the MODEL
-        # x_batch = x_batch.to(device)
-        # y_batch = y_batch.to(device)
-        # One step of training
-        loss = train_step(x_batch, y_batch)
-        losses.append(loss)
-    training_loss = np.mean(losses)
-    training_losses.append(training_loss)
-
-    # After finishing training steps for all mini-batches,
-    # it is time for evaluation!
-    # Evaluation is not needed here any more because it happens in evaluation.py. It is still possible, though, to enable a per-epoch evaluation preview.
-    # # We tell PyTorch to NOT use autograd...
-    # # Do you remember why?
-    # with torch.no_grad():
-    #     val_losses = []
-    #     # Uses loader to fetch one mini-batch for validation
-    #     for x_val, y_val in val_loader:
-    #         # Again, sends data to same device as model
-    #         # x_val = x_val.to(device)
-    #         # y_val = y_val.to(device)
-
-    #         model.eval()
-    #         # Makes predictions
-    #         yhat = model(x_val)
-    #         # Computes validation loss
-    #         val_loss = loss_fn(y_val, yhat)
-    #         val_losses.append(val_loss.item())
-    #     validation_loss = np.mean(val_losses)
-    #     validation_losses.append(validation_loss)
-
-    # print(f"[{epoch+1}] Training loss: {training_loss:.3f}\t Validation loss: {validation_loss:.3f}")
-    print(f"[{epoch+1}] Training loss: {training_loss:.3f}\t")
-
-torch.save({
-    'model_state_dict': model.state_dict(),
-    'optimizer_state_dict': optimizer.state_dict(),
-    'loss': lr,
-    }, 'model.pt')
+# parser = argparse.ArgumentParser(description='Model training program')
+# parser.add_argument('-l', '--lr', type=float, default=1e-3, help="Learning rate (lr)", required=False)
+# parser.add_argument('-e', '--epochs', type=int, default=100, help="Number of epochs", required=False)
+# args = parser.parse_args()
+#python3 dlgssdpytorch.py with lr=0.01 n_epochs=10
+
+@ex.config
+def my_config():
+    lr = 1e-3
+    n_epochs = 100
+
+@ex.capture
+def train(lr, n_epochs, _run):
+    train_dataset = torch.load('train_dataset.pt')
+    #val_dataset = torch.load('val_dataset.pt')
+
+    train_loader = DataLoader(dataset=train_dataset)
+    #val_loader = DataLoader(dataset=val_dataset)
+
+    model = LayerLinearRegression()
+    # Checks model's parameters
+    #print(model.state_dict())
+
+    loss_fn = nn.MSELoss(reduction='mean')
+    optimizer = optim.SGD(model.parameters(), lr=lr)
+
+    def make_train_step(model, loss_fn, optimizer):
+        # Builds function that performs a step in the train loop
+        def train_step(x, y):
+            # Sets model to TRAIN mode
+            model.train()
+            # Makes predictions
+            yhat = model(x)
+            # Computes loss
+            loss = loss_fn(y, yhat)
+            # Computes gradients
+            loss.backward()
+            # Updates parameters and zeroes gradients
+            optimizer.step()
+            optimizer.zero_grad()
+            # Returns the loss
+            return loss.item()
+        # Returns the function that will be called inside the train loop
+        return train_step
+
+    # Creates the train_step function for our model, loss function and optimizer
+    train_step = make_train_step(model, loss_fn, optimizer)
+    training_losses = []
+    validation_losses = []
+    #print(model.state_dict())
+    # For each epoch...
+    for epoch in range(n_epochs):
+        _run.log_scalar("Epoch", str(epoch))
+        losses = []
+        # Uses loader to fetch one mini-batch for training
+        for x_batch, y_batch in train_loader:
+            # NOW, sends the mini-batch data to the device
+            # so it matches location of the MODEL
+            # x_batch = x_batch.to(device)
+            # y_batch = y_batch.to(device)
+            # One step of training
+            loss = train_step(x_batch, y_batch)
+            losses.append(loss)
+        training_loss = np.mean(losses)
+        training_losses.append(training_loss)
+
+        _run.log_scalar("MSE", str(training_loss))
+
+        # After finishing training steps for all mini-batches,
+        # it is time for evaluation!
+        # Evaluation is not needed here any more because it happens in evaluation.py. It is still possible, though, to enable a per-epoch evaluation preview.
+        # # We tell PyTorch to NOT use autograd...
+        # # Do you remember why?
+        # with torch.no_grad():
+        #     val_losses = []
+        #     # Uses loader to fetch one mini-batch for validation
+        #     for x_val, y_val in val_loader:
+        #         # Again, sends data to same device as model
+        #         # x_val = x_val.to(device)
+        #         # y_val = y_val.to(device)
+
+        #         model.eval()
+        #         # Makes predictions
+        #         yhat = model(x_val)
+        #         # Computes validation loss
+        #         val_loss = loss_fn(y_val, yhat)
+        #         val_losses.append(val_loss.item())
+        #     validation_loss = np.mean(val_losses)
+        #     validation_losses.append(validation_loss)
+
+        # print(f"[{epoch+1}] Training loss: {training_loss:.3f}\t Validation loss: {validation_loss:.3f}")
+        print(f"[{epoch+1}] Training loss: {training_loss:.3f}\t")
+
+    torch.save({
+        'model_state_dict': model.state_dict(),
+        'optimizer_state_dict': optimizer.state_dict(),
+        'loss': lr,
+        }, 'model.pt')
+
+@ex.automain
+def my_main(lr, n_epochs, _run):
+    train(lr, n_epochs, _run)
+
+ex.run()
+ex.add_artifact('model.pt')
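Two details of the new entry point are worth flagging. @ex.automain already starts a run when the script is executed, and Sacred's documentation expects it to be the last statement in the file; the trailing ex.run() would therefore start a second run, and the artifact is more reliably attached from inside the run itself. A minimal sketch of the more conventional ending (a suggested restructuring under those assumptions, not what this commit does):

# Sketch: let @ex.automain own the run; attach the checkpoint to that run.
@ex.automain
def my_main(_run):
    train()                        # @ex.capture injects lr, n_epochs, _run
    _run.add_artifact('model.pt')  # record the saved checkpoint with the run

# No ex.run() / ex.add_artifact() below: automain runs the experiment when
# the script is executed, with optional config overrides via Sacred's CLI:
#   python3 dlgssdpytorch.py with lr=0.01 n_epochs=10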