Added customizable model params

Marcin Kostrzewski 2022-05-05 22:11:32 +02:00
parent 7abe9c8d08
commit 46d7831b98
2 changed files with 35 additions and 3 deletions


@@ -1,4 +1,19 @@
pipeline {
    parameters {
        string(
            defaultValue: '64',
            description: 'Batch size used in ADAM',
            name: 'BATCHSIZE',
            trim: true
        )
        string(
            defaultValue: '5',
            description: 'Number of iterations',
            name: 'EPOCHS',
            trim: true
        )
    }
    agent {
        docker {
            image 's444409-create-dataset'
@@ -8,7 +23,7 @@ pipeline {
    stages {
        stage('Train model') {
            steps {
                sh "python train_model.py"
                sh "python train_model.py -e ${params.EPOCHS} -b ${params.BATCHSIZE}"
            }
        }
    }
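Jenkins interpolates the two string parameters into the double-quoted sh step, so the training script receives them as ordinary CLI flags. A minimal sketch of that handoff, assuming the argparse setup added in the training script below; the concrete values are illustrative, not taken from a build:

import argparse

# The pipeline step
#   sh "python train_model.py -e ${params.EPOCHS} -b ${params.BATCHSIZE}"
# expands to something like: python train_model.py -e 5 -b 64
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--batchSize', type=int, default=64)
parser.add_argument('-e', '--epochs', type=int, default=5)

# Jenkins passes the parameter values as text; type=int converts them.
args = parser.parse_args(['-e', '5', '-b', '64'])
assert args.epochs == 5 and args.batchSize == 64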


@@ -1,10 +1,17 @@
from ast import arg
from sqlite3 import paramstyle
import numpy as np
import pandas as pd
import torch
import argparse
from torch import nn
from torch.utils.data import DataLoader, Dataset
default_batch_size = 64
default_epochs = 5
def hour_to_int(text: str):
    return float(text.replace(':', ''))
@@ -82,10 +89,20 @@ def test(dataloader, model, loss_fn):
    print(f"Avg loss: {test_loss:>8f} \n")
def setup_args():
    args_parser = argparse.ArgumentParser(prefix_chars='-')
    args_parser.add_argument('-b', '--batchSize', type=int, default=default_batch_size)
    args_parser.add_argument('-e', '--epochs', type=int, default=default_epochs)
    return args_parser.parse_args()
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")
batch_size = 64
args = setup_args()
batch_size = args.batchSize
plant_test = PlantsDataset('data/Plant_1_Generation_Data.csv.test')
plant_train = PlantsDataset('data/Plant_1_Generation_Data.csv.train')
@@ -103,7 +120,7 @@ print(model)
loss_fn = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
epochs = 5
epochs = args.epochs
for t in range(epochs):
    print(f"Epoch {t + 1}\n-------------------------------")
    train(train_dataloader, model, loss_fn, optimizer)
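For context, a minimal sketch of where the two parsed values end up in a training run; the tensors and the linear model are placeholders, not the PlantsDataset or the network defined in this repository:

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

batch_size, epochs = 64, 5  # stand-ins for args.batchSize / args.epochs

# Placeholder data and model, only to show where the two parameters are used.
xs = torch.randn(256, 1)
ys = 2 * xs + 0.1 * torch.randn(256, 1)
train_dataloader = DataLoader(TensorDataset(xs, ys), batch_size=batch_size, shuffle=True)

model = nn.Linear(1, 1)
loss_fn = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

for t in range(epochs):
    for X, y in train_dataloader:
        optimizer.zero_grad()
        loss = loss_fn(model(X), y)
        loss.backward()
        optimizer.step()
    print(f"Epoch {t + 1}: last batch loss {loss.item():.4f}")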