From 1060ec99768bc38ac2ec68ea263682ca90db5edf Mon Sep 17 00:00:00 2001
From: Alagris
Date: Sun, 23 May 2021 19:40:20 +0200
Subject: [PATCH] fix: clean workspace before checkout; pass in_alphabet and max_len to encode()

Clean the Jenkins workspace at the start of the Build stage so stale
files from previous runs cannot leak into the build, and forward
in_alphabet and max_len from encode_str() to encode(), which takes both
as required positional arguments.
---
 train-model.Jenkinsfile | 1 +
 train_model.py          | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/train-model.Jenkinsfile b/train-model.Jenkinsfile
index 4507346..b51c215 100644
--- a/train-model.Jenkinsfile
+++ b/train-model.Jenkinsfile
@@ -10,6 +10,7 @@ pipeline {
     stages {
         stage('Build') {
             steps {
+                cleanWs()
                 git 'https://git.wmi.amu.edu.pl/s434749/ium_434749.git'
                 copyArtifacts fingerprintArtifacts: true, projectName: 's434749-create-dataset', selector: lastSuccessful()
                 sh "python3 train_model.py with 'batch_size=${params.batch_size}' 'learning_rate=${params.learning_rate}' 'epochs=${params.epochs}'"
diff --git a/train_model.py b/train_model.py
index 7e1097a..76ac7ed 100644
--- a/train_model.py
+++ b/train_model.py
@@ -77,7 +77,7 @@ def encode(batch: [(torch.tensor, str)], in_alphabet, max_len):
 def encode_str(batch: [(str, str)], in_alphabet, max_len):
     batch = [(torch.tensor([in_alphabet[letter] for letter in in_str], dtype=torch.int), out_str)
              for in_str, out_str in batch]
-    return encode(batch)
+    return encode(batch, in_alphabet, max_len)


 def train_model(model, learning_rate, in_alphabet, max_len, data, epochs, batch_size):
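
Note on the train_model.py change: the hunk header above shows that
encode() is declared with three required positional parameters, so the
old call encode(batch) would raise a TypeError at runtime. A minimal
sketch of the failure mode follows; encode()'s body is stubbed out
here, since the patch does not show it, and the sample arguments are
placeholders:

    # Stub that only mirrors the signature of encode() from
    # train_model.py; the real implementation is not in this patch.
    def encode(batch, in_alphabet, max_len):
        pass

    batch, in_alphabet, max_len = [], {}, 10

    encode(batch)                        # TypeError: encode() missing 2
                                         # required positional arguments:
                                         # 'in_alphabet' and 'max_len'
    encode(batch, in_alphabet, max_len)  # OK after this patch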