06-training

This commit is contained in:
Szymon Bartanowicz 2024-05-14 19:21:17 +02:00
parent 9dca2d4283
commit af6518f064
2 changed files with 23 additions and 20 deletions

43
Jenkinsfile vendored
View File

@@ -1,36 +1,37 @@
 pipeline {
     agent any
+    triggers {
+        upstream(upstreamProjects: 'z-s464937-create-dataset', threshold: hudson.model.Result.SUCCESS)
+    }
     parameters {
-        string(name: 'CUTOFF', defaultValue: '100', description: 'Ilość wierszy do odcięcia')
-        string(name: 'KAGGLE_USERNAME', defaultValue: '', description: 'Kaggle username')
-        password(name: 'KAGGLE_KEY', defaultValue: '', description: 'Kaggle API key')
+        string(name: 'EPOCHS', defaultValue: '10', description: 'Epochs')
+        buildSelector(defaultSelector: lastSuccessful(), description: 'Build no', name: 'BUILD_SELECTOR')
     }
     stages {
-        stage('Clone repo') {
+        stage('Clone Repository') {
             steps {
-                git branch: "main", url: "https://git.wmi.amu.edu.pl/s464937/ium_464937"
+                git branch: 'training', url: "https://git.wmi.amu.edu.pl/s464937/ium_464937.git"
             }
         }
-        stage('Download and preprocess') {
-            environment {
-                KAGGLE_USERNAME = "szymonbartanowicz"
-                KAGGLE_KEY = "4692239eb65f20ec79f9a59ef30e67eb"
+        stage('Copy Artifacts') {
+            steps {
+                copyArtifacts filter: 'openpowerlifting.csv', projectName: 'z-s464937-create-dataset', selector: buildParameter('BUILD_SELECTOR')
+            }
+        }
+        stage("Run") {
+            agent {
+                dockerfile {
+                    filename 'Dockerfile'
+                    reuseNode true
+                }
             }
             steps {
-                withEnv([
-                    "KAGGLE_USERNAME=${env.KAGGLE_USERNAME}",
-                    "KAGGLE_KEY=${env.KAGGLE_KEY}"
-                ]) {
-                    sh "bash ./script1.sh ${params.CUTOFF}"
-                }
-            }
-        }
-        stage('Archive') {
-            steps {
-                archiveArtifacts artifacts: 'data/*', onlyIfSuccessful: true
+                sh "chmod +x ./model.py"
+                sh "python3 ./model.py ${params.EPOCHS}"
+                archiveArtifacts artifacts: 'powerlifting_model.h5', onlyIfSuccessful: true
             }
         }
     }
 }

View File

@@ -1,3 +1,5 @@
+import sys
 import pandas as pd
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import StandardScaler, OneHotEncoder
@@ -34,6 +36,6 @@ pipeline = Pipeline(steps=[
 pipeline['model'].compile(optimizer='adam', loss='mse', metrics=['mae'])
-pipeline.fit(X_train, y_train, model__epochs=10, model__validation_split=0.1)
+pipeline.fit(X_train, y_train, model__epochs=int(sys.argv[1]), model__validation_split=0.1)
 pipeline['model'].save('powerlifting_model.h5')