Compare commits

..

No commits in common. "b06db2db4b04110d4b46be6505586aa78a76110d" and "1a16fb2768d10d4421cd8c6735d6821d5a4ed2c8" have entirely different histories.

5 changed files with 98 additions and 4 deletions

View File

@@ -1,8 +1,6 @@
FROM ubuntu:latest
RUN apt-get update && apt-get install -y python3-pip unzip coreutils
RUN pip install --user kaggle pandas scikit-learn tensorflow
RUN apt-get update && apt-get install -y python3-pip unzip coreutils python3-venv
WORKDIR /app
@@ -11,4 +9,11 @@ COPY ./OrangeQualityData.csv ./
COPY ./orange_quality_model_tf.h5 ./
COPY ./predictions_tf.json ./
RUN python3 -m venv venv
RUN /bin/bash -c "source venv/bin/activate"
RUN pip install tensorflow
RUN apt-get install -y python3-sklearn
CMD ["python3", "data_processing.sh"]

85
Jenkingfile-multi Normal file
View File

@@ -0,0 +1,85 @@
pipeline {
    // Run every stage inside a container built from the repo's Dockerfile,
    // scheduled onto a node carrying the 'docker' label.
    agent {
        dockerfile {
            filename 'Dockerfile'
            label 'docker'
        }
    }

    // Re-trigger this job each time the dataset-creation job completes successfully.
    triggers {
        upstream(upstreamProjects: 's123456-create-dataset', threshold: hudson.model.Result.SUCCESS)
    }

    parameters {
        // Hyperparameter flags passed verbatim to model.py in the Train Model stage.
        string(
            defaultValue: '--epochs 100 --batch_size 32 --learning_rate 0.01',
            description: 'Parametry trenowania',
            name: 'TRAINING_PARAMS'
        )
    }

    environment {
        DATASET_PROJECT = 's123456-create-dataset'  // upstream job publishing the dataset artifacts
        DATA_DIR = 'data'                           // destination directory for copied artifacts
    }

    stages {
        stage('Clone repository') {
            steps {
                checkout scm
            }
        }

        stage('Copy Dataset') {
            steps {
                // Fetch the most recent successful dataset build into DATA_DIR.
                copyArtifacts(
                    projectName: "${env.DATASET_PROJECT}",
                    selector: lastSuccessful(),
                    target: "${env.DATA_DIR}"
                )
            }
        }

        stage('Set execute permission') {
            steps {
                sh 'chmod +x data_processing.sh'
            }
        }

        stage('Run shell script') {
            steps {
                sh './data_processing.sh'
            }
            post {
                success {
                    // results.txt may be missing on some runs; allowEmptyArchive keeps the build green.
                    archiveArtifacts artifacts: 'results.txt', allowEmptyArchive: true
                }
            }
        }

        stage('Train Model') {
            steps {
                sh "python3 model.py ${params.TRAINING_PARAMS}"
            }
        }

        stage('Archive Model') {
            steps {
                archiveArtifacts artifacts: 'orange_quality_model_tf.h5', allowEmptyArchive: true
                archiveArtifacts artifacts: 'predictions_tf.json', allowEmptyArchive: true
            }
        }
    }

    post {
        always {
            // Wipe the workspace regardless of build outcome.
            cleanWs()
        }
    }
}

2
Jenkinsfile vendored
View File

@@ -6,7 +6,7 @@ pipeline {
parameters {
string(
defaultValue: '10000',
name: 'CUTOFF',
name: 'CUTOFF',
description: 'Liczba wierszy do obcięcia ze zbioru danych')
}

BIN
environment.yml Normal file

Binary file not shown.

4
requirements.txt Normal file
View File

@@ -0,0 +1,4 @@
pandas
scikit-learn
tensorflow
kaggle