From ae48fc205053e77035b0b3637135979c5bf76693 Mon Sep 17 00:00:00 2001
From: zgolebiewska
Date: Sun, 26 May 2024 13:45:44 +0200
Subject: [PATCH] Simplify Dockerfile and Jenkinsfile: run data_processing.sh, drop training stages

---
 Dockerfile  | 15 +++++----------
 Jenkinsfile | 47 +----------------------------------------------
 2 files changed, 6 insertions(+), 56 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index c9fb8ce..876ae88 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,19 +1,14 @@
 FROM ubuntu:latest
 
-RUN apt-get update && apt-get install -y \
-    python3-pip \
-    unzip \
-    coreutils
+RUN apt-get update && apt-get install -y python3-pip unzip coreutils
 
-RUN pip3 install kaggle pandas scikit-learn tensorflow
+RUN pip install --user kaggle pandas scikit-learn tensorflow
 
 WORKDIR /app
 
 COPY ./data_processing.sh ./
-COPY ./model.py ./
 COPY ./OrangeQualityData.csv ./
-COPY ./requirements.txt ./
+COPY ./orange_quality_model_tf.h5 ./
+COPY ./predictions_tf.json ./
 
-RUN pip3 install -r requirements.txt
-
-CMD ["python3", "model.py"]
+CMD ["bash", "./data_processing.sh"]
diff --git a/Jenkinsfile b/Jenkinsfile
index 39547db..715d747 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -6,18 +6,8 @@ pipeline {
     parameters {
         string(
             defaultValue: '10000',
-            name: 'CUTOFF', 
+            name: 'CUTOFF',
            description: 'Liczba wierszy do obcięcia ze zbioru danych')
-        string(
-            defaultValue: '--epochs 100 --batch_size 32 --learning_rate 0.01',
-            name: 'TRAINING_PARAMS',
-            description: 'Parametry trenowania'
-        )
-    }
-
-    environment {
-        DATASET_PROJECT = 's123456-create-dataset'
-        DATA_DIR = 'data'
     }
 
     stages {
@@ -26,17 +16,6 @@ pipeline {
                 checkout([$class: 'GitSCM', branches: [[name: '*/master']], userRemoteConfigs: [[url: 'https://git.wmi.amu.edu.pl/s464906/ium_464906']]])
             }
         }
-        stage('Copy Dataset') {
-            steps {
-                script {
-                    copyArtifacts(
-                        projectName: "${DATASET_PROJECT}",
-                        selector: lastSuccessful(),
-                        target: "${env.DATA_DIR}"
-                    )
-                }
-            }
-        }
         stage('Set execute permission') {
             steps {
                 script {
@@ -56,29 +35,5 @@ pipeline {
                 }
             }
         }
-
-        stage('Install Dependencies') {
-            steps {
-                sh 'pip install -r requirements.txt'
-            }
-        }
-
-        stage('Train Model') {
-            steps {
-                sh "python model.py ${params.TRAINING_PARAMS}"
-            }
-        }
-
-        stage('Archive Model') {
-            steps {
-                archiveArtifacts artifacts: 'orange_quality_model_tf.h5', allowEmptyArchive: true
-                archiveArtifacts artifacts: 'predictions_tf.json', allowEmptyArchive: true
-            }
-        }
-    }
-    post {
-        always {
-            cleanWs()
-        }
     }
 }