Compare commits


7 Commits

SHA1 Message Date
29602e30a6 Update Dockerfile 2024-06-01 17:53:51 +02:00
3d80335ede IUM_06 2024-05-04 11:48:12 +02:00
f6c7f5981e IUM_06 2024-05-04 11:41:45 +02:00
c0b07aaac4 IUM_06 2024-05-04 11:31:39 +02:00
979785f5b7 IUM_06 2024-05-04 11:30:47 +02:00
795b91c695 IUM_06 2024-05-04 11:26:17 +02:00
91508718a0 IUM_06 2024-05-04 10:40:09 +02:00
3 changed files with 27 additions and 32 deletions

Dockerfile

@@ -2,4 +2,4 @@ FROM ubuntu:latest
 RUN apt update && apt install -y python3-pip
-RUN pip install pandas numpy scikit-learn tensorflow
+RUN pip install pandas numpy scikit-learn tensorflow --break-system-packages
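
The added --break-system-packages flag is needed because ubuntu:latest now resolves to a release whose Debian-packaged pip refuses to install into the system interpreter (PEP 668, "externally-managed-environment"). An alternative that avoids overriding that protection is a virtual environment; the sketch below is hypothetical and not what this commit does:

# Hypothetical alternative: install into a venv instead of passing
# --break-system-packages to pip.
FROM ubuntu:latest
RUN apt update && apt install -y python3-pip python3-venv
RUN python3 -m venv /opt/venv
# Putting the venv first on PATH makes "pip" and "python3" resolve to it.
ENV PATH="/opt/venv/bin:$PATH"
RUN pip install pandas numpy scikit-learn tensorflow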

Jenkinsfile vendored (49 changed lines)

@@ -1,54 +1,45 @@
 pipeline {
-    agent any
+    agent {
+        dockerfile true
+    }
+    triggers {
+        upstream(upstreamProjects: 'z-s464913-create-dataset', threshold: hudson.model.Result.SUCCESS)
+    }
     parameters {
-        string (
-            defaultValue: 'vskyper',
-            description: 'Kaggle username',
-            name: 'KAGGLE_USERNAME',
-            trim: false
-        )
-        password (
-            defaultValue: '',
-            description: 'Kaggle API key',
-            name: 'KAGGLE_KEY',
+        buildSelector(
+            defaultSelector: lastSuccessful(),
+            description: 'Which build to use for copying artifacts',
+            name: 'BUILD_SELECTOR'
         )
+        string(name: 'LEARNING_RATE', defaultValue: '0.001', description: 'Learning rate')
+        string(name: 'EPOCHS', defaultValue: '5', description: 'Number of epochs')
     }
     stages {
         stage('Clone Repository') {
             steps {
-                git branch: 'main', url: 'https://git.wmi.amu.edu.pl/s464913/ium_464913.git'
+                git branch: 'training', url: 'https://git.wmi.amu.edu.pl/s464913/ium_464913.git'
             }
         }
-        stage('Download dataset') {
+        stage('Copy Artifacts') {
             steps {
-                withEnv(["KAGGLE_USERNAME=${params.KAGGLE_USERNAME}", "KAGGLE_KEY=${params.KAGGLE_KEY}"]) {
-                    sh 'pip install kaggle'
-                    sh 'kaggle datasets download -d mlg-ulb/creditcardfraud'
-                    sh 'unzip -o creditcardfraud.zip'
-                    sh 'rm creditcardfraud.zip'
-                }
+                copyArtifacts filter: 'data/*', projectName: 'z-s464913-create-dataset', selector: buildParameter('BUILD_SELECTOR')
             }
         }
-        stage('Run create-dataset script') {
+        stage('Run train_model script') {
+            agent {
+                dockerfile {
+                    reuseNode true
+                }
+            }
             steps {
-                sh 'chmod +x create-dataset.py'
-                sh 'python3 ./create-dataset.py'
+                sh 'chmod +x train_model.py'
+                sh "python3 ./train_model.py ${params.LEARNING_RATE} ${params.EPOCHS}"
             }
         }
         stage('Archive Artifacts') {
             steps {
-                archiveArtifacts artifacts: 'data/*', onlyIfSuccessful: true
+                archiveArtifacts artifacts: 'model/*', onlyIfSuccessful: true
             }
         }
     }
 }
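
The rewrite turns the job from a self-contained Kaggle download into the training half of a two-job pipeline: it now runs inside the image built from the repository's Dockerfile (declared at pipeline level and again, with reuseNode, on the training stage), fires automatically whenever z-s464913-create-dataset succeeds, and pulls that job's data/ files with copyArtifacts (Copy Artifact plugin), while BUILD_SELECTOR lets a manual run pin a specific upstream build. This only works if the upstream job archives the files under matching paths; a minimal sketch of that upstream archive step follows — it is an assumption, since the upstream Jenkinsfile is not part of this compare:

// Hypothetical stage in the upstream z-s464913-create-dataset job;
// its actual Jenkinsfile is not shown here.
stage('Archive Artifacts') {
    steps {
        // Paths must match the 'data/*' filter used by copyArtifacts above.
        archiveArtifacts artifacts: 'data/*', onlyIfSuccessful: true
    }
}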

train_model.py

@@ -6,6 +6,7 @@ from keras.models import Sequential
 from keras.layers import BatchNormalization, Dropout, Dense, Flatten, Conv1D
 from keras.optimizers import Adam
 import pandas as pd
+import sys
 
 
 def main():
@@ -22,6 +23,9 @@ def main():
     X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
     X_val = X_val.reshape(X_val.shape[0], X_val.shape[1], 1)
 
+    learning_rate = float(sys.argv[1])
+    epochs = int(sys.argv[2])
+
     model = Sequential(
         [
             Conv1D(32, 2, activation="relu", input_shape=X_train[0].shape),
@@ -38,7 +42,7 @@ def main():
     )
 
     model.compile(
-        optimizer=Adam(learning_rate=1e-3),
+        optimizer=Adam(learning_rate=learning_rate),
         loss="binary_crossentropy",
         metrics=["accuracy"],
     )
@@ -47,7 +51,7 @@ def main():
         X_train,
         y_train,
         validation_data=(X_val, y_val),
-        epochs=5,
+        epochs=epochs,
         verbose=1,
     )
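
train_model.py now takes the learning rate and epoch count as positional command-line arguments, matching the python3 ./train_model.py ${params.LEARNING_RATE} ${params.EPOCHS} call in the Jenkinsfile; run outside Jenkins without both arguments, it fails with an IndexError. A more forgiving variant of just the argument handling, sketched with argparse (hypothetical, not what the commit does):

# Hypothetical defensive replacement for the sys.argv handling above;
# defaults mirror the previously hard-coded values (1e-3, 5 epochs).
import argparse

parser = argparse.ArgumentParser(description="Train the credit-card fraud model")
parser.add_argument("learning_rate", type=float, nargs="?", default=1e-3,
                    help="Adam learning rate")
parser.add_argument("epochs", type=int, nargs="?", default=5,
                    help="number of training epochs")
args = parser.parse_args()
# ...then pass args.learning_rate to Adam(...) and args.epochs to model.fit(...)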