Compare commits

...

10 Commits
main...model

Author           SHA1        Message             Date
Alicja Szulecka  36b1428694  Update Jenkinsfile  2024-05-04 15:55:49 +02:00
Alicja Szulecka  6a0b357945  Update model.py     2024-04-29 21:47:03 +02:00
Alicja Szulecka  b45d036d42  Update Jenkinsfile  2024-04-29 21:45:09 +02:00
Alicja Szulecka  45beb68c25  Update Jenkinsfile  2024-04-29 21:43:25 +02:00
Alicja Szulecka  03f4d0b47a  Update model.py     2024-04-29 21:27:45 +02:00
Alicja Szulecka  ca24c39ada  Update Jenkinsfile  2024-04-29 21:22:42 +02:00
Alicja Szulecka  f883cd5e17  add parameter       2024-04-29 21:21:21 +02:00
Alicja Szulecka  ac93029123  Update Jenkinsfile  2024-04-29 21:09:13 +02:00
Alicja Szulecka  5ff6e66c4f  Update Jenkinsfile  2024-04-29 21:08:45 +02:00
Alicja Szulecka  66d15ac8f4  Update Jenkinsfile  2024-04-29 21:02:47 +02:00
2 changed files with 20 additions and 32 deletions

Jenkinsfile

@@ -1,9 +1,15 @@
 pipeline {
     agent any
+    triggers {
+        upstream(upstreamProjects: 'z-s464914-create-dataset', threshold: hudson.model.Result.SUCCESS)
+    }
     parameters {
-        string(name: 'KAGGLE_USERNAME', defaultValue: 'alicjaszulecka', description: 'Kaggle username')
-        password(name: 'KAGGLE_KEY', defaultValue: '', description: 'Kaggle Key')
-        string(name: 'CUTOFF', defaultValue: '100', description: 'cut off number')
+        buildSelector(
+            defaultSelector: lastSuccessful(),
+            description: 'Build for copying artifacts',
+            name: 'BUILD_SELECTOR'
+        )
+        string(name: 'EPOCHS', defaultValue: '10', description: 'epochs')
     }
     stages {
         stage('Git Checkout') {
@@ -11,38 +17,17 @@ pipeline {
                 checkout scm
             }
         }
-        stage('Download dataset') {
+        stage('Copy Artifacts') {
             steps {
-                withEnv(["KAGGLE_USERNAME=${params.KAGGLE_USERNAME}", "KAGGLE_KEY=${params.KAGGLE_KEY}"]) {
-                    sh 'pip install kaggle'
-                    sh 'kaggle datasets download -d uciml/forest-cover-type-dataset'
-                    sh 'unzip -o forest-cover-type-dataset.zip'
-                    sh 'rm forest-cover-type-dataset.zip'
-                }
+                copyArtifacts fingerprintArtifacts: true, projectName: 'z-s464914-create-dataset', selector: buildParameter('BUILD_SELECTOR')
             }
         }
-        stage('Build') {
-            steps {
-                script {
-                    withEnv(["KAGGLE_USERNAME=${params.KAGGLE_USERNAME}",
-                             "KAGGLE_KEY=${params.KAGGLE_KEY}"]) {
-                        def customImage = docker.build("custom-image")
-                        customImage.inside {
-                            sh 'python3 ./IUM_2.py'
-                            archiveArtifacts artifacts: 'covtype.csv, forest_train.csv, forest_test.csv, forest_val.csv', onlyIfSuccessful: true
-                        }
-                    }
-                }
-            }
-        }
-        stage('Train and Predict') {
+        stage('Train') {
             steps {
                 script {
                     def customImage = docker.build("custom-image")
                     customImage.inside {
-                        sh 'python3 ./model.py'
-                        sh 'python3 ./prediction.py'
+                        sh 'python3 ./model.py ' + params.EPOCHS
                         archiveArtifacts artifacts: 'model.pth, predictions.txt', onlyIfSuccessful: true
                     }
                 }
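
Taken together, the Jenkinsfile changes rework how the pipeline obtains its data and its hyperparameters: the Kaggle download and dataset-build stages are removed, the job is instead triggered by a successful run of the upstream z-s464914-create-dataset project and copies that run's artifacts (selected via BUILD_SELECTOR), and the new EPOCHS parameter is appended to the training command. A minimal sketch of how that parameter reaches the script on the Python side, mirroring the model.py change shown below:

    # Jenkins executes: python3 ./model.py 10
    # (params.EPOCHS is concatenated onto the command as text)
    import sys

    if __name__ == '__main__':
        # sys.argv == ['./model.py', '10'] for the call above;
        # the value arrives as a string and must be converted explicitly
        epochs = int(sys.argv[1])
        print(f'training for {epochs} epochs')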

model.py

@@ -6,6 +6,7 @@ import pandas as pd
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import LabelEncoder
 import torch.nn.functional as F
+import sys

 device = (
@@ -30,6 +31,9 @@ class Model(nn.Module):
         return x

 def main():
+    epochs = int(sys.argv[1])
+    print(epochs)
+
     forest_train = pd.read_csv('forest_train.csv')
     forest_val = pd.read_csv('forest_val.csv')
@@ -59,7 +63,6 @@ def main():
     val_loader = DataLoader(list(zip(X_val, y_val)), batch_size=64)

     # Training loop
-    epochs = 10
     for epoch in range(epochs):
         model.train()  # Set model to training mode
         running_loss = 0.0
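
The hard-coded epochs = 10 is replaced by a value read from the command line, which is exactly what the new Train stage supplies. Note that a bare sys.argv[1] raises IndexError when the script is run with no argument, e.g. locally outside the pipeline. A more defensive variant, as a sketch only, assuming the default of 10 is still desired:

    import argparse

    def parse_args():
        # Hypothetical hardening of the CLI: optional positional argument
        # with a fallback, and a clear error on non-integer input
        parser = argparse.ArgumentParser(description='Train the forest cover model')
        parser.add_argument('epochs', nargs='?', type=int, default=10,
                            help='number of training epochs (default: 10)')
        return parser.parse_args()

    if __name__ == '__main__':
        print(parse_args().epochs)  # `python3 ./model.py 10` still prints 10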