Compare commits

...

10 Commits
main ... model

Author SHA1 Message Date
Alicja Szulecka 36b1428694 Update Jenkinsfile 2024-05-04 15:55:49 +02:00
Alicja Szulecka 6a0b357945 Update model.py 2024-04-29 21:47:03 +02:00
Alicja Szulecka b45d036d42 Update Jenkinsfile 2024-04-29 21:45:09 +02:00
Alicja Szulecka 45beb68c25 Update Jenkinsfile 2024-04-29 21:43:25 +02:00
Alicja Szulecka 03f4d0b47a Update model.py 2024-04-29 21:27:45 +02:00
Alicja Szulecka ca24c39ada Update Jenkinsfile 2024-04-29 21:22:42 +02:00
Alicja Szulecka f883cd5e17 add parameter 2024-04-29 21:21:21 +02:00
Alicja Szulecka ac93029123 Update Jenkinsfile 2024-04-29 21:09:13 +02:00
Alicja Szulecka 5ff6e66c4f Update Jenkinsfile 2024-04-29 21:08:45 +02:00
Alicja Szulecka 66d15ac8f4 Update Jenkinsfile 2024-04-29 21:02:47 +02:00
2 changed files with 20 additions and 32 deletions

47
Jenkinsfile vendored
View File

@@ -1,48 +1,33 @@
pipeline {
agent any
parameters {
string(name: 'KAGGLE_USERNAME', defaultValue: 'alicjaszulecka', description: 'Kaggle username')
password(name: 'KAGGLE_KEY', defaultValue:'', description: 'Kaggle Key')
string(name: 'CUTOFF', defaultValue: '100', description: 'cut off number')
triggers {
upstream(upstreamProjects: 'z-s464914-create-dataset', threshold: hudson.model.Result.SUCCESS)
}
parameters {
buildSelector (
defaultSelector: lastSuccessful(),
description: 'Build for copying artifacts',
name: 'BUILD_SELECTOR'
)
string(name: 'EPOCHS', defaultValue: '10', description: 'epochs')
}
stages {
stage('Git Checkout') {
steps {
checkout scm
}
}
stage('Download dataset') {
steps {
withEnv(["KAGGLE_USERNAME=${params.KAGGLE_USERNAME}", "KAGGLE_KEY=${params.KAGGLE_KEY}"]) {
sh 'pip install kaggle'
sh 'kaggle datasets download -d uciml/forest-cover-type-dataset'
sh 'unzip -o forest-cover-type-dataset.zip'
sh 'rm forest-cover-type-dataset.zip'
stage('Copy Artifacts') {
steps {
copyArtifacts fingerprintArtifacts: true, projectName: 'z-s464914-create-dataset', selector: buildParameter('BUILD_SELECTOR')
}
}
}
}
stage('Build') {
steps {
script {
withEnv(["KAGGLE_USERNAME=${params.KAGGLE_USERNAME}",
"KAGGLE_KEY=${params.KAGGLE_KEY}" ]) {
def customImage = docker.build("custom-image")
customImage.inside {
sh 'python3 ./IUM_2.py'
archiveArtifacts artifacts: 'covtype.csv, forest_train.csv, forest_test.csv, forest_val.csv', onlyIfSuccessful: true
}
}
}
}
}
stage('Train and Predict') {
stage('Train') {
steps {
script {
def customImage = docker.build("custom-image")
customImage.inside {
sh 'python3 ./model.py'
sh 'python3 ./prediction.py'
sh 'python3 ./model.py ' + params.EPOCHS
archiveArtifacts artifacts: 'model.pth, predictions.txt', onlyIfSuccessful: true
}
}

View File

@@ -6,6 +6,7 @@ import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import torch.nn.functional as F
import sys
device = (
@@ -30,6 +31,9 @@ class Model(nn.Module):
return x
def main():
epochs = int(sys.argv[1])
print(epochs)
forest_train = pd.read_csv('forest_train.csv')
forest_val = pd.read_csv('forest_val.csv')
@@ -59,7 +63,6 @@ def main():
val_loader = DataLoader(list(zip(X_val, y_val)), batch_size=64)
# Training loop
epochs = 10
for epoch in range(epochs):
model.train() # Set model to training mode
running_loss = 0.0