update jenkinsfile_train-sacred, dllib-mlflow.py
parent ac87a425b9
commit 531f469e4e
Biblioteka_DL/dllib-mlflow.py

@@ -1,4 +1,4 @@
 import numpy as np
 import sys
 import os
 import torch
@@ -237,6 +237,7 @@ def remove_list(games):
 # 'user_review': games['user_review']}
 # features_g = pd.DataFrame(features_g, dtype=np.float64)
 # features_g = features_g.to_numpy()
+epochs = int(sys.argv[1]) if len(sys.argv) > 1 else 20
 
 def my_main(epochs):
     platform = pd.read_csv('all_games.train.csv', sep=',', usecols=[1], header=None).values.tolist()
@@ -290,41 +291,43 @@ def my_main(epochs):
     loss_fn = nn.CrossEntropyLoss()
     # epochs = 1000
     # epochs = epochs
-    epochs = int(sys.argv[1]) if len(sys.argv) > 1 else 20
-    mlflow.log_param("epochs", epochs)
 
     def print_(loss):
         print ("The loss calculated: ", loss)
 
-    with mlflow.start_run() as run:
-        x_train, y_train = Variable(torch.from_numpy(features_train_g)).float(), Variable(torch.from_numpy(labels_train_g)).long()
-        for epoch in range(1, epochs + 1):
-            print("Epoch #", epoch)
-            y_pred = model(x_train)
-
-            loss = loss_fn(y_pred, y_train.squeeze(-1))
-            print_(loss.item())
-
-            # Zero gradients
-            optimizer.zero_grad()
-            loss.backward() # Gradients
-            optimizer.step() # Update
-
-        # Prediction
-        x_test = Variable(torch.from_numpy(features_test_g)).float()
-        pred = model(x_test)
-
-        pred = pred.detach().numpy()
-
-        print("The accuracy is", accuracy_score(labels_test_g, np.argmax(pred, axis=1)))
-        mlflow.log_metric("accuracy", accuracy_score(labels_test_g, np.argmax(pred, axis=1)))
-
-        pred = pd.DataFrame(pred)
-
-        pred.to_csv('result.csv')
-
-        # save model
-        torch.save(model, "games_model.pkl")
+    x_train, y_train = Variable(torch.from_numpy(features_train_g)).float(), Variable(
+        torch.from_numpy(labels_train_g)).long()
+    for epoch in range(1, epochs + 1):
+        print("Epoch #", epoch)
+        y_pred = model(x_train)
+
+        loss = loss_fn(y_pred, y_train.squeeze(-1))
+        print_(loss.item())
+
+        # Zero gradients
+        optimizer.zero_grad()
+        loss.backward() # Gradients
+        optimizer.step() # Update
+
+    # Prediction
+    x_test = Variable(torch.from_numpy(features_test_g)).float()
+    pred = model(x_test)
+
+    pred = pred.detach().numpy()
+
+    print("The accuracy is", accuracy_score(labels_test_g, np.argmax(pred, axis=1)))
+    # mlflow.log_metric("accuracy", accuracy_score(labels_test_g, np.argmax(pred, axis=1)))
+
+    pred = pd.DataFrame(pred)
+
+    pred.to_csv('result.csv')
+
+    # save model
+    torch.save(model, "games_model.pkl")
+    return accuracy_score(labels_test_g, np.argmax(pred, axis=1))
 
 
+with mlflow.start_run() as run:
+    acc = my_main(epochs)
+    mlflow.log_param("epochs", epochs)
+    mlflow.log_metric("accuracy", acc)
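The net effect of the Python change: my_main() now only trains the model and returns the accuracy, while the MLflow run is opened once at module level, where the epoch count and the returned accuracy are logged. Below is a minimal, self-contained sketch of that pattern; train() is only a stand-in for my_main() and its PyTorch model, not the project's actual code.

import sys

import mlflow


def train(epochs):
    # Stand-in for my_main(): pretend the "loss" shrinks each epoch and
    # report a fake accuracy; the real script fits a PyTorch classifier.
    loss = 1.0
    for _ in range(epochs):
        loss *= 0.9
    return 1.0 - loss  # stand-in for accuracy_score(...)


if __name__ == "__main__":
    # Same CLI convention as the committed script: first argument is the epoch count.
    epochs = int(sys.argv[1]) if len(sys.argv) > 1 else 20

    # One run per training job; the caller logs params and metrics.
    with mlflow.start_run():
        acc = train(epochs)
        mlflow.log_param("epochs", epochs)
        mlflow.log_metric("accuracy", acc)
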
jenkinsfile_train-sacred

@@ -1,12 +1,12 @@
 pipeline {
     agent {
-        dockerfile {
-            additionalBuildArgs "--build-arg KAGGLE_USERNAME=${params.KAGGLE_USERNAME} --build-arg KAGGLE_KEY=${params.KAGGLE_KEY} --build-arg CUTOFF=${params.CUTOFF} -t maciejczajka"
+        docker {
+            image 'maciejczajka'
         }
     }
     parameters {
         string(
-            defaultValue: '1000',
+            defaultValue: '100',
             description: 'Number of epochs',
             name: 'EPOCHS',
             trim: false
@@ -16,28 +16,13 @@ pipeline {
         stage('Script'){
             steps {
                 copyArtifacts filter: '*', projectName: 's444356-create-dataset'
-                sh 'python Biblioteka_DL/dllib-sacred.py with "epochs=$EPOCHS"'
-                sh 'ls my_res'
-                sh 'cp -r my_res res'
-                archiveArtifacts artifacts: 'games_model.pkl, res/**/*.*'
-                sh 'rm -r my_res'
-                sh 'rm -r res'
-                build job: 's444356-evaluation/master/'
+                sh "python Biblioteka_DL/dllib-mlflow.py -e $EPOCHS"
+                archiveArtifacts artifacts: 'games_model.pkl'
+                sh 'ls -al'
+                sh 'cat MLProject'
+                sh 'ls mlruns'
+                archiveArtifacts artifacts: 'mlruns/**'
             }
         }
     }
-    post {
-        success {
-            emailext body: 'SUCCESS', subject: 's444356-training', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
-        }
-        failure {
-            emailext body: 'FAILURE', subject: 's444356-training', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
-        }
-        unstable {
-            emailext body: 'UNSTABLE', subject: 's444356-training', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
-        }
-        changed {
-            emailext body: 'CHANGED', subject: 's444356-training', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
-        }
-    }
 }
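With no tracking URI configured, MLflow writes run data to a local mlruns/ directory next to the script, which is what the new archiveArtifacts artifacts: 'mlruns/**' step captures. A short sketch of reading that store back with the MLflow API; it assumes it is run from a directory containing the archived mlruns/ and that the default experiment was used, and the column names follow MLflow's search_runs output for the params and metrics logged above.

import mlflow

# With no tracking URI set, MLflow reads ./mlruns from the working directory.
runs = mlflow.search_runs()

# One row per run; logged params and metrics show up as columns.
print(runs[["run_id", "params.epochs", "metrics.accuracy"]])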