diff --git a/Jenkinsfile_predict_r b/Jenkinsfile_predict_r
new file mode 100644
index 0000000..d226f53
--- /dev/null
+++ b/Jenkinsfile_predict_r
@@ -0,0 +1,48 @@
+pipeline {
+    agent {docker { image 'snowycocoon/ium_434788:4'}}
+    parameters{
+        buildSelector(
+            defaultSelector: lastSuccessful(),
+            description: 'Which build to use for copying artifacts',
+            name: 'WHICH_BUILD'
+        )
+    }
+
+    stages {
+        stage('copy artifacts')
+        {
+            steps
+            {
+                sh 'rm -r my_model'
+                sh 'rm -r model'
+                copyArtifacts(fingerprintArtifacts: true, projectName: 's434695-training/train', selector: buildParameter('WHICH_BUILD'))
+            }
+        }
+        stage('predict')
+        {
+            steps
+            {
+                catchError {
+                    sh 'python3.8 Zadanie_09_MLflow_Predict_Registry.py ${BATCH_SIZE} ${EPOCHS}'
+                }
+            }
+        }
+    }
+
+    post {
+        success {
+            mail body: 'SUCCESS',
+            subject: 's434788 mlflow predict from registry',
+            to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
+        }
+        unstable {
+            mail body: 'UNSTABLE', subject: 's434788 mlflow predict from registry', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
+        }
+        failure {
+            mail body: 'FAILURE', subject: 's434788 mlflow predict from registry', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
+        }
+        changed {
+            mail body: 'CHANGED', subject: 's434788 mlflow predict from registry', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
+        }
+    }
+}
\ No newline at end of file
diff --git a/Jenkinsfile_train b/Jenkinsfile_train
index 08dd56b..dd40f6b 100644
--- a/Jenkinsfile_train
+++ b/Jenkinsfile_train
@@ -32,6 +32,7 @@ pipeline {
         {
             catchError {
                 sh 'rm -r my_model'
+                sh 'rm -r model'
                 sh 'python3.8 Zadanie_08_and_09_MLflow.py ${BATCH_SIZE} ${EPOCHS}'
             }
         }
diff --git a/Zadanie_09_MLflow_Predict_Registry.py b/Zadanie_09_MLflow_Predict_Registry.py
new file mode 100644
index 0000000..7bf4222
--- /dev/null
+++ b/Zadanie_09_MLflow_Predict_Registry.py
@@ -0,0 +1,21 @@
+
+from mlflow.tracking import MlflowClient
+import mlflow
+import pandas as pd
+
+mlflow.set_tracking_uri("http://172.17.0.1:5000")
+client = MlflowClient()
+version = 0
+model_name = "s434695"
+for mv in client.search_model_versions(f"name='{model_name}'"):
+    if int(mv.version) > version:
+        version = int(mv.version)
+
+model = mlflow.pytorch.load_model(
+    model_uri=f"models:/{model_name}/{version}"
+)
+
+data = pd.read_json('my_model/input_example.json', orient='split')
+
+print(data)
+print(model.predict(data))
\ No newline at end of file
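
Note: Zadanie_09_MLflow_Predict_Registry.py resolves the newest registered version by scanning client.search_model_versions() and keeping the highest number. A minimal alternative sketch, assuming the same tracking URI, model name, and copied-in input example as in the patch, is to let MlflowClient.get_latest_versions() report the newest version per stage and take the maximum; the final predict() call simply mirrors the script above and assumes the saved model class exposes such a method.

    # Sketch only, not part of the patch: assumes the same tracking server,
    # registered model name, and my_model/input_example.json as above.
    from mlflow.tracking import MlflowClient
    import mlflow
    import pandas as pd

    mlflow.set_tracking_uri("http://172.17.0.1:5000")
    client = MlflowClient()
    model_name = "s434695"

    # get_latest_versions() returns the newest ModelVersion per stage;
    # taking the max over them gives the highest version overall.
    version = max(int(mv.version) for mv in client.get_latest_versions(model_name))

    model = mlflow.pytorch.load_model(model_uri=f"models:/{model_name}/{version}")

    # input_example.json is provided by the 'copy artifacts' stage of the pipeline.
    data = pd.read_json('my_model/input_example.json', orient='split')

    # Mirrors the usage in the script above; assumes the logged model class defines predict().
    print(model.predict(data))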