lab8
parent 2218297162
commit bdf10f8606
Jenkinsfile_artifact (new file, 23 lines)
@@ -0,0 +1,23 @@
pipeline {
    agent {
        dockerfile {
            args '-v /mlruns:/mlruns'
        }
    }
    parameters {
        buildSelector(
            defaultSelector: lastSuccessful(),
            description: 'select build from which to copy artifacts',
            name: 'BUILD_SELECTOR'
        )
    }
    stages {
        stage('Copy artifacts') {
            steps {
                copyArtifacts filter: 'mlruns.tar.gz', projectName: 's449288-training/master', selector: buildParameter('BUILD_SELECTOR')
                sh 'mkdir -p mlrunsArtifact && tar xzf mlruns.tar.gz -C mlrunsArtifact --strip-components 1'
                sh "python ./predictArtifact.py"
            }
        }
    }
}
Jenkinsfile_mlflow (new file, 15 lines)
@@ -0,0 +1,15 @@
pipeline {
    agent {
        dockerfile {
            args '-v /mlruns:/mlruns'
        }
    }
    stages {
        stage('Prediction') {
            steps {
                sh 'ls -la'
                sh "python ./predictMlflow.py"
            }
        }
    }
}
MLproject (new file, 13 lines)
@@ -0,0 +1,13 @@
name: tutorial
# conda_env: conda.yaml

docker_env:
  image: ksero/ium:mlflow

entry_points:
  main:
    parameters:
      epochs: {type: float, default: 1500}
    command: "python ./pytorch/pytorch.py {epochs}"
  test:
    command: "python ./evaluation.py"
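
Note: MLproject entry points like the one above are normally launched with "mlflow run". A minimal sketch of the equivalent Python call, assuming Docker is available and the ksero/ium:mlflow image can be pulled or built; the epochs value here is only an illustration:

import mlflow

# Equivalent to: mlflow run . -P epochs=100
mlflow.projects.run(
    uri=".",                     # directory containing the MLproject file above
    entry_point="main",
    parameters={"epochs": 100},  # overrides the default of 1500
)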
predictArtifact.py (new file, 14 lines)
@@ -0,0 +1,14 @@
import mlflow
import numpy as np
import json

logged_model = 'mlrunsArtifact/1/6b2323cf51794581bf1e2f6d060d50f6/artifacts/model'
loaded_model = mlflow.pyfunc.load_model(logged_model)

with open(f'{logged_model}/input_example.json') as f:
    input_example_data = json.load(f)

input_example = np.array(input_example_data['inputs']).reshape(-1,)

print(f'Input: {input_example}')
print(f'Prediction: {loaded_model.predict(input_example)}')
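
Note: predictArtifact.py pins a specific run id inside the copied mlruns tree. A minimal sketch of an alternative, assuming the archive keeps the standard mlruns layout under experiment 1: pick the most recently modified run directory instead of hard-coding it.

import json
from pathlib import Path

import mlflow
import numpy as np

# Newest run directory under experiment 1 of the extracted archive
# (assumption: standard mlruns layout, one subdirectory per run).
runs = [p for p in Path('mlrunsArtifact/1').iterdir() if p.is_dir()]
latest = max(runs, key=lambda p: p.stat().st_mtime)
logged_model = str(latest / 'artifacts' / 'model')

loaded_model = mlflow.pyfunc.load_model(logged_model)
with open(f'{logged_model}/input_example.json') as f:
    input_example = np.array(json.load(f)['inputs']).reshape(-1,)
print(f'Prediction: {loaded_model.predict(input_example)}')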
predictMlflow.py (new file, 14 lines)
@@ -0,0 +1,14 @@
import mlflow
import numpy as np
import json

logged_model = '/mlruns/20/80fe21a0804844088147d15a3cebb3e5/artifacts/lego'
loaded_model = mlflow.pyfunc.load_model(logged_model)

with open(f'{logged_model}/input_example.json') as f:
    input_example_data = json.load(f)

input_example = np.array(input_example_data['inputs']).reshape(-1,)

print(f'Input: {input_example}')
print(f'Prediction: {loaded_model.predict(input_example)}')
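
Note: predictMlflow.py resolves the model through the /mlruns volume mounted into the container. A minimal sketch of the same lookup through the tracking server configured in the training script, assuming the server's artifact store is reachable from the client:

import mlflow

mlflow.set_tracking_uri("http://172.17.0.1:5000")   # server used during training
run_id = "80fe21a0804844088147d15a3cebb3e5"         # run id from the path above
loaded_model = mlflow.pyfunc.load_model(f"runs:/{run_id}/lego")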
@@ -21,6 +21,8 @@ import sys
 from sacred import Experiment
 from sacred.observers import FileStorageObserver
 from sacred.observers import MongoObserver
+import mlflow
+import mlflow.keras

 # In[2]:
 ex = Experiment(save_git_info=False)
@@ -30,7 +32,8 @@ ex.observers.append(FileStorageObserver('my_runs'))
 # ex.observers.append(MongoObserver(url='mongodb://mongo_user:mongo_password_IUM_2021@localhost:27017', db_name='sacred'))
 ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))

-
+mlflow.set_tracking_uri("http://172.17.0.1:5000")
+mlflow.set_experiment('s444354')

 try:
     numberOfEpochParam = int(sys.argv[1])
@@ -162,21 +165,39 @@ def fit(epochs, lr, model, train_loader, val_loader, _log, _run, opt_func=torch.
     epochs=epochs
     history = []
     optimizer = opt_func(model.parameters(), lr)
-    for epoch in range(epochs):
-        for batch in train_loader:
-            loss = model.training_step(batch)
-            loss.backward()
-            optimizer.step()
-            optimizer.zero_grad()
-        result = evaluate(model, val_loader)
-        model.epoch_end(epoch, result, epochs)
-        history.append(result)
+    with mlflow.start_run():
+
+        for epoch in range(epochs):
+            for batch in train_loader:
+                loss = model.training_step(batch)
+                loss.backward()
+                optimizer.step()
+                optimizer.zero_grad()
+            result = evaluate(model, val_loader)
+            model.epoch_end(epoch, result, epochs)
+            history.append(result)
+
+        mlflow.log_param('epochs', epochs)
+        mlflow.log_param('lr', lr)

     torch.save(model, 'saved_model.pth')
     ex.add_artifact("saved_model.pth")

     _run.info["epochs"] = epochs
+
+    signature = mlflow.models.signature.infer_signature(house_price_features, linear_model.predict(house_price_features))
+
+    tracking_url_type_store = urlparse(mlflow.get_tracking_uri()).scheme
+
+    sampleInp = [0.1, 0.1, 546.0, 555.223, 1., 1., 33.16376, 84.12426]
+
+    if tracking_url_type_store != "file":
+        mlflow.keras.log_model(model, "model", registered_model_name="red-wine-quality", signature=signature)
+    else:
+        mlflow.keras.log_model(model, "model", signature=signature, input_example=np.array(sampleInp))
+
+

     return history


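
Note: the added logging block above reuses names from the MLflow tutorials (house_price_features, linear_model, registered model 'red-wine-quality') that are not defined anywhere in this diff, and it logs a PyTorch model through mlflow.keras. A minimal sketch of inferring the signature from the script's own objects and logging with the PyTorch flavor; sample_input here is a hypothetical example shaped like sampleInp, and the forward pass assumes the model accepts such a batch:

import numpy as np
import torch
import mlflow.pytorch
from mlflow.models.signature import infer_signature

sample_input = np.array([sampleInp], dtype=np.float32)             # hypothetical (1, 8) example input
with torch.no_grad():
    sample_output = model(torch.from_numpy(sample_input)).numpy()  # assumes model accepts this shape
signature = infer_signature(sample_input, sample_output)

mlflow.pytorch.log_model(model, "model", signature=signature, input_example=sample_input)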