Fixed evaluation, added chart creation.
This commit is contained in:
parent 7fd26e40e1
commit 2dce7b1d0a
@@ -15,6 +15,8 @@ pipeline {
        stage('Copy artifact') {
            steps {
                copyArtifacts filter: 'model.pt', fingerprintArtifacts: false, projectName: 's426206-training', selector: buildParameter('BUILD_SELECTOR')
                copyArtifacts filter: 'val_dataset.pt', fingerprintArtifacts: false, projectName: 's426206-create-dataset', selector: buildParameter('BUILD_SELECTOR')
                copyArtifacts filter: 'metrics.tsv', fingerprintArtifacts: false, optional: true, projectName: 's426206-evaluation', selector: buildParameter('BUILD_SELECTOR')
            }
        }
        stage('docker') {
@@ -23,7 +25,7 @@ pipeline {
                def img = docker.build('rokoch/ium:01')
                img.inside {
                    sh 'chmod +x evaluation.py'
                    sh 'python3 ./evaluation.py >>> metryki.txt'
                    sh 'python3 ./evaluation.py >> metrics.tsv'
                }
            }
        }
@@ -32,8 +34,27 @@ pipeline {
        stage('end') {
            steps {
                //Archive the result
                archiveArtifacts 'model.pt'
                archiveArtifacts 'model.pt, metrics.tsv, plot.png'
            }
        }
    }

    post {
        success {
            //Send the email
            emailext body: 'Success evaluation',
                subject: 's426206 evaluation',
                to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'

        }
        unstable {
            emailext body: 'Unstable evaluation', subject: 's426206 evaluation', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
        }
        failure {
            emailext body: 'Failure evaluation', subject: 's426206 evaluation', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
        }
        changed {
            emailext body: 'Changed evaluation', subject: 's426206 evaluation', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
        }
    }
}

@@ -46,18 +46,21 @@ pipeline {
    post {
        success {
            //Send the email
            emailext body: 'SUCCESS',
                subject: 's426206',
            emailext body: 'Success train',
                subject: 's426206 train',
                to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
            //Trigger another job
            build job: 's426206-evaluation/master', parameters: [string(name: 'BUILD_SELECTOR', value: '<StatusBuildSelector plugin="copyartifact@1.46"/>')]

        }
        unstable {
            emailext body: 'UNSTABLE', subject: 's426206', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
            emailext body: 'Unstable train', subject: 's426206 train', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
        }
        failure {
            emailext body: 'FAILURE', subject: 's426206', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
            emailext body: 'Failure train', subject: 's426206 train', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
        }
        changed {
            emailext body: 'CHANGED', subject: 's426206', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
            emailext body: 'Changed train', subject: 's426206 train', to: '26ab8f35.uam.onmicrosoft.com@emea.teams.ms'
        }
    }
}

@@ -15,10 +15,10 @@ lr = args.lr
n_epochs = args.epochs

train_dataset = torch.load('train_dataset.pt')
val_dataset = torch.load('val_dataset.pt')
#val_dataset = torch.load('val_dataset.pt')

train_loader = DataLoader(dataset=train_dataset)
val_loader = DataLoader(dataset=val_dataset)
#val_loader = DataLoader(dataset=val_dataset)

class LayerLinearRegression(nn.Module):
    def __init__(self):
@@ -79,27 +79,28 @@ for epoch in range(n_epochs):

    # After finishing training steps for all mini-batches,
    # it is time for evaluation!

    # We tell PyTorch to NOT use autograd...
    # Do you remember why?
    with torch.no_grad():
        val_losses = []
        # Uses loader to fetch one mini-batch for validation
        for x_val, y_val in val_loader:
            # Again, sends data to same device as model
            # x_val = x_val.to(device)
            # y_val = y_val.to(device)
    # Evaluation is no longer needed here because it is done in evaluation.py. However, you can enable an evaluation preview for individual epochs.
    # # We tell PyTorch to NOT use autograd...
    # # Do you remember why?
    # with torch.no_grad():
    #     val_losses = []
    #     # Uses loader to fetch one mini-batch for validation
    #     for x_val, y_val in val_loader:
    #         # Again, sends data to same device as model
    #         # x_val = x_val.to(device)
    #         # y_val = y_val.to(device)

            model.eval()
            # Makes predictions
            yhat = model(x_val)
            # Computes validation loss
            val_loss = loss_fn(y_val, yhat)
            val_losses.append(val_loss.item())
        validation_loss = np.mean(val_losses)
        validation_losses.append(validation_loss)
    #         model.eval()
    #         # Makes predictions
    #         yhat = model(x_val)
    #         # Computes validation loss
    #         val_loss = loss_fn(y_val, yhat)
    #         val_losses.append(val_loss.item())
    #     validation_loss = np.mean(val_losses)
    #     validation_losses.append(validation_loss)

    print(f"[{epoch+1}] Training loss: {training_loss:.3f}\t Validation loss: {validation_loss:.3f}")
    # print(f"[{epoch+1}] Training loss: {training_loss:.3f}\t Validation loss: {validation_loss:.3f}")
    print(f"[{epoch+1}] Training loss: {training_loss:.3f}\t")

torch.save({
    'model_state_dict': model.state_dict(),

@@ -3,7 +3,7 @@ import numpy as np
from datetime import datetime
import torch.nn as nn
import torch.optim as optim
#from torch.utils.data import Dataset, TensorDataset, DataLoader
from torch.utils.data import Dataset, TensorDataset, DataLoader

class LayerLinearRegression(nn.Module):
    def __init__(self):
@@ -18,24 +18,46 @@ class LayerLinearRegression(nn.Module):
checkpoint = torch.load('model.pt')

model = LayerLinearRegression()
optimizer = optim.SGD(model.parameters(), lr=checkpoint['loss'])
#optimizer = optim.SGD(model.parameters(), lr=checkpoint['loss'])

model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
#optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

model.eval()

now = datetime.now()
print("\n-----------{}-----------".format(now.strftime("%d/%m/%Y, %H:%M:%S")))
# Checks model's parameters
print("Model's state_dict:")
for param_tensor in model.state_dict():
    print(param_tensor, "\t", model.state_dict()[param_tensor])

# Print optimizer's state_dict
print("Optimizer's state_dict:")
for var_name in optimizer.state_dict():
    print(var_name, "\t", optimizer.state_dict()[var_name])
#print("Mean squared error for training: ", np.mean(losses))
#print("Mean squared error for validating: ", np.mean(val_losses))
print("----------------------\n")
loss_fn = nn.MSELoss(reduction='mean')
val_dataset = torch.load('val_dataset.pt')
val_loader = DataLoader(dataset=val_dataset)

with torch.no_grad():
    val_losses = []
    # Uses loader to fetch one mini-batch for validation
    for x_val, y_val in val_loader:
        # Again, sends data to same device as model
        # x_val = x_val.to(device)
        # y_val = y_val.to(device)

        model.eval()
        # Makes predictions
        yhat = model(x_val)
        # Computes validation loss
        val_loss = loss_fn(y_val, yhat)
        val_losses.append(val_loss.item())
    validation_loss = np.mean(val_losses)

#now = datetime.now()
#print("\n-----------{}-----------".format(now.strftime("%d/%m/%Y, %H:%M:%S")))
#print(f"Mean Squared Error: {validation_loss:.4f}")
#print("------------------------------------------\n")
print(f"{validation_loss:.4f}")
# # Checks model's parameters
# print("Model's state_dict:")
# for param_tensor in model.state_dict():
#     print(param_tensor, "\t", model.state_dict()[param_tensor])

# # Print optimizer's state_dict
# print("Optimizer's state_dict:")
# for var_name in optimizer.state_dict():
#     print(var_name, "\t", optimizer.state_dict()[var_name])
# print("----------------------\n")

16 plot.py Normal file
@@ -0,0 +1,16 @@
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

y = []
with open('metrics.tsv','r') as test_in_file:
    for line in test_in_file:
        y.append(float(line.rstrip('\n')))

fig = plt.figure()
plt.plot(list(range(1,len(y)+1)), y)
plt.xticks(range(1,len(y)+1))
plt.ylabel("MSE")
plt.xlabel("Build number")
plt.savefig('plot.png')
plt.show()