import torch
import numpy as np
from datetime import datetime
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, TensorDataset, DataLoader


class LayerLinearRegression(nn.Module):
    def __init__(self):
        super().__init__()
        # Instead of our custom parameters, we use a Linear layer with a single input and a single output
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        # Now it only takes a call to the layer to make predictions
        return self.linear(x)


checkpoint = torch.load('model.pt')

model = LayerLinearRegression()
# optimizer = optim.SGD(model.parameters(), lr=checkpoint['loss'])

model.load_state_dict(checkpoint['model_state_dict'])
# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

# Switches layers such as dropout/batch norm to evaluation behaviour
model.eval()
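# For reference only: a checkpoint holding the keys read above ('model_state_dict',
# 'optimizer_state_dict', 'loss') is typically written on the training side with
# something like the sketch below. This is an assumption about how 'model.pt' was
# produced, not part of this script:
#
#   torch.save({
#       'model_state_dict': model.state_dict(),
#       'optimizer_state_dict': optimizer.state_dict(),
#       'loss': loss.item(),
#   }, 'model.pt')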
loss_fn = nn.MSELoss(reduction='mean')

val_dataset = torch.load('val_dataset.pt')
val_loader = DataLoader(dataset=val_dataset)

with torch.no_grad():
    val_losses = []
    # Uses the loader to fetch one mini-batch at a time for validation
    for x_val, y_val in val_loader:
        # Again, the data would be sent to the same device as the model
        # x_val = x_val.to(device)
        # y_val = y_val.to(device)

        # Makes predictions
        yhat = model(x_val)
        # Computes the validation loss
        val_loss = loss_fn(yhat, y_val)
        val_losses.append(val_loss.item())

    validation_loss = np.mean(val_losses)
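# Note: val_loader uses DataLoader's default batch_size of 1, so the plain mean of the
# per-batch losses above equals the per-sample MSE over the whole validation set. If a
# larger batch_size were used, an exact dataset-level figure would need a size-weighted
# average; a minimal sketch, assuming each batch's size is x_val.size(0):
#
#   total, count = 0.0, 0
#   for x_val, y_val in val_loader:
#       batch = x_val.size(0)
#       total += loss_fn(model(x_val), y_val).item() * batch
#       count += batch
#   validation_loss = total / count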
# now = datetime.now()
# print("\n-----------{}-----------".format(now.strftime("%d/%m/%Y, %H:%M:%S")))
# print(f"Mean Squared Error: {validation_loss:.4f}")
# print("------------------------------------------\n")
print(f"{validation_loss:.4f}")

# # Checks model's parameters
# print("Model's state_dict:")
# for param_tensor in model.state_dict():
#     print(param_tensor, "\t", model.state_dict()[param_tensor])

# # Print optimizer's state_dict
# print("Optimizer's state_dict:")
# for var_name in optimizer.state_dict():
#     print(var_name, "\t", optimizer.state_dict()[var_name])
# print("----------------------\n")