115 lines
3.9 KiB
Python
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, TensorDataset, DataLoader
import argparse

parser = argparse.ArgumentParser(description='Model training program')
parser.add_argument('-l', '--lr', type=float, default=1e-3, help="Learning rate (lr)", required=False)
parser.add_argument('-e', '--epochs', type=int, default=100, help="Number of epochs", required=False)
args = parser.parse_args()

lr = args.lr
n_epochs = args.epochs

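# Example invocation (assuming this script is saved as train.py):
#   python train.py --lr 0.01 --epochs 50
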
# Loads the datasets serialized earlier with torch.save (on PyTorch >= 2.6,
# torch.load may need weights_only=False to unpickle full Dataset objects)
train_dataset = torch.load('train_dataset.pt')
val_dataset = torch.load('val_dataset.pt')

# Note: no batch_size is given, so DataLoader falls back to its default of 1
train_loader = DataLoader(dataset=train_dataset)
val_loader = DataLoader(dataset=val_dataset)

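# For reference, a minimal sketch of how such dataset files could have been
# produced (assumed here; the generation script is not part of this file):
#   x = torch.rand(100, 1)
#   y = 2 * x + 1 + 0.1 * torch.randn(100, 1)
#   torch.save(TensorDataset(x, y), 'train_dataset.pt')
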
class LayerLinearRegression(nn.Module):
    def __init__(self):
        super().__init__()
        # Instead of our custom parameters, we use a Linear layer with single input and single output
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        # Now it only takes a call to the layer to make predictions
        return self.linear(x)

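# An equivalent one-liner alternative (not used below) would be:
#   model = nn.Sequential(nn.Linear(1, 1))
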
model = LayerLinearRegression()
# Checks model's parameters
#print(model.state_dict())

loss_fn = nn.MSELoss(reduction='mean')
optimizer = optim.SGD(model.parameters(), lr=lr)

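# Plain SGD is used here; other optimizers could be swapped in, e.g.:
#   optimizer = optim.Adam(model.parameters(), lr=lr)
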
def make_train_step(model, loss_fn, optimizer):
    # Builds function that performs a step in the train loop
    def train_step(x, y):
        # Sets model to TRAIN mode
        model.train()
        # Makes predictions
        yhat = model(x)
        # Computes loss (nn.MSELoss expects the prediction first, the target second)
        loss = loss_fn(yhat, y)
        # Computes gradients
        loss.backward()
        # Updates parameters and zeroes gradients
        optimizer.step()
        optimizer.zero_grad()
        # Returns the loss
        return loss.item()

    # Returns the function that will be called inside the train loop
    return train_step

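# Design note: the closure above captures model, loss_fn and optimizer once,
# so the epoch loop below only has to pass (x, y) batches to train_step.
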
# Creates the train_step function for our model, loss function and optimizer
train_step = make_train_step(model, loss_fn, optimizer)
training_losses = []
validation_losses = []
#print(model.state_dict())
# For each epoch...
for epoch in range(n_epochs):
    losses = []
    # Uses loader to fetch one mini-batch for training
    for x_batch, y_batch in train_loader:
        # NOW, sends the mini-batch data to the device
        # so it matches location of the MODEL
        # x_batch = x_batch.to(device)
        # y_batch = y_batch.to(device)
        # One step of training
        loss = train_step(x_batch, y_batch)
        losses.append(loss)
    training_loss = np.mean(losses)
    training_losses.append(training_loss)

    # After finishing training steps for all mini-batches,
    # it is time for evaluation!

    # We tell PyTorch to NOT use autograd...
    # Do you remember why?
    with torch.no_grad():
        val_losses = []
        # Sets model to EVAL mode (once is enough; moved out of the batch loop)
        model.eval()
        # Uses loader to fetch one mini-batch for validation
        for x_val, y_val in val_loader:
            # Again, sends data to same device as model
            # x_val = x_val.to(device)
            # y_val = y_val.to(device)

            # Makes predictions
            yhat = model(x_val)
            # Computes validation loss (prediction first, target second)
            val_loss = loss_fn(yhat, y_val)
            val_losses.append(val_loss.item())
        validation_loss = np.mean(val_losses)
        validation_losses.append(validation_loss)

    print(f"[{epoch+1}] Training loss: {training_loss:.3f}\t Validation loss: {validation_loss:.3f}")

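# Optional: the collected loss curves could be visualized, assuming
# matplotlib is available (not imported in the original script):
#   import matplotlib.pyplot as plt
#   plt.plot(training_losses, label='training')
#   plt.plot(validation_losses, label='validation')
#   plt.legend()
#   plt.savefig('losses.png')
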
# Checks model's parameters
print("Model's state_dict:")
for param_tensor in model.state_dict():
    print(param_tensor, "\t", model.state_dict()[param_tensor])

# Print optimizer's state_dict
print("Optimizer's state_dict:")
for var_name in optimizer.state_dict():
    print(var_name, "\t", optimizer.state_dict()[var_name])

# Means over the batches of the last epoch
print("Mean squared error for training: ", np.mean(losses))
print("Mean squared error for validation: ", np.mean(val_losses))
torch.save(model, 'model.pt')
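# The saved file can later be restored with torch.load; because the full
# nn.Module object is pickled (not just a state_dict), recent PyTorch
# versions may require weights_only=False:
#   model = torch.load('model.pt', weights_only=False)
#   model.eval()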