Computer_Vision/Chapter02/Fetching_values_of_intermediate_layers.ipynb


import torch
x = [[1,2],[3,4],[5,6],[7,8]]
y = [[3],[7],[11],[15]]
X = torch.tensor(x).float()
Y = torch.tensor(y).float()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
X = X.to(device)
Y = Y.to(device)
import torch.nn as nn
class MyNeuralNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.input_to_hidden_layer = nn.Linear(2,8)
        self.hidden_layer_activation = nn.ReLU()
        self.hidden_to_output_layer = nn.Linear(8,1)
    def forward(self, x):
        x = self.input_to_hidden_layer(x)
        x = self.hidden_layer_activation(x)
        x = self.hidden_to_output_layer(x)
        return x
torch.random.manual_seed(10)
mynet = MyNeuralNet().to(device)
loss_func = nn.MSELoss()
_Y = mynet(X)
loss_value = loss_func(_Y,Y)
print(loss_value)
tensor(102.1545, grad_fn=<MseLossBackward>)
from torch.optim import SGD
opt = SGD(mynet.parameters(), lr = 0.001)
loss_history = []
for _ in range(50):
    opt.zero_grad()
    loss_value = loss_func(mynet(X),Y)
    loss_value.backward()
    opt.step()
    loss_history.append(loss_value.item()) # .item() extracts the detached scalar so the list can be plotted
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(loss_history)
plt.title('Loss variation over increasing epochs')
plt.xlabel('epochs')
plt.ylabel('loss value')

1. Fetching intermediate values by directly calling the intermediate layer

mynet.input_to_hidden_layer(X)
tensor([[-4.0139e-01,  7.2155e-03, -4.9131e-01,  1.4615e+00, -3.8093e-01,
         -7.1646e-01,  4.6765e-01,  2.0814e+00],
        [-5.6844e-01, -2.2575e-01, -1.5498e+00,  3.1695e+00, -5.2755e-01,
         -7.3935e-01,  1.9716e+00,  5.3073e+00],
        [-7.3548e-01, -4.5871e-01, -2.6083e+00,  4.8776e+00, -6.7418e-01,
         -7.6225e-01,  3.4756e+00,  8.5332e+00],
        [-9.0252e-01, -6.9167e-01, -3.6667e+00,  6.5856e+00, -8.2080e-01,
         -7.8514e-01,  4.9795e+00,  1.1759e+01]], grad_fn=<AddmmBackward>)
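
The call above returns the hidden layer's pre-activation values. As a sketch (not part of the original notebook), the post-activation values can be fetched the same way by chaining the activation module; wrapping the calls in torch.no_grad() avoids building a gradient graph for what is purely an inspection:

with torch.no_grad():
    hidden_pre = mynet.input_to_hidden_layer(X)              # values before ReLU
    hidden_post = mynet.hidden_layer_activation(hidden_pre)  # values after ReLU
print(hidden_post)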

2. Fetching intermediate values by returning them in nn.Module definition

torch.random.manual_seed(10)
class MyNeuralNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.input_to_hidden_layer = nn.Linear(2,8)
        self.hidden_layer_activation = nn.ReLU()
        self.hidden_to_output_layer = nn.Linear(8,1)
    def forward(self, x):
        hidden1 = self.input_to_hidden_layer(x)
        hidden2 = self.hidden_layer_activation(hidden1)
        x = self.hidden_to_output_layer(hidden2)
        return x, hidden1

mynet = MyNeuralNet().to(device)
loss_func = nn.MSELoss()
_Y, _Y_hidden = mynet(X)
loss_value = loss_func(_Y,Y)
opt = SGD(mynet.parameters(), lr = 0.001)
loss_history = []
for _ in range(50):
    opt.zero_grad()
    loss_value = loss_func(mynet(X)[0],Y)
    loss_value.backward()
    opt.step()
    loss_history.append(loss_value.item()) # .item() extracts the detached scalar so the list can be plotted

import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(loss_history)
plt.title('Loss variation over increasing epochs')
plt.xlabel('epochs')
plt.ylabel('loss value')
mynet(X)[1]
tensor([[-4.0139e-01,  7.2155e-03, -4.9131e-01,  1.4615e+00, -3.8093e-01,
         -7.1646e-01,  4.6765e-01,  2.0814e+00],
        [-5.6844e-01, -2.2575e-01, -1.5498e+00,  3.1695e+00, -5.2755e-01,
         -7.3935e-01,  1.9716e+00,  5.3073e+00],
        [-7.3548e-01, -4.5871e-01, -2.6083e+00,  4.8776e+00, -6.7418e-01,
         -7.6225e-01,  3.4756e+00,  8.5332e+00],
        [-9.0252e-01, -6.9167e-01, -3.6667e+00,  6.5856e+00, -8.2080e-01,
         -7.8514e-01,  4.9795e+00,  1.1759e+01]], grad_fn=<AddmmBackward>)
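
A third option, which this notebook does not cover, is a forward hook: nn.Module.register_forward_hook registers a function that receives (module, input, output) on every forward pass, so intermediate values can be captured without changing the forward definition at all. A minimal sketch, assuming the trained mynet from above (whose forward returns a tuple):

activations = {}
def save_hidden(module, inp, out):
    activations['hidden'] = out.detach() # detach so the stored tensor does not keep the graph alive

hook = mynet.input_to_hidden_layer.register_forward_hook(save_hidden)
_ = mynet(X)  # the forward pass triggers the hook and populates activations['hidden']
hook.remove() # deregister the hook once the value has been captured
print(activations['hidden'])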