Projekt_AI-Automatyczny_saper/venv/Lib/site-packages/torch/optim/adadelta.py
import torch
from . import _functional as F
from .optimizer import Optimizer


class Adadelta(Optimizer):
    """Implements the Adadelta algorithm.

    It has been proposed in `ADADELTA: An Adaptive Learning Rate Method`__.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        rho (float, optional): coefficient used for computing a running average
            of squared gradients (default: 0.9)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6)
        lr (float, optional): coefficient that scales delta before it is applied
            to the parameters (default: 1.0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    __ https://arxiv.org/abs/1212.5701
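
    Example (a minimal sketch; ``model``, ``loss_fn``, ``input`` and
    ``target`` are stand-ins for objects defined elsewhere)::

        >>> optimizer = torch.optim.Adadelta(model.parameters(), lr=1.0)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()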
"""

    def __init__(self, params, lr=1.0, rho=0.9, eps=1e-6, weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= rho <= 1.0:
            raise ValueError("Invalid rho value: {}".format(rho))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, rho=rho, eps=eps, weight_decay=weight_decay)
        super(Adadelta, self).__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
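
        Example (a sketch of the closure pattern; ``optimizer``, ``model``,
        ``loss_fn``, ``input`` and ``target`` are assumed to exist)::

            >>> def closure():
            ...     optimizer.zero_grad()
            ...     loss = loss_fn(model(input), target)
            ...     loss.backward()
            ...     return loss
            >>> loss = optimizer.step(closure)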
"""

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            square_avgs = []
            acc_deltas = []
            # These are per-group constants; reading them before the parameter
            # loop keeps them defined even if no parameter in the group has a
            # gradient.
            lr, rho, eps, weight_decay = group['lr'], group['rho'], group['eps'], group['weight_decay']

            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError('Adadelta does not support sparse gradients')
                grads.append(p.grad)

                state = self.state[p]

                # Lazy state initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['acc_delta'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                square_avgs.append(state['square_avg'])
                acc_deltas.append(state['acc_delta'])

                state['step'] += 1
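
            # Hand the collected per-group tensors to the functional kernel.
            # Per the ADADELTA paper, for each parameter ``p`` with gradient
            # ``g`` (with ``weight_decay * p`` added to ``g`` first, if
            # nonzero) the update is roughly:
            #   square_avg = rho * square_avg + (1 - rho) * g**2
            #   delta = g * sqrt(acc_delta + eps) / sqrt(square_avg + eps)
            #   acc_delta = rho * acc_delta + (1 - rho) * delta**2
            #   p -= lr * delta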
            F.adadelta(params_with_grad,
                       grads,
                       square_avgs,
                       acc_deltas,
                       lr,
                       rho,
                       eps,
                       weight_decay)

        return loss