Mirror of https://github.com/iperov/DeepFaceLive (synced 2025-08-19 21:13:21 -07:00)
upd xlib.torch
commit 6e84cbe8b6 (parent fd0ca499bf)
3 changed files with 74 additions and 4 deletions
xlib/torch/optim/AdaBelief.py (new file, 59 lines)
@@ -0,0 +1,59 @@
import torch


class AdaBelief(torch.optim.Optimizer):
    """
    AdaBelief optimizer (Zhuang et al., 2020): Adam-style updates that scale
    the step by the "belief" in the gradient, i.e. the variance of the
    gradient around its exponential moving average m_t, rather than the raw
    second moment. This implementation adds an optional learning-rate dropout
    and, unlike the reference implementation, applies no bias correction.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16,
                 weight_decay=0, lr_dropout=1.0):
        defaults = dict(lr=lr, lr_dropout=lr_dropout, betas=betas, eps=eps,
                        weight_decay=weight_decay)
        super(AdaBelief, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdaBelief, self).__setstate__(state)

    def reset(self):
        # Reinitialize per-parameter state: step counter, first moment m_t,
        # and the variance-of-gradient accumulator v_t.
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = 0
                state['m_t'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                state['v_t'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)

    def step(self):
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad.data
                beta1, beta2 = group['betas']

                # Lazy state initialization on the first step.
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['m_t'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    state['v_t'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)

                # L2 regularization folded into the gradient.
                if group['weight_decay'] != 0:
                    grad.add_(p.data, alpha=group['weight_decay'])

                state['step'] += 1

                # m_t: EMA of the gradient.
                # v_t: EMA of the squared deviation (grad - m_t)**2, the
                # AdaBelief replacement for Adam's raw second moment.
                m_t, v_t = state['m_t'], state['v_t']
                m_t.mul_(beta1).add_(grad, alpha=1 - beta1)
                v_t.mul_(beta2).add_((grad - m_t) ** 2, alpha=1 - beta2)

                # Parameter delta: -lr * m_t / (sqrt(v_t) + eps).
                v_diff = (-group['lr'] * m_t).div_(v_t.sqrt().add_(group['eps']))

                # Learning-rate dropout: keep each element of the update with
                # probability lr_dropout, zero it otherwise.
                if group['lr_dropout'] < 1.0:
                    lrd_rand = torch.ones_like(p.data)
                    v_diff *= torch.bernoulli(lrd_rand * group['lr_dropout'])

                p.data.add_(v_diff)
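Below is a minimal usage sketch for the new optimizer; the model, data, and lr_dropout value are hypothetical illustrations, not part of this commit:

import torch
import torch.nn as nn
import torch.nn.functional as F

# Hypothetical model and batch, purely for illustration.
model = nn.Linear(8, 1)
opt = AdaBelief(model.parameters(), lr=1e-3, lr_dropout=0.7)

x, y = torch.randn(32, 8), torch.randn(32, 1)
loss = F.mse_loss(model(x), y)

opt.zero_grad()
loss.backward()
opt.step()  # note: unlike stock torch optimizers, this step() takes no closure argument

Since step() reads p.grad.data directly and never returns a loss, it is meant to be called after a plain backward() pass, as above.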