forked from bzhangGo/sltunet
-
Notifications
You must be signed in to change notification settings - Fork 0
/
lr.py
61 lines (48 loc) · 1.82 KB
/
lr.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class Lr(object):
    """Noam learning-rate scheduler (as in "Attention Is All You Need").

    Scales the base learning rate by ``hidden_size ** -0.5`` and applies a
    linear warmup for ``warmup_steps`` steps followed by inverse-square-root
    decay. The active rate is advanced with :meth:`step` and read back,
    clipped to ``[min_lrate, max_lrate]``, via :meth:`get_lr`.
    """

    def __init__(self,
                 init_lrate,        # initial learning rate
                 min_lrate,         # minimum learning rate
                 max_lrate,         # maximum learning rate
                 warmup_steps,      # warmup step
                 hidden_size,       # model hidden size
                 name="noam_lr",    # model name, no use
                 ):
        self.name = name
        self.init_lrate = init_lrate  # just record the init learning rate
        self.lrate = init_lrate       # active learning rate, change with training
        self.min_lrate = min_lrate
        self.max_lrate = max_lrate
        self.warmup_steps = warmup_steps
        self.hidden_size = hidden_size

        # Validate with an explicit raise rather than `assert`: assertions are
        # stripped when Python runs with -O, which would silently disable
        # this sanity check.
        if self.max_lrate <= self.min_lrate:
            raise ValueError(
                "Minimum learning rate should be less than maximum learning rate")

    # suppose the eidx starts from 1
    def before_epoch(self, eidx=None):
        """Hook invoked before each epoch; no-op for this scheduler."""
        pass

    def after_epoch(self, eidx=None):
        """Hook invoked after each epoch; no-op for this scheduler."""
        pass

    def step(self, step):
        """Update the active learning rate for global training step `step`.

        Implements the Noam schedule:
            lrate = init_lrate * hidden_size ** -0.5
                    * min((step + 1) * warmup_steps ** -1.5, (step + 1) ** -0.5)
        """
        step = float(step)
        warmup_steps = float(self.warmup_steps)
        multiplier = float(self.hidden_size) ** -0.5
        decay = multiplier * np.minimum((step + 1) * (warmup_steps ** -1.5),
                                        (step + 1) ** -0.5)
        self.lrate = self.init_lrate * decay

    def after_eval(self, eval_score):
        """Hook invoked after evaluation; no-op for this scheduler."""
        pass

    def get_lr(self):
        """Return the learning rate whenever you want, clipped to
        [min_lrate, max_lrate]."""
        return max(min(self.lrate, self.max_lrate), self.min_lrate)
def get_lr(params):
    """Build a ``Lr`` (Noam) scheduler from the hyper-parameter container.

    `params` must expose the attributes `lrate`, `min_lrate`, `max_lrate`,
    `warmup_steps` and `hidden_size`.
    """
    scheduler = Lr(
        init_lrate=params.lrate,
        min_lrate=params.min_lrate,
        max_lrate=params.max_lrate,
        warmup_steps=params.warmup_steps,
        hidden_size=params.hidden_size,
    )
    return scheduler