# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import math
import copy

import paddle
import paddle.nn as nn
import paddle.optimizer as optimizer
import paddle.regularizer as regularizer

from paddlex.ppdet.core.workspace import register, serializable
from paddlex.ppdet.utils.logger import setup_logger

logger = setup_logger(__name__)

__all__ = ['LearningRate', 'OptimizerBuilder']


@serializable
class CosineDecay(object):
    """
    Cosine learning rate decay.

    Args:
        max_epochs (int): max epochs for the training process.
            If you combine cosine decay with warmup, it is recommended
            that max_iters be much larger than the warmup iterations.
        use_warmup (bool): whether to use warmup. Default: True.
        min_lr_ratio (float): minimum learning rate ratio. Default: 0.
        last_plateau_epochs (int): use the minimum learning rate in
            the last few epochs. Default: 0.
    """

    def __init__(self,
                 max_epochs=1000,
                 use_warmup=True,
                 min_lr_ratio=0.,
                 last_plateau_epochs=0):
        self.max_epochs = max_epochs
        self.use_warmup = use_warmup
        self.min_lr_ratio = min_lr_ratio
        self.last_plateau_epochs = last_plateau_epochs

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        assert base_lr is not None, "either base LR or values should be provided"

        max_iters = self.max_epochs * int(step_per_epoch)
        last_plateau_iters = self.last_plateau_epochs * int(step_per_epoch)
        min_lr = base_lr * self.min_lr_ratio

        if boundary is not None and value is not None and self.use_warmup:
            # use warmup: extend the warmup boundary/value lists with the
            # cosine-decayed learning rate for every remaining iteration
            warmup_iters = len(boundary)
            for i in range(int(boundary[-1]), max_iters):
                boundary.append(i)
                if i < max_iters - last_plateau_iters:
                    decayed_lr = min_lr + (base_lr - min_lr) * 0.5 * (math.cos(
                        (i - warmup_iters) * math.pi /
                        (max_iters - warmup_iters - last_plateau_iters)) + 1)
                    value.append(decayed_lr)
                else:
                    value.append(min_lr)
            return optimizer.lr.PiecewiseDecay(boundary, value)
        elif last_plateau_iters > 0:
            # no warmup, but `last_plateau_epochs` > 0: hold min_lr at the end
            boundary = []
            value = []
            for i in range(max_iters):
                if i < max_iters - last_plateau_iters:
                    decayed_lr = min_lr + (base_lr - min_lr) * 0.5 * (math.cos(
                        i * math.pi / (max_iters - last_plateau_iters)) + 1)
                    value.append(decayed_lr)
                else:
                    value.append(min_lr)
                if i > 0:
                    boundary.append(i)
            return optimizer.lr.PiecewiseDecay(boundary, value)

        return optimizer.lr.CosineAnnealingDecay(
            base_lr, T_max=max_iters, eta_min=min_lr)
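

# Illustrative usage sketch: the helper below is not used anywhere in this
# module, and the numeric values (base_lr, step_per_epoch, epoch counts) are
# arbitrary assumptions chosen only to show how a warmup ramp is stitched
# into CosineDecay.
def _cosine_decay_usage_sketch():
    # Build a per-iteration warmup ramp, then let CosineDecay extend it into
    # a full paddle.optimizer.lr.PiecewiseDecay schedule.
    boundary, value = LinearWarmup(steps=500)(base_lr=0.01, step_per_epoch=100)
    sched = CosineDecay(max_epochs=300)(
        base_lr=0.01,
        boundary=boundary,
        value=value,
        step_per_epoch=100)
    return sched  # a paddle.optimizer.lr.PiecewiseDecay instance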


@serializable
class PiecewiseDecay(object):
    """
    Multi-step learning rate decay.

    Args:
        gamma (float | list): decay factor(s)
        milestones (list): epochs at which to decay the learning rate
    """

    def __init__(self,
                 gamma=[0.1, 0.01],
                 milestones=[8, 11],
                 values=None,
                 use_warmup=True):
        super(PiecewiseDecay, self).__init__()
        if type(gamma) is not list:
            self.gamma = []
            for i in range(len(milestones)):
                self.gamma.append(gamma / 10**i)
        else:
            self.gamma = gamma
        self.milestones = milestones
        self.values = values
        self.use_warmup = use_warmup

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        if boundary is not None and self.use_warmup:
            boundary.extend([int(step_per_epoch) * i for i in self.milestones])
        else:
            # do not use LinearWarmup
            boundary = [int(step_per_epoch) * i for i in self.milestones]
            value = [base_lr]  # lr during steps [0, boundary[0]) is base_lr

        # self.values is set directly in the config
        if self.values is not None:
            assert len(self.milestones) + 1 == len(self.values)
            return optimizer.lr.PiecewiseDecay(boundary, self.values)

        # otherwise value is computed from self.gamma
        value = value if value is not None else [base_lr]
        for i in self.gamma:
            value.append(base_lr * i)

        return optimizer.lr.PiecewiseDecay(boundary, value)
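

# Illustrative sketch: with the defaults gamma=[0.1, 0.01] and
# milestones=[8, 11], an assumed base_lr of 0.01 and step_per_epoch of 500
# produce boundaries [4000, 5500] and values [0.01, 0.001, 0.0001] when no
# warmup ramp is supplied.
def _piecewise_decay_usage_sketch():
    sched = PiecewiseDecay()(base_lr=0.01,
                             boundary=None,
                             value=None,
                             step_per_epoch=500)
    # equivalent to optimizer.lr.PiecewiseDecay([4000, 5500],
    #                                           [0.01, 0.001, 0.0001])
    return sched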


@serializable
class LinearWarmup(object):
    """
    Warm up learning rate linearly

    Args:
        steps (int): warm up steps
        start_factor (float): initial learning rate factor
        epochs (int|None): use epochs as warm up steps, the priority
            of `epochs` is higher than `steps`. Default: None.
    """

    def __init__(self, steps=500, start_factor=1. / 3, epochs=None):
        super(LinearWarmup, self).__init__()
        self.steps = steps
        self.start_factor = start_factor
        self.epochs = epochs

    def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        warmup_steps = self.epochs * step_per_epoch \
            if self.epochs is not None else self.steps
        for i in range(warmup_steps + 1):
            if warmup_steps > 0:
                alpha = i / warmup_steps
                factor = self.start_factor * (1 - alpha) + alpha
                lr = base_lr * factor
                value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value
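

# Illustrative sketch (assumed values): LinearWarmup interpolates from
# start_factor * base_lr up to base_lr over `steps` iterations.
def _linear_warmup_usage_sketch():
    boundary, value = LinearWarmup(steps=4, start_factor=0.25)(0.01, 100)
    # boundary == [1, 2, 3, 4]
    # value is approximately [0.0025, 0.004375, 0.00625, 0.008125, 0.01]
    return boundary, value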


@serializable
class BurninWarmup(object):
    """
    Warm up learning rate in burnin mode

    Args:
        steps (int): warm up steps
    """

    def __init__(self, steps=1000):
        super(BurninWarmup, self).__init__()
        self.steps = steps

    def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        burnin = min(self.steps, step_per_epoch)
        for i in range(burnin + 1):
            factor = (i * 1.0 / burnin)**4
            lr = base_lr * factor
            value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value


@serializable
class ExpWarmup(object):
    """
    Warm up learning rate in exponential mode

    Args:
        steps (int): warm up steps.
        epochs (int|None): use epochs as warm up steps, the priority
            of `epochs` is higher than `steps`. Default: None.
    """

    def __init__(self, steps=5, epochs=None):
        super(ExpWarmup, self).__init__()
        self.steps = steps
        self.epochs = epochs

    def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        warmup_steps = self.epochs * step_per_epoch \
            if self.epochs is not None else self.steps
        for i in range(warmup_steps + 1):
            factor = (i / float(warmup_steps))**2
            value.append(base_lr * factor)
            if i > 0:
                boundary.append(i)
        return boundary, value
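

# The three warmup helpers above differ only in the shape of the ramp that is
# applied to base_lr before the decay scheduler takes over:
#   LinearWarmup : factor = start_factor + (1 - start_factor) * i / warmup_steps
#   BurninWarmup : factor = (i / burnin) ** 4, burnin = min(steps, step_per_epoch)
#   ExpWarmup    : factor = (i / warmup_steps) ** 2
# where warmup_steps is `epochs * step_per_epoch` when `epochs` is given,
# otherwise `steps`. Each returns (boundary, value) lists that CosineDecay or
# PiecewiseDecay extends into a single paddle.optimizer.lr.PiecewiseDecay.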


@register
class LearningRate(object):
    """
    Learning Rate configuration

    Args:
        base_lr (float): base learning rate
        schedulers (list): learning rate schedulers
    """
    __category__ = 'optim'

    def __init__(self,
                 base_lr=0.01,
                 schedulers=[PiecewiseDecay(), LinearWarmup()]):
        super(LearningRate, self).__init__()
        self.base_lr = base_lr
        self.schedulers = []

        schedulers = copy.deepcopy(schedulers)
        for sched in schedulers:
            if isinstance(sched, dict):
                # support instantiating schedulers from dict configs
                module = sys.modules[__name__]
                sched_type = sched.pop("name")
                scheduler = getattr(module, sched_type)(**sched)
                self.schedulers.append(scheduler)
            else:
                self.schedulers.append(sched)

    def __call__(self, step_per_epoch):
        assert len(self.schedulers) >= 1
        if not self.schedulers[0].use_warmup:
            return self.schedulers[0](base_lr=self.base_lr,
                                      step_per_epoch=step_per_epoch)

        # TODO: split warmup & decay
        # warmup
        boundary, value = self.schedulers[1](self.base_lr, step_per_epoch)
        # decay
        decay_lr = self.schedulers[0](self.base_lr, boundary, value,
                                      step_per_epoch)
        return decay_lr
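

# Illustrative sketch (the numbers are arbitrary assumptions, not defaults
# from any particular config): LearningRate runs schedulers[1] (warmup) first
# and feeds its boundary/value lists into schedulers[0] (decay).
def _learning_rate_usage_sketch():
    lr_cfg = LearningRate(
        base_lr=0.01,
        schedulers=[PiecewiseDecay(milestones=[8, 11]),
                    LinearWarmup(steps=500)])
    lr_scheduler = lr_cfg(step_per_epoch=500)
    return lr_scheduler  # a paddle LRScheduler, usable by OptimizerBuilder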


@register
class OptimizerBuilder():
    """
    Build optimizer handles

    Args:
        regularizer (object): a `Regularizer` instance
        optimizer (object): an `Optimizer` instance
    """
    __category__ = 'optim'

    def __init__(self,
                 clip_grad_by_norm=None,
                 regularizer={'type': 'L2',
                              'factor': .0001},
                 optimizer={'type': 'Momentum',
                            'momentum': .9}):
        self.clip_grad_by_norm = clip_grad_by_norm
        self.regularizer = regularizer
        self.optimizer = optimizer

    def __call__(self, learning_rate, model=None):
        if self.clip_grad_by_norm is not None:
            grad_clip = nn.ClipGradByGlobalNorm(
                clip_norm=self.clip_grad_by_norm)
        else:
            grad_clip = None

        if self.regularizer and self.regularizer != 'None':
            reg_type = self.regularizer['type'] + 'Decay'
            reg_factor = self.regularizer['factor']
            regularization = getattr(regularizer, reg_type)(reg_factor)
        else:
            regularization = None

        optim_args = self.optimizer.copy()
        optim_type = optim_args['type']
        del optim_args['type']
        if optim_type != 'AdamW':
            optim_args['weight_decay'] = regularization
        op = getattr(optimizer, optim_type)

        if 'param_groups' in optim_args:
            assert isinstance(optim_args['param_groups'], list), ''
            param_groups = optim_args.pop('param_groups')

            params, visited = [], []
            for group in param_groups:
                assert isinstance(group,
                                  dict) and 'params' in group and isinstance(
                                      group['params'], list), ''
                _params = {
                    n: p
                    for n, p in model.named_parameters()
                    if any([k in n for k in group['params']])
                }
                _group = group.copy()
                _group.update({'params': list(_params.values())})
                params.append(_group)
                visited.extend(list(_params.keys()))

            ext_params = [
                p for n, p in model.named_parameters() if n not in visited
            ]
            if len(ext_params) < len(model.parameters()):
                params.append({'params': ext_params})
            elif len(ext_params) > len(model.parameters()):
                raise RuntimeError
        else:
            params = model.parameters()

        return op(learning_rate=learning_rate,
                  parameters=params,
                  grad_clip=grad_clip,
                  **optim_args)
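

# Illustrative sketch: `model` is assumed to be any paddle.nn.Layer and
# `lr_scheduler` a paddle LRScheduler (e.g. the one produced by LearningRate).
# Regularizer and optimizer types are looked up by name in paddle.regularizer
# and paddle.optimizer respectively.
def _optimizer_builder_usage_sketch(model, lr_scheduler):
    builder = OptimizerBuilder(
        clip_grad_by_norm=35.,
        regularizer={'type': 'L2',
                     'factor': 0.0005},
        optimizer={'type': 'Momentum',
                   'momentum': 0.9})
    # returns a paddle.optimizer.Momentum with L2Decay weight decay and
    # global-norm gradient clipping
    return builder(learning_rate=lr_scheduler, model=model)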