optimizer.py

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import copy

import paddle
import paddle.nn as nn
import paddle.optimizer as optimizer
from paddle.optimizer.lr import CosineAnnealingDecay
import paddle.regularizer as regularizer
from paddle import cos

from paddlex.ppdet.core.workspace import register, serializable
from paddlex.ppdet.utils.logger import setup_logger

__all__ = ['LearningRate', 'OptimizerBuilder']

logger = setup_logger(__name__)


@serializable
class CosineDecay(object):
    """
    Cosine learning rate decay

    Args:
        max_epochs (int): max epochs for the training process.
            If you combine cosine decay with warmup, it is recommended that
            max_epochs is much larger than the number of warmup iterations.
        use_warmup (bool): whether the schedule is combined with a warmup
            scheduler.
    """

    def __init__(self, max_epochs=1000, use_warmup=True):
        self.max_epochs = max_epochs
        self.use_warmup = use_warmup

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        assert base_lr is not None, "either base LR or values should be provided"
        max_iters = self.max_epochs * int(step_per_epoch)

        if boundary is not None and value is not None and self.use_warmup:
            # Extend the warmup boundaries/values with per-iteration
            # cosine-decayed learning rates.
            for i in range(int(boundary[-1]), max_iters):
                boundary.append(i)
                decayed_lr = base_lr * 0.5 * (
                    math.cos(i * math.pi / max_iters) + 1)
                value.append(decayed_lr)
            return optimizer.lr.PiecewiseDecay(boundary, value)

        return optimizer.lr.CosineAnnealingDecay(base_lr, T_max=max_iters)
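

# Illustrative sketch (not part of the original module): how CosineDecay is
# typically invoked on its own. The base_lr, max_epochs and step_per_epoch
# values below are assumptions chosen for demonstration only.
def _example_cosine_decay():
    # Without warmup, the scheduler falls through to CosineAnnealingDecay
    # over max_epochs * step_per_epoch iterations (here 12 * 100 = 1200).
    scheduler = CosineDecay(max_epochs=12, use_warmup=False)(
        base_lr=0.01, step_per_epoch=100)
    return scheduler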


@serializable
class PiecewiseDecay(object):
    """
    Multi step learning rate decay

    Args:
        gamma (float | list): decay factor
        milestones (list): steps at which to decay learning rate
    """

    def __init__(self,
                 gamma=[0.1, 0.01],
                 milestones=[8, 11],
                 values=None,
                 use_warmup=True):
        super(PiecewiseDecay, self).__init__()
        if type(gamma) is not list:
            self.gamma = []
            for i in range(len(milestones)):
                self.gamma.append(gamma / 10**i)
        else:
            self.gamma = gamma
        self.milestones = milestones
        self.values = values
        self.use_warmup = use_warmup

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        if boundary is not None and self.use_warmup:
            boundary.extend([int(step_per_epoch) * i for i in self.milestones])
        else:
            # do not use LinearWarmup
            boundary = [int(step_per_epoch) * i for i in self.milestones]
            value = [base_lr]  # lr stays at base_lr during steps [0, boundary[0])

        # self.values is set directly in the config
        if self.values is not None:
            assert len(self.milestones) + 1 == len(self.values)
            return optimizer.lr.PiecewiseDecay(boundary, self.values)

        # otherwise the values are computed from base_lr and self.gamma
        value = value if value is not None else [base_lr]
        for i in self.gamma:
            value.append(base_lr * i)

        return optimizer.lr.PiecewiseDecay(boundary, value)
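

# Illustrative sketch (not part of the original module): a stand-alone
# PiecewiseDecay without warmup. With base_lr=0.01, milestones=[8, 11] and
# gamma=[0.1, 0.01], the lr drops to 0.001 after epoch 8 and to 0.0001 after
# epoch 11. step_per_epoch=100 is an assumed value.
def _example_piecewise_decay():
    scheduler = PiecewiseDecay(
        gamma=[0.1, 0.01], milestones=[8, 11], use_warmup=False)(
            base_lr=0.01, step_per_epoch=100)
    return scheduler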


@serializable
class LinearWarmup(object):
    """
    Warm up learning rate linearly

    Args:
        steps (int): warm up steps
        start_factor (float): initial learning rate factor
    """

    def __init__(self, steps=500, start_factor=1. / 3):
        super(LinearWarmup, self).__init__()
        self.steps = steps
        self.start_factor = start_factor

    def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        for i in range(self.steps + 1):
            if self.steps > 0:
                alpha = i / self.steps
                factor = self.start_factor * (1 - alpha) + alpha
                lr = base_lr * factor
                value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value
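

# Illustrative sketch (not part of the original module): LinearWarmup returns
# the raw (boundary, value) lists that a decay scheduler then extends. The
# steps, start_factor and base_lr values are assumptions for demonstration.
def _example_linear_warmup():
    boundary, value = LinearWarmup(steps=4, start_factor=0.25)(
        base_lr=0.01, step_per_epoch=100)
    # boundary == [1, 2, 3, 4]
    # value    == [0.0025, 0.004375, 0.00625, 0.008125, 0.01]
    return boundary, value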


@serializable
class BurninWarmup(object):
    """
    Warm up learning rate in burnin mode

    Args:
        steps (int): warm up steps
    """

    def __init__(self, steps=1000):
        super(BurninWarmup, self).__init__()
        self.steps = steps

    def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        burnin = min(self.steps, step_per_epoch)
        for i in range(burnin + 1):
            # quartic ramp from 0 up to base_lr
            factor = (i * 1.0 / burnin)**4
            lr = base_lr * factor
            value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value
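

# Illustrative sketch (not part of the original module): BurninWarmup ramps the
# lr with a quartic curve over min(steps, step_per_epoch) iterations. The
# values below are made up for demonstration.
def _example_burnin_warmup():
    boundary, value = BurninWarmup(steps=1000)(base_lr=0.01, step_per_epoch=200)
    # burnin == 200, value[0] == 0.0, value[-1] == 0.01
    return boundary, value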


@register
class LearningRate(object):
    """
    Learning Rate configuration

    Args:
        base_lr (float): base learning rate
        schedulers (list): learning rate schedulers
    """
    __category__ = 'optim'

    def __init__(self,
                 base_lr=0.01,
                 schedulers=[PiecewiseDecay(), LinearWarmup()]):
        super(LearningRate, self).__init__()
        self.base_lr = base_lr
        self.schedulers = schedulers

    def __call__(self, step_per_epoch):
        assert len(self.schedulers) >= 1
        if not self.schedulers[0].use_warmup:
            return self.schedulers[0](base_lr=self.base_lr,
                                      step_per_epoch=step_per_epoch)

        # TODO: split warmup & decay
        # warmup
        boundary, value = self.schedulers[1](self.base_lr, step_per_epoch)
        # decay
        decay_lr = self.schedulers[0](self.base_lr, boundary, value,
                                      step_per_epoch)
        return decay_lr
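

# Illustrative sketch (not part of the original module): composing warmup and
# decay through LearningRate. schedulers[0] is the decay schedule and
# schedulers[1] is the warmup; step_per_epoch=100 is an assumed value.
def _example_learning_rate():
    lr_config = LearningRate(
        base_lr=0.01,
        schedulers=[PiecewiseDecay(milestones=[8, 11]),
                    LinearWarmup(steps=500)])
    scheduler = lr_config(step_per_epoch=100)  # a paddle PiecewiseDecay
    return scheduler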


@register
class OptimizerBuilder():
    """
    Build optimizer handles

    Args:
        clip_grad_by_norm (float): if set, clip gradients by this global norm
        regularizer (object): a `Regularizer` instance
        optimizer (object): an `Optimizer` instance
    """
    __category__ = 'optim'

    def __init__(self,
                 clip_grad_by_norm=None,
                 regularizer={'type': 'L2',
                              'factor': .0001},
                 optimizer={'type': 'Momentum',
                            'momentum': .9}):
        self.clip_grad_by_norm = clip_grad_by_norm
        self.regularizer = regularizer
        self.optimizer = optimizer

    def __call__(self, learning_rate, params=None):
        if self.clip_grad_by_norm is not None:
            grad_clip = nn.ClipGradByGlobalNorm(
                clip_norm=self.clip_grad_by_norm)
        else:
            grad_clip = None

        if self.regularizer and self.regularizer != 'None':
            reg_type = self.regularizer['type'] + 'Decay'
            reg_factor = self.regularizer['factor']
            regularization = getattr(regularizer, reg_type)(reg_factor)
        else:
            regularization = None

        optim_args = self.optimizer.copy()
        optim_type = optim_args['type']
        del optim_args['type']
        op = getattr(optimizer, optim_type)
        return op(learning_rate=learning_rate,
                  parameters=params,
                  weight_decay=regularization,
                  grad_clip=grad_clip,
                  **optim_args)
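

# Illustrative sketch (not part of the original module): building a Momentum
# optimizer with L2 weight decay for a toy model. The nn.Linear layer and the
# concrete hyperparameter values are assumed stand-ins for a real model/config.
def _example_optimizer_builder():
    model = nn.Linear(4, 2)
    lr = LearningRate(base_lr=0.01)(step_per_epoch=100)
    builder = OptimizerBuilder(
        regularizer={'type': 'L2', 'factor': 1e-4},
        optimizer={'type': 'Momentum', 'momentum': 0.9})
    return builder(lr, params=model.parameters())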


class ModelEMA(object):
    """
    Exponential moving average (EMA) of model weights.

    Args:
        decay (float): EMA decay rate
        model (nn.Layer): model whose weights are averaged
        use_thres_step (bool): if True, ramp the decay up over the first updates
    """

    def __init__(self, decay, model, use_thres_step=False):
        self.step = 0
        self.decay = decay
        self.state_dict = dict()
        for k, v in model.state_dict().items():
            self.state_dict[k] = paddle.zeros_like(v)
        self.use_thres_step = use_thres_step

    def update(self, model):
        if self.use_thres_step:
            decay = min(self.decay, (1 + self.step) / (10 + self.step))
        else:
            decay = self.decay
        self._decay = decay

        model_dict = model.state_dict()
        for k, v in self.state_dict.items():
            v = decay * v + (1 - decay) * model_dict[k]
            v.stop_gradient = True
            self.state_dict[k] = v
        self.step += 1

    def apply(self):
        if self.step == 0:
            return self.state_dict
        state_dict = dict()
        for k, v in self.state_dict.items():
            # bias correction, since the EMA is initialized with zeros
            v = v / (1 - self._decay**self.step)
            v.stop_gradient = True
            state_dict[k] = v
        return state_dict
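

# Illustrative sketch (not part of the original module): maintaining an EMA
# copy of a toy model's weights during training. The nn.Linear layer and the
# decay value are assumptions for demonstration.
def _example_model_ema():
    model = nn.Linear(4, 2)
    ema = ModelEMA(decay=0.9998, model=model, use_thres_step=True)
    # call once after each optimizer step:
    ema.update(model)
    # for evaluation, load the bias-corrected averaged weights:
    model.set_state_dict(ema.apply())
    return model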