learning_rate.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import math

from paddle.optimizer.lr import LinearWarmup
from paddle.optimizer.lr import PiecewiseDecay
from paddle.optimizer.lr import CosineAnnealingDecay
from paddle.optimizer.lr import ExponentialDecay

__all__ = ['LearningRateBuilder']


class Cosine(CosineAnnealingDecay):
    """
    Cosine learning rate decay
    lr = 0.05 * (math.cos(epoch * (math.pi / epochs)) + 1)

    Args:
        lr(float): initial learning rate
        step_each_epoch(int): steps each epoch
        epochs(int): total training epochs
    """

    def __init__(self, lr, step_each_epoch, epochs, **kwargs):
        super(Cosine, self).__init__(
            learning_rate=lr, T_max=step_each_epoch * epochs)

        self.update_specified = False
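
# Illustrative note (not part of the original file): T_max is given in
# iterations (step_each_epoch * epochs) rather than in epochs, which assumes
# the caller steps this scheduler once per training iteration, so the cosine
# curve in the docstring spans the whole training run.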


class Piecewise(PiecewiseDecay):
    """
    Piecewise learning rate decay

    Args:
        lr(float): initial learning rate
        step_each_epoch(int): steps each epoch
        decay_epochs(list): piecewise decay epochs
        gamma(float): decay factor
    """

    def __init__(self, lr, step_each_epoch, decay_epochs, gamma=0.1, **kwargs):
        boundaries = [step_each_epoch * e for e in decay_epochs]
        lr_values = [lr * (gamma**i) for i in range(len(boundaries) + 1)]
        super(Piecewise, self).__init__(
            boundaries=boundaries, values=lr_values)

        self.update_specified = False
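
# Illustrative note (not part of the original file): with lr=0.1,
# step_each_epoch=100, decay_epochs=[30, 60, 90] and gamma=0.1 (assumed
# example values), the constructor produces boundaries=[3000, 6000, 9000]
# iterations and values=[0.1, 0.01, 0.001, 0.0001], i.e. the learning rate
# is divided by 10 at epochs 30, 60 and 90.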


class CosineWarmup(LinearWarmup):
    """
    Cosine learning rate decay with warmup
    [0, warmup_epoch): linear warmup
    [warmup_epoch, epochs): cosine decay

    Args:
        lr(float): initial learning rate
        step_each_epoch(int): steps each epoch
        epochs(int): total training epochs
        warmup_epoch(int): epoch num of warmup
    """

    def __init__(self, lr, step_each_epoch, epochs, warmup_epoch=5, **kwargs):
        assert epochs > warmup_epoch, \
            "total epoch({}) should be larger than warmup_epoch({}) in CosineWarmup.".format(
                epochs, warmup_epoch)
        warmup_step = warmup_epoch * step_each_epoch
        start_lr = 0.0
        end_lr = lr
        lr_sch = Cosine(lr, step_each_epoch, epochs - warmup_epoch)

        super(CosineWarmup, self).__init__(
            learning_rate=lr_sch,
            warmup_steps=warmup_step,
            start_lr=start_lr,
            end_lr=end_lr)

        self.update_specified = False
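
# Illustrative note (not part of the original file): LinearWarmup raises the
# learning rate linearly from start_lr=0.0 to end_lr=lr over the first
# warmup_step iterations; after that the wrapped Cosine schedule (spanning
# the remaining epochs - warmup_epoch epochs) takes over.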


class ExponentialWarmup(LinearWarmup):
    """
    Exponential learning rate decay with warmup
    [0, warmup_epoch): linear warmup
    [warmup_epoch, epochs): exponential decay

    Args:
        lr(float): initial learning rate
        step_each_epoch(int): steps each epoch
        decay_epochs(float): decay epochs
        decay_rate(float): decay rate
        warmup_epoch(int): epoch num of warmup
    """

    def __init__(self,
                 lr,
                 step_each_epoch,
                 decay_epochs=2.4,
                 decay_rate=0.97,
                 warmup_epoch=5,
                 **kwargs):
        warmup_step = warmup_epoch * step_each_epoch
        start_lr = 0.0
        end_lr = lr
        lr_sch = ExponentialDecay(lr, decay_rate)

        super(ExponentialWarmup, self).__init__(
            learning_rate=lr_sch,
            warmup_steps=warmup_step,
            start_lr=start_lr,
            end_lr=end_lr)

        # NOTE: hack method to update the exponential lr scheduler
        self.update_specified = True
        self.update_start_step = warmup_step
        self.update_step_interval = int(decay_epochs * step_each_epoch)
        self.step_each_epoch = step_each_epoch
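
# Illustrative note (not part of the original file): the update_* attributes
# above are meant to be read by the training loop (an assumption about the
# caller), which steps the wrapped ExponentialDecay only every
# update_step_interval iterations once update_start_step is reached. For
# example, with step_each_epoch=100, decay_epochs=2.4 and warmup_epoch=5
# (assumed values), the decay by decay_rate=0.97 is applied every
# int(2.4 * 100) = 240 iterations after the first 500 warmup iterations.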


class LearningRateBuilder():
    """
    Build learning rate variable
    https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn.html

    Args:
        function(str): class name of learning rate
        params(dict): parameters used for init the class
    """

    def __init__(self,
                 function='Linear',
                 params={'lr': 0.1,
                         'steps': 100,
                         'end_lr': 0.0}):
        self.function = function
        self.params = params

    def __call__(self):
        mod = sys.modules[__name__]
        lr = getattr(mod, self.function)(**self.params)
        return lr
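

# Minimal usage sketch (not part of the original file; the class name and
# values below are illustrative assumptions only). LearningRateBuilder looks
# up one of the classes defined above by name and instantiates it with the
# given params.
if __name__ == "__main__":
    builder = LearningRateBuilder(
        function='CosineWarmup',
        params={
            'lr': 0.1,
            'step_each_epoch': 100,
            'epochs': 120,
            'warmup_epoch': 5
        })
    lr_scheduler = builder()
    # The result is a paddle.optimizer.lr.LRScheduler subclass; it is
    # typically passed to a Paddle optimizer and stepped once per iteration,
    # e.g.
    #   optimizer = paddle.optimizer.Momentum(
    #       learning_rate=lr_scheduler, parameters=model.parameters())
    #   ...
    #   lr_scheduler.step()
    print(lr_scheduler.get_lr())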