binary_cross_entropy_loss.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from paddlex.paddleseg.cvlibs import manager


@manager.LOSSES.add_component
class BCELoss(nn.Layer):
  20. r"""
  21. This operator combines the sigmoid layer and the :ref:`api_nn_loss_BCELoss` layer.
  22. Also, we can see it as the combine of ``sigmoid_cross_entropy_with_logits``
  23. layer and some reduce operations.
  24. This measures the element-wise probability error in classification tasks
  25. in which each class is independent.
  26. This can be thought of as predicting labels for a data-point, where labels
  27. are not mutually exclusive. For example, a news article can be about
  28. politics, technology or sports at the same time or none of these.
  29. First this operator calculate loss function as follows:
  30. .. math::
  31. Out = -Labels * \\log(\\sigma(Logit)) - (1 - Labels) * \\log(1 - \\sigma(Logit))
  32. We know that :math:`\\sigma(Logit) = \\frac{1}{1 + \\e^{-Logit}}`. By substituting this we get:
  33. .. math::
  34. Out = Logit - Logit * Labels + \\log(1 + \\e^{-Logit})
  35. For stability and to prevent overflow of :math:`\\e^{-Logit}` when Logit < 0,
  36. we reformulate the loss as follows:
  37. .. math::
  38. Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + \\e^{-\|Logit\|})
  39. Then, if ``weight`` or ``pos_weight`` is not None, this operator multiply the
  40. weight tensor on the loss `Out`. The ``weight`` tensor will attach different
  41. weight on every items in the batch. The ``pos_weight`` will attach different
  42. weight on the positive label of each class.
  43. Finally, this operator applies reduce operation on the loss.
  44. If :attr:`reduction` set to ``'none'``, the operator will return the original loss `Out`.
  45. If :attr:`reduction` set to ``'mean'``, the reduced mean loss is :math:`Out = MEAN(Out)`.
  46. If :attr:`reduction` set to ``'sum'``, the reduced sum loss is :math:`Out = SUM(Out)`.
  47. Note that the target labels ``label`` should be numbers between 0 and 1.
  48. Args:
  49. weight (Tensor | str, optional): A manual rescaling weight given to the loss of each
  50. batch element. If given, it has to be a 1D Tensor whose size is `[N, ]`,
  51. The data type is float32, float64. If type is str, it should equal to 'dynamic'.
  52. It will compute weight dynamically in every step.
  53. Default is ``'None'``.
  54. pos_weight (float|str, optional): A weight of positive examples. If type is str,
  55. it should equal to 'dynamic'. It will compute weight dynamically in every step.
  56. Default is ``'None'``.
  57. ignore_index (int64, optional): Specifies a target value that is ignored
  58. and does not contribute to the input gradient. Default ``255``.
  59. edge_label (bool, optional): Whether to use edge label. Default: False
  60. Shapes:
  61. logit (Tensor): The input predications tensor. 2-D tensor with shape: [N, *],
  62. N is batch_size, `*` means number of additional dimensions. The ``logit``
  63. is usually the output of Linear layer. Available dtype is float32, float64.
  64. label (Tensor): The target labels tensor. 2-D tensor with the same shape as
  65. ``logit``. The target labels which values should be numbers between 0 and 1.
  66. Available dtype is float32, float64.
  67. Returns:
  68. A callable object of BCEWithLogitsLoss.
  69. Examples:
  70. .. code-block:: python
  71. import paddle
  72. paddle.disable_static()
  73. logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype="float32")
  74. label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32")
  75. bce_logit_loss = paddle.nn.BCEWithLogitsLoss()
  76. output = bce_logit_loss(logit, label)
  77. print(output.numpy()) # [0.45618808]
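
        A second, illustrative sketch (the shapes below are assumptions, not part
        of the original example) uses this ``BCELoss`` with ``'dynamic'`` weighting
        on segmentation-style NCHW inputs:

        .. code-block:: python

            import paddle
            # BCELoss as defined in this module
            logit = paddle.randn([2, 1, 4, 4], dtype="float32")           # N, C, H, W logits
            label = paddle.randint(0, 2, [2, 1, 4, 4]).astype("float32")  # binary mask
            bce_loss = BCELoss(weight='dynamic', pos_weight='dynamic')
            loss = bce_loss(logit, label)
            print(loss.numpy())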
  78. """

    def __init__(self,
                 weight=None,
                 pos_weight=None,
                 ignore_index=255,
                 edge_label=False):
        super().__init__()
        self.weight = weight
        self.pos_weight = pos_weight
        self.ignore_index = ignore_index
        self.edge_label = edge_label
        self.EPS = 1e-10

        if self.weight is not None:
            # `weight` must be either the string 'dynamic' or a Tensor.
            if isinstance(self.weight, str):
                if self.weight != 'dynamic':
                    raise ValueError(
                        "If type of `weight` is str, it should equal 'dynamic', but it is {}"
                        .format(self.weight))
            elif not isinstance(self.weight, paddle.Tensor):
                raise TypeError(
                    'The type of `weight` is wrong, it should be Tensor or str, but it is {}'
                    .format(type(self.weight)))

        if self.pos_weight is not None:
            # `pos_weight` must be either the string 'dynamic' or a float.
            if isinstance(self.pos_weight, str):
                if self.pos_weight != 'dynamic':
                    raise ValueError(
                        "If type of `pos_weight` is str, it should equal 'dynamic', but it is {}"
                        .format(self.pos_weight))
            elif isinstance(self.pos_weight, float):
                self.pos_weight = paddle.to_tensor(
                    self.pos_weight, dtype='float32')
            else:
                raise TypeError(
                    'The type of `pos_weight` is wrong, it should be float or str, but it is {}'
                    .format(type(self.pos_weight)))

    def forward(self, logit, label):
        """
        Forward computation.

        Args:
            logit (Tensor): Logit tensor, with data type float32 or float64. The shape
                is (N, C), where C is the number of classes; if the shape is more than
                2-D, it is (N, C, D1, D2, ..., Dk), k >= 1.
            label (Tensor): Label tensor, with data type int64. The shape is (N, C),
                where each value is 0 or 1; if the shape is more than 2-D, it is
                (N, C, D1, D2, ..., Dk), k >= 1.
        """
        if len(label.shape) != len(logit.shape):
            label = paddle.unsqueeze(label, 1)
        mask = (label != self.ignore_index)
        mask = paddle.cast(mask, 'float32')

        # label.shape should equal logit.shape; if the channel dimensions differ,
        # convert the label from class indices to a one-hot NCHW tensor.
        if label.shape[1] != logit.shape[1]:
            label = label.squeeze(1)
            label = F.one_hot(label, logit.shape[1])
            label = label.transpose((0, 3, 1, 2))

        if isinstance(self.weight, str):
            # 'dynamic' mode: weight each element by the inverse class frequency,
            # so positives get 2 * neg / total and negatives get 2 * pos / total.
            pos_index = (label == 1)
            neg_index = (label == 0)
            pos_num = paddle.sum(pos_index.astype('float32'))
            neg_num = paddle.sum(neg_index.astype('float32'))
            sum_num = pos_num + neg_num
            weight_pos = 2 * neg_num / (sum_num + self.EPS)
            weight_neg = 2 * pos_num / (sum_num + self.EPS)
            weight = weight_pos * label + weight_neg * (1 - label)
        else:
            weight = self.weight

        if isinstance(self.pos_weight, str):
            # 'dynamic' mode: a single scalar that up-weights positives when they
            # are rarer than negatives.
            pos_index = (label == 1)
            neg_index = (label == 0)
            pos_num = paddle.sum(pos_index.astype('float32'))
            neg_num = paddle.sum(neg_index.astype('float32'))
            sum_num = pos_num + neg_num
            pos_weight = 2 * neg_num / (sum_num + self.EPS)
        else:
            pos_weight = self.pos_weight

        label = label.astype('float32')
        loss = paddle.nn.functional.binary_cross_entropy_with_logits(
            logit,
            label,
            weight=weight,
            reduction='none',
            pos_weight=pos_weight)
        # Zero out the loss at ignored positions, then normalize by the fraction
        # of valid (non-ignored) elements.
        loss = loss * mask
        loss = paddle.mean(loss) / (paddle.mean(mask) + self.EPS)
        label.stop_gradient = True
        mask.stop_gradient = True

        return loss
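

# Illustrative smoke test, not part of the original module; the shapes and the
# class count below are assumptions chosen to exercise the one-hot branch and
# the 'dynamic' weighting mode.
if __name__ == '__main__':
    paddle.seed(42)
    logits = paddle.randn([2, 3, 8, 8], dtype='float32')  # N, C, H, W logits
    labels = paddle.randint(0, 3, [2, 8, 8])              # N, H, W class indices
    loss_fn = BCELoss(weight='dynamic', pos_weight='dynamic')
    print(loss_fn(logits, labels).numpy())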