lovasz_loss.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. """Lovasz-Softmax and Jaccard hinge loss in PaddlePaddle"""
  15. from __future__ import absolute_import
  16. from __future__ import division
  17. from __future__ import print_function
  18. import numpy as np
  19. import paddle
  20. from paddle import nn
  21. import paddle.nn.functional as F
  22. from paddlex.paddleseg.cvlibs import manager


@manager.LOSSES.add_component
class LovaszSoftmaxLoss(nn.Layer):
    """
    Multi-class Lovasz-Softmax loss.

    Args:
        ignore_index (int64): Specifies a target value that is ignored and
            does not contribute to the input gradient. Default ``255``.
        classes (str|list): 'all' for all classes, 'present' for classes
            present in the labels, or a list of class indices to average over.
    """

    def __init__(self, ignore_index=255, classes='present'):
        super(LovaszSoftmaxLoss, self).__init__()
        self.ignore_index = ignore_index
        self.classes = classes

    def forward(self, logits, labels):
        r"""
        Forward computation.

        Args:
            logits (Tensor): Shape is [N, C, H, W], logits at each prediction
                (between -\infty and +\infty).
            labels (Tensor): Shape is [N, 1, H, W] or [N, H, W], ground truth
                labels (between 0 and C - 1).
        """
        probas = F.softmax(logits, axis=1)
        vprobas, vlabels = flatten_probas(probas, labels, self.ignore_index)
        loss = lovasz_softmax_flat(vprobas, vlabels, classes=self.classes)
        return loss
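
# A minimal usage sketch (an illustrative addition, not part of the original
# module; the shapes, class count, and values below are assumptions):
#
#   loss_fn = LovaszSoftmaxLoss(ignore_index=255, classes='present')
#   logits = paddle.randn([2, 4, 32, 32])       # [N, C, H, W]
#   labels = paddle.randint(0, 4, [2, 32, 32])  # [N, H, W], values in [0, C-1]
#   loss = loss_fn(logits, labels)              # scalar Tensor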


@manager.LOSSES.add_component
class LovaszHingeLoss(nn.Layer):
    """
    Binary Lovasz hinge loss.

    Args:
        ignore_index (int64): Specifies a target value that is ignored and
            does not contribute to the input gradient. Default ``255``.
    """

    def __init__(self, ignore_index=255):
        super(LovaszHingeLoss, self).__init__()
        self.ignore_index = ignore_index

    def forward(self, logits, labels):
        r"""
        Forward computation.

        Args:
            logits (Tensor): Shape is [N, 1, H, W] or [N, 2, H, W], logits at
                each pixel (between -\infty and +\infty).
            labels (Tensor): Shape is [N, 1, H, W] or [N, H, W], binary ground
                truth masks (0 or 1).
        """
        if logits.shape[1] == 2:
            logits = binary_channel_to_unary(logits)
        loss = lovasz_hinge_flat(
            *flatten_binary_scores(logits, labels, self.ignore_index))
        return loss
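
# A minimal usage sketch (an illustrative addition; the shapes and the float
# cast are assumptions, since the hinge arithmetic expects float labels):
#
#   loss_fn = LovaszHingeLoss(ignore_index=255)
#   logits = paddle.randn([2, 2, 32, 32])       # two-channel logits are
#   labels = paddle.randint(0, 2, [2, 32, 32])  # reduced to unary internally
#   loss = loss_fn(logits, labels.astype('float32'))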


def lovasz_grad(gt_sorted):
    """
    Computes the gradient of the Lovasz extension w.r.t. sorted errors.
    See Alg. 1 in the paper.
    """
    gts = paddle.sum(gt_sorted)
    p = len(gt_sorted)
    intersection = gts - paddle.cumsum(gt_sorted, axis=0)
    union = gts + paddle.cumsum(1 - gt_sorted, axis=0)
    jaccard = 1.0 - intersection.cast('float32') / union.cast('float32')
    if p > 1:  # cover 1-pixel case
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard
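
# A worked example of lovasz_grad (illustrative, values chosen by hand):
# for gt_sorted = [1, 0, 1],
#   gts = 2, cumsum(gt_sorted) = [1, 1, 2], cumsum(1 - gt_sorted) = [0, 1, 1]
#   intersection = [1, 1, 0], union = [2, 3, 3]
#   jaccard = 1 - intersection / union = [0.5, 0.6667, 1.0]
# and after first-differencing: [0.5, 0.1667, 0.3333], which sums to 1.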


def binary_channel_to_unary(logits, eps=1e-9):
    """
    Converts binary channel logits to unary channel logits for lovasz hinge
    loss by taking the log-odds of the foreground probability.
    """
    probas = F.softmax(logits, axis=1)
    probas = probas[:, 1, :, :]
    # eps keeps both the numerator and the denominator of the log-odds
    # ratio away from zero.
    logits = paddle.log((probas + eps) / (1 - probas + eps))
    logits = logits.unsqueeze(1)
    return logits


def lovasz_hinge_flat(logits, labels):
    r"""
    Binary Lovasz hinge loss.

    Args:
        logits (Tensor): Shape is [P], logits at each prediction
            (between -\infty and +\infty).
        labels (Tensor): Shape is [P], binary ground truth labels (0 or 1).
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels - 1.
    signs.stop_gradient = True
    errors = 1. - logits * signs
    # Sort errors in descending order; gathering with the permutation keeps
    # errors_sorted differentiable w.r.t. the logits.
    perm = paddle.argsort(errors, axis=0, descending=True)
    perm.stop_gradient = True
    errors_sorted = paddle.gather(errors, perm)
    gt_sorted = paddle.gather(labels, perm)
    grad = lovasz_grad(gt_sorted)
    grad.stop_gradient = True
    loss = paddle.sum(F.relu(errors_sorted) * grad)
    return loss


def flatten_binary_scores(scores, labels, ignore=None):
    """
    Flattens predictions in the batch (binary case).
    Removes labels equal to 'ignore'.
    """
    scores = paddle.reshape(scores, [-1])
    labels = paddle.reshape(labels, [-1])
    labels.stop_gradient = True
    if ignore is None:
        return scores, labels
    valid = labels != ignore
    valid_mask = paddle.reshape(valid, (-1, 1))
    indexs = paddle.nonzero(valid_mask)
    indexs.stop_gradient = True
    vscores = paddle.gather(scores, indexs[:, 0])
    vlabels = paddle.gather(labels, indexs[:, 0])
    return vscores, vlabels


def lovasz_softmax_flat(probas, labels, classes='present'):
    """
    Multi-class Lovasz-Softmax loss.

    Args:
        probas (Tensor): Shape is [P, C], class probabilities at each
            prediction (between 0 and 1).
        labels (Tensor): Shape is [P], ground truth labels (between 0 and C - 1).
        classes (str|list): 'all' for all classes, 'present' for classes
            present in the labels, or a list of class indices to average over.
    """
    if probas.numel() == 0:
        # only void pixels, the gradients should be 0
        return probas * 0.
    C = probas.shape[1]
    losses = []
    classes_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in classes_to_sum:
        fg = paddle.cast(labels == c, probas.dtype)  # foreground for class c
        if classes == 'present' and fg.sum() == 0:
            continue
        fg.stop_gradient = True
        if C == 1:
            if len(classes_to_sum) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = paddle.abs(fg - class_pred)
        # Sort errors in descending order; gathering with the permutation
        # keeps errors_sorted differentiable w.r.t. class_pred.
        perm = paddle.argsort(errors, axis=0, descending=True)
        perm.stop_gradient = True
        errors_sorted = paddle.gather(errors, perm)
        fg_sorted = paddle.gather(fg, perm)
        fg_sorted.stop_gradient = True
        grad = lovasz_grad(fg_sorted)
        grad.stop_gradient = True
        loss = paddle.sum(errors_sorted * grad)
        losses.append(loss)
    if len(classes_to_sum) == 1:
        return losses[0]
    losses_tensor = paddle.stack(losses)
    mean_loss = paddle.mean(losses_tensor)
    return mean_loss


def flatten_probas(probas, labels, ignore=None):
    """
    Flattens predictions in the batch.
    """
    if len(probas.shape) == 3:
        probas = paddle.unsqueeze(probas, axis=1)
    C = probas.shape[1]
    probas = paddle.transpose(probas, [0, 2, 3, 1])
    probas = paddle.reshape(probas, [-1, C])
    labels = paddle.reshape(labels, [-1])
    if ignore is None:
        return probas, labels
    valid = labels != ignore
    valid_mask = paddle.reshape(valid, [-1, 1])
    indexs = paddle.nonzero(valid_mask)
    indexs.stop_gradient = True
    vprobas = paddle.gather(probas, indexs[:, 0])
    vlabels = paddle.gather(labels, indexs[:, 0])
    return vprobas, vlabels
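

# A minimal sanity check (an illustrative addition, not part of the original
# module; the shapes and the scale factor are arbitrary assumptions). A
# sharply peaked, correct prediction should drive the loss toward 0.
if __name__ == '__main__':
    labels = paddle.randint(0, 4, [1, 8, 8])        # [N, H, W]
    one_hot = F.one_hot(labels, num_classes=4)      # [N, H, W, C], float32
    logits = one_hot.transpose([0, 3, 1, 2]) * 20.  # [N, C, H, W], peaked
    print(float(LovaszSoftmaxLoss()(logits, labels)))  # prints ~0.0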