gscnn_dual_task_loss.py 5.1 KB

# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from paddlex.paddleseg.cvlibs import manager


@manager.LOSSES.add_component
class DualTaskLoss(nn.Layer):
    """
    The dual task loss implementation of GSCNN.

    Args:
        ignore_index (int64, optional): Specifies a target value that is ignored
            and does not contribute to the input gradient. Default ``255``.
        tau (float, optional): The temperature of the Gumbel-Softmax sampling.
            Default ``0.5``.
    """

    def __init__(self, ignore_index=255, tau=0.5):
        super().__init__()
        self.ignore_index = ignore_index
        self.tau = tau

    def _gumbel_softmax_sample(self, logit, tau=1, eps=1e-10):
        """
        Draw a sample from the Gumbel-Softmax distribution.

        Based on
        https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
        (MIT license).
        """
        gumbel_noise = paddle.rand(logit.shape)
        gumbel_noise = -paddle.log(eps - paddle.log(gumbel_noise + eps))
        logit = logit + gumbel_noise
        return F.softmax(logit / tau, axis=1)

    def compute_grad_mag(self, x):
        """
        Compute the normalized spatial gradient magnitude of `x` after
        smoothing it with a triangle filter.
        """
        eps = 1e-6
        n, c, h, w = x.shape
        if h <= 1 or w <= 1:
            raise ValueError(
                'The width and height of tensor to compute grad must be greater than 1, but the shape is {}.'
                .format(x.shape))
        x = self.conv_tri(x, r=4)
        kernel = [[-1, 0, 1]]
        kernel = paddle.to_tensor(kernel).astype('float32')
        kernel = 0.5 * kernel
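        # The resulting kernel [[-0.5, 0, 0.5]] is a central-difference
        # approximation of the horizontal derivative; its transpose below
        # approximates the vertical derivative.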
        kernel_x = paddle.concat([kernel.unsqueeze((0, 1))] * c, axis=0)
        grad_x = F.conv2d(x, kernel_x, padding='same', groups=c)
        kernel_y = paddle.concat([kernel.t().unsqueeze((0, 1))] * c, axis=0)
        grad_y = F.conv2d(x, kernel_y, padding='same', groups=c)
        mag = paddle.sqrt(grad_x * grad_x + grad_y * grad_y + eps)
        return mag / mag.max()

    def conv_tri(self, input, r):
        """
        Convolves an image by a 2D triangle filter (the 1D triangle filter f is
        [1:r r+1 r:-1:1]/(r+1)^2, the 2D version is simply conv2(f,f')).
        """
        if r <= 1:
            raise ValueError(
                '`r` should be greater than 1, but it is {}.'.format(r))
        kernel = [
            list(range(1, r + 1)) + [r + 1] + list(reversed(range(1, r + 1)))
        ]
        kernel = paddle.to_tensor(kernel).astype('float32')
        kernel = kernel / (r + 1)**2
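        # For example, r=4 gives the 1D kernel
        # [1, 2, 3, 4, 5, 4, 3, 2, 1] / 25.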
        # Build a symmetric border of r reflected columns on each side of the
        # width, then r reflected rows on each side of the height, so the valid
        # convolutions below preserve the spatial size.
        input_ = F.pad(input, [1, 1, 0, 0], mode='replicate')
        input_ = F.pad(input_, [r, r, 0, 0], mode='reflect')
        input_ = [input_[:, :, :, :r], input, input_[:, :, :, -r:]]
        input_ = paddle.concat(input_, axis=3)
        tem = input_.clone()
        input_ = F.pad(input_, [0, 0, 1, 1], mode='replicate')
        input_ = F.pad(input_, [0, 0, r, r], mode='reflect')
        input_ = [input_[:, :, :r, :], tem, input_[:, :, -r:, :]]
        input_ = paddle.concat(input_, axis=2)
        # Separable convolution: filter along the rows, then along the columns.
        c = input.shape[1]
        kernel_x = paddle.concat([kernel.unsqueeze((0, 1))] * c, axis=0)
        output = F.conv2d(input_, kernel_x, padding=0, groups=c)
        kernel_y = paddle.concat([kernel.t().unsqueeze((0, 1))] * c, axis=0)
        output = F.conv2d(output, kernel_y, padding=0, groups=c)
        return output

    def forward(self, logit, labels):
        n, c, h, w = logit.shape
        th = 1e-8
        eps = 1e-10
        # Mask out pixels marked with the ignore label.
        if len(labels.shape) == 3:
            labels = labels.unsqueeze(1)
        mask = (labels != self.ignore_index)
        mask.stop_gradient = True
        logit = logit * mask
        labels = labels * mask
        if len(labels.shape) == 4:
            labels = labels.squeeze(1)
        labels.stop_gradient = True
        # Convert labels to one-hot with shape (N, C, H, W).
        labels = F.one_hot(labels, logit.shape[1]).transpose((0, 3, 1, 2))
        labels.stop_gradient = True

        # Boundary maps: gradient magnitude of the Gumbel-Softmax sample of the
        # prediction and of the one-hot ground truth.
        g = self._gumbel_softmax_sample(logit, tau=self.tau)
        g = self.compute_grad_mag(g)
        g_hat = self.compute_grad_mag(labels)
        loss = F.l1_loss(g, g_hat, reduction='none')
        loss = loss * mask

        # Average the L1 difference over the predicted boundary pixels and over
        # the ground-truth boundary pixels, then combine with equal weight.
        g_mask = (g > th).astype('float32')
        g_mask.stop_gradient = True
        g_mask_sum = paddle.sum(g_mask)
        loss_g = paddle.sum(loss * g_mask)
        if g_mask_sum > eps:
            loss_g = loss_g / g_mask_sum

        g_hat_mask = (g_hat > th).astype('float32')
        g_hat_mask.stop_gradient = True
        g_hat_mask_sum = paddle.sum(g_hat_mask)
        loss_g_hat = paddle.sum(loss * g_hat_mask)
        if g_hat_mask_sum > eps:
            loss_g_hat = loss_g_hat / g_hat_mask_sum

        total_loss = 0.5 * loss_g + 0.5 * loss_g_hat

        return total_loss
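

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): a minimal,
# hypothetical example of driving DualTaskLoss with random tensors, assuming
# paddle and paddlex are installed so the imports at the top of this file
# resolve. Shapes follow the conventions used in forward(): logits are
# (N, C, H, W) floats and labels are (N, H, W) int64 class ids.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    paddle.seed(0)
    num_classes = 19
    logit = paddle.randn([2, num_classes, 64, 64])        # raw segmentation logits
    labels = paddle.randint(0, num_classes, [2, 64, 64])  # ground-truth class ids
    labels = labels.astype('int64')
    loss_fn = DualTaskLoss(ignore_index=255, tau=0.5)
    loss = loss_fn(logit, labels)
    print('dual task loss:', float(loss))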