# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from paddlex.paddleseg.cvlibs import manager
from paddlex.paddleseg.models import layers
from paddlex.paddleseg.utils import utils


@manager.MODELS.add_component
class DANet(nn.Layer):
    """
    The DANet implementation based on PaddlePaddle.

    The original article refers to
    Fu, Jun, et al. "Dual Attention Network for Scene Segmentation"
    (https://arxiv.org/pdf/1809.02983.pdf).

    Args:
        num_classes (int): The number of target classes.
        backbone (paddle.nn.Layer): A backbone network.
        backbone_indices (tuple): The values in the tuple indicate the indices
            of the backbone outputs used by the head.
        align_corners (bool): An argument of F.interpolate. It should be set to
            False when the output size of the feature maps is even, e.g. 1024x512;
            otherwise it should be True, e.g. 769x769. Default: False.
        pretrained (str, optional): The path or url of the pretrained model. Default: None.
    """

    def __init__(self,
                 num_classes,
                 backbone,
                 backbone_indices,
                 align_corners=False,
                 pretrained=None):
        super().__init__()
        self.backbone = backbone
        self.backbone_indices = backbone_indices
        in_channels = [
            self.backbone.feat_channels[i] for i in backbone_indices
        ]

        self.head = DAHead(num_classes=num_classes, in_channels=in_channels)

        self.align_corners = align_corners
        self.pretrained = pretrained
        self.init_weight()

    def forward(self, x):
        feats = self.backbone(x)
        feats = [feats[i] for i in self.backbone_indices]
        logit_list = self.head(feats)
        if not self.training:
            logit_list = [logit_list[0]]

        logit_list = [
            F.interpolate(
                logit,
                paddle.shape(x)[2:],
                mode='bilinear',
                align_corners=self.align_corners,
                align_mode=1) for logit in logit_list
        ]
        return logit_list

    def init_weight(self):
        if self.pretrained is not None:
            utils.load_entire_model(self, self.pretrained)
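

# A minimal usage sketch (illustrative only, not part of the original file).
# The backbone class and the choice of backbone_indices below are assumptions;
# any PaddleSeg-style backbone that exposes `feat_channels` and returns a list
# of feature maps should work:
#
#     from paddlex.paddleseg.models import backbones
#
#     backbone = backbones.ResNet50_vd()
#     model = DANet(num_classes=19, backbone=backbone, backbone_indices=(2, 3))
#     logits = model(paddle.randn([1, 3, 512, 512]))
#     # `logits` is a list of tensors, each of shape [1, 19, 512, 512]; in
#     # training mode it also contains the auxiliary logits produced by DAHead.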


class DAHead(nn.Layer):
    """
    The Dual attention head.

    Args:
        num_classes (int): The number of target classes.
        in_channels (tuple): The number of input channels.
    """

    def __init__(self, num_classes, in_channels):
        super().__init__()
        in_channels = in_channels[-1]
        inter_channels = in_channels // 4

        self.channel_conv = layers.ConvBNReLU(in_channels, inter_channels, 3)
        self.position_conv = layers.ConvBNReLU(in_channels, inter_channels, 3)
        self.pam = PAM(inter_channels)
        self.cam = CAM(inter_channels)
        self.conv1 = layers.ConvBNReLU(inter_channels, inter_channels, 3)
        self.conv2 = layers.ConvBNReLU(inter_channels, inter_channels, 3)

        self.aux_head = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(in_channels, num_classes, 1))
        self.aux_head_pam = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))
        self.aux_head_cam = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))
        self.cls_head = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))

    def forward(self, feat_list):
        feats = feat_list[-1]
        channel_feats = self.channel_conv(feats)
        channel_feats = self.cam(channel_feats)
        channel_feats = self.conv1(channel_feats)

        position_feats = self.position_conv(feats)
        position_feats = self.pam(position_feats)
        position_feats = self.conv2(position_feats)

        feats_sum = position_feats + channel_feats
        logit = self.cls_head(feats_sum)

        if not self.training:
            return [logit]

        cam_logit = self.aux_head_cam(channel_feats)
        pam_logit = self.aux_head_pam(position_feats)
        aux_logit = self.aux_head(feats)
        return [logit, cam_logit, pam_logit, aux_logit]
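

# Worked channel sizes (an illustrative example, not from the original file):
# with a ResNet-style backbone whose last selected stage outputs 2048 channels,
# in_channels[-1] = 2048, so inter_channels = 2048 // 4 = 512. PAM and CAM then
# operate on 512-channel maps, and each head predicts `num_classes` channels
# with a 1x1 convolution.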


class PAM(nn.Layer):
    """Position attention module."""

    def __init__(self, in_channels):
        super().__init__()
        mid_channels = in_channels // 8
        self.mid_channels = mid_channels
        self.in_channels = in_channels

        self.query_conv = nn.Conv2D(in_channels, mid_channels, 1, 1)
        self.key_conv = nn.Conv2D(in_channels, mid_channels, 1, 1)
        self.value_conv = nn.Conv2D(in_channels, in_channels, 1, 1)

        self.gamma = self.create_parameter(
            shape=[1],
            dtype='float32',
            default_initializer=nn.initializer.Constant(0))

    def forward(self, x):
        x_shape = paddle.shape(x)

        # query: n, h * w, c1
        query = self.query_conv(x)
        query = paddle.reshape(query, (0, self.mid_channels, -1))
        query = paddle.transpose(query, (0, 2, 1))

        # key: n, c1, h * w
        key = self.key_conv(x)
        key = paddle.reshape(key, (0, self.mid_channels, -1))

        # sim: n, h * w, h * w
        sim = paddle.bmm(query, key)
        sim = F.softmax(sim, axis=-1)

        value = self.value_conv(x)
        value = paddle.reshape(value, (0, self.in_channels, -1))
        sim = paddle.transpose(sim, (0, 2, 1))

        # feat: from (n, c2, h * w) -> (n, c2, h, w)
        feat = paddle.bmm(value, sim)
        feat = paddle.reshape(feat,
                              (0, self.in_channels, x_shape[2], x_shape[3]))

        out = self.gamma * feat + x
        return out
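

# In matrix form (an explanatory note, not from the original file): with
# Q = query (n, hw, c1), K = key (n, c1, hw), and V = value (n, c, hw), PAM
# computes A = softmax(Q @ K) over the last axis and returns
# gamma * reshape(V @ A^T) + x, i.e. a residual, spatially re-weighted copy
# of the input whose contribution is learned through `gamma` (initialized to 0).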


class CAM(nn.Layer):
    """Channel attention module."""

    def __init__(self, channels):
        super().__init__()
        self.channels = channels

        self.gamma = self.create_parameter(
            shape=[1],
            dtype='float32',
            default_initializer=nn.initializer.Constant(0))

    def forward(self, x):
        x_shape = paddle.shape(x)

        # query: n, c, h * w
        query = paddle.reshape(x, (0, self.channels, -1))

        # key: n, h * w, c
        key = paddle.reshape(x, (0, self.channels, -1))
        key = paddle.transpose(key, (0, 2, 1))

        # sim: n, c, c
        sim = paddle.bmm(query, key)

        # The DANet authors claim that this can avoid gradient divergence.
        sim = paddle.max(sim, axis=-1, keepdim=True).tile(
            [1, 1, self.channels]) - sim
        sim = F.softmax(sim, axis=-1)

        # feat: from (n, c, h * w) to (n, c, h, w)
        value = paddle.reshape(x, (0, self.channels, -1))
        feat = paddle.bmm(sim, value)
        feat = paddle.reshape(feat, (0, self.channels, x_shape[2], x_shape[3]))

        out = self.gamma * feat + x
        return out
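

# A minimal smoke test (an illustrative addition, not part of the original
# file). It only assumes a working Paddle installation and checks that the
# attention modules preserve the input shape and that DAHead, in eval mode,
# returns a single logit map with `num_classes` channels.
if __name__ == '__main__':
    x = paddle.randn([2, 64, 32, 32])

    pam = PAM(64)
    cam = CAM(64)
    print(pam(x).shape)  # expected: [2, 64, 32, 32]
    print(cam(x).shape)  # expected: [2, 64, 32, 32]

    head = DAHead(num_classes=2, in_channels=[64])
    head.eval()  # in eval mode only the main logit is returned
    logits = head([x])
    print(len(logits), logits[0].shape)  # expected: 1 [2, 2, 32, 32]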