dnlnet.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from paddlex.paddleseg.models import layers
from paddlex.paddleseg.cvlibs import manager
from paddlex.paddleseg.utils import utils


@manager.MODELS.add_component
class DNLNet(nn.Layer):
    """Disentangled Non-Local Neural Networks.

    The original article refers to
    Minghao Yin, et al. "Disentangled Non-Local Neural Networks"
    (https://arxiv.org/abs/2006.06668).

    Args:
        num_classes (int): The unique number of target classes.
        backbone (Paddle.nn.Layer): A backbone network.
        backbone_indices (tuple): The values in the tuple indicate the indices
            of the backbone's output feature maps to use. Default: (2, 3).
        reduction (int): Reduction factor of the projection transform. Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            sqrt(1/inter_channels). Default: True.
        mode (str): The non-local mode. Options are 'embedded_gaussian' and
            'dot_product'. Default: 'embedded_gaussian'.
        temperature (float): Temperature used to adjust the attention. Default: 0.05.
        concat_input (bool): Whether to concatenate the input and the output of
            the convs before the classification layer. Default: True.
        enable_auxiliary_loss (bool, optional): Whether to add an auxiliary loss.
            Default: True.
        align_corners (bool): An argument of F.interpolate. It should be set to
            False when the output size of the feature maps is even,
            e.g. 1024x512; otherwise it should be True, e.g. 769x769.
            Default: False.
        pretrained (str, optional): The path or URL of the pretrained model.
            Default: None.
    """

    def __init__(self,
                 num_classes,
                 backbone,
                 backbone_indices=(2, 3),
                 reduction=2,
                 use_scale=True,
                 mode='embedded_gaussian',
                 temperature=0.05,
                 concat_input=True,
                 enable_auxiliary_loss=True,
                 align_corners=False,
                 pretrained=None):
        super().__init__()
        self.backbone = backbone
        self.backbone_indices = backbone_indices
        in_channels = [
            self.backbone.feat_channels[i] for i in backbone_indices
        ]
        self.head = DNLHead(num_classes, in_channels, reduction, use_scale,
                            mode, temperature, concat_input,
                            enable_auxiliary_loss)
        self.align_corners = align_corners
        self.pretrained = pretrained
        self.init_weight()

    def forward(self, x):
        feats = self.backbone(x)
        feats = [feats[i] for i in self.backbone_indices]
        logit_list = self.head(feats)
        logit_list = [
            F.interpolate(
                logit,
                paddle.shape(x)[2:],
                mode='bilinear',
                align_corners=self.align_corners,
                align_mode=1) for logit in logit_list
        ]
        return logit_list

    def init_weight(self):
        if self.pretrained is not None:
            utils.load_entire_model(self, self.pretrained)
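

# Usage sketch (illustrative, not part of the original file): DNLNet expects a
# backbone that exposes `feat_channels` and returns a list of feature maps,
# e.g. the ResNet variants bundled with PaddleSeg. The import path and
# constructor arguments below follow PaddleSeg conventions and are assumptions,
# not something this file itself defines:
#
#   from paddlex.paddleseg.models.backbones import ResNet50_vd
#   backbone = ResNet50_vd(output_stride=8)
#   model = DNLNet(num_classes=19, backbone=backbone)
#   logits = model(paddle.rand([1, 3, 512, 1024]))[0]  # [1, 19, 512, 1024]
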
class DNLHead(nn.Layer):
    """The DNLNet head.

    Args:
        num_classes (int): The unique number of target classes.
        in_channels (tuple): The number of input channels.
        reduction (int): Reduction factor of the projection transform.
        use_scale (bool): Whether to scale pairwise_weight by
            sqrt(1/inter_channels).
        mode (str): The non-local mode. Options are 'embedded_gaussian' and
            'dot_product'.
        temperature (float): Temperature used to adjust the attention.
        concat_input (bool): Whether to concatenate the input and the output of
            the convs before the classification layer. Default: True.
        enable_auxiliary_loss (bool, optional): Whether to add an auxiliary loss.
            Default: True.
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 reduction,
                 use_scale,
                 mode,
                 temperature,
                 concat_input=True,
                 enable_auxiliary_loss=True,
                 **kwargs):
        super(DNLHead, self).__init__()
        self.in_channels = in_channels[-1]
        self.concat_input = concat_input
        self.enable_auxiliary_loss = enable_auxiliary_loss
        inter_channels = self.in_channels // 4

        self.dnl_block = DisentangledNonLocal2D(
            in_channels=inter_channels,
            reduction=reduction,
            use_scale=use_scale,
            temperature=temperature,
            mode=mode)
        self.conv0 = layers.ConvBNReLU(
            in_channels=self.in_channels,
            out_channels=inter_channels,
            kernel_size=3,
            bias_attr=False)
        self.conv1 = layers.ConvBNReLU(
            in_channels=inter_channels,
            out_channels=inter_channels,
            kernel_size=3,
            bias_attr=False)
        self.cls = nn.Sequential(
            nn.Dropout2D(p=0.1), nn.Conv2D(inter_channels, num_classes, 1))
        self.aux = nn.Sequential(
            layers.ConvBNReLU(
                in_channels=1024,
                out_channels=256,
                kernel_size=3,
                bias_attr=False),
            nn.Dropout2D(p=0.1),
            nn.Conv2D(256, num_classes, 1))
        if self.concat_input:
            self.conv_cat = layers.ConvBNReLU(
                self.in_channels + inter_channels,
                inter_channels,
                kernel_size=3,
                bias_attr=False)

    def forward(self, feat_list):
        C3, C4 = feat_list
        output = self.conv0(C4)
        output = self.dnl_block(output)
        output = self.conv1(output)
        if self.concat_input:
            output = self.conv_cat(paddle.concat([C4, output], axis=1))
        output = self.cls(output)
        if self.enable_auxiliary_loss:
            auxout = self.aux(C3)
            return [output, auxout]
        else:
            return [output]
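

# Shape sketch for DNLHead.forward (an illustrative note, not original code;
# the channel counts assume a ResNet-style backbone, where C3 has 1024 and C4
# has 2048 channels, matching the hard-coded aux-head input above):
#
#   C3: [N, 1024, h, w] -> aux head                       -> [N, num_classes, h, w]
#   C4: [N, 2048, h, w] -> conv0 -> dnl_block -> conv1       (2048 // 4 = 512 channels)
#       -> optional concat with C4 + conv_cat -> cls      -> [N, num_classes, h, w]
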
class DisentangledNonLocal2D(layers.NonLocal2D):
    """Disentangled Non-Local Blocks.

    Args:
        temperature (float): Temperature used to adjust the attention.
    """

    def __init__(self, temperature, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.temperature = temperature
        self.conv_mask = nn.Conv2D(self.in_channels, 1, kernel_size=1)

    def embedded_gaussian(self, theta_x, phi_x):
        pairwise_weight = paddle.matmul(theta_x, phi_x)
        if self.use_scale:
            pairwise_weight /= theta_x.shape[-1]**0.5
        pairwise_weight /= self.temperature
        pairwise_weight = F.softmax(pairwise_weight, -1)
        return pairwise_weight
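
    # Worked form of the pairwise term above (illustrative comment, not
    # original code): with the per-row/per-column means subtracted in
    # forward(), the weight between query position i and key position j is
    #
    #   w_ij = softmax_j((theta_i - mu_theta) . (phi_j - mu_phi)
    #                    / (sqrt(d) * temperature)),  d = inter_channels,
    #
    # i.e. the "disentangled" pairwise attention of the paper; the unary term
    # is computed separately from conv_mask in forward() and added back in.
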
    def forward(self, x):
        x_shape = paddle.shape(x)
        g_x = self.g(x).reshape([0, self.inter_channels,
                                 -1]).transpose([0, 2, 1])

        if self.mode == "gaussian":
            theta_x = paddle.transpose(
                x.reshape([0, self.in_channels, -1]), [0, 2, 1])
            if self.sub_sample:
                phi_x = paddle.reshape(
                    self.phi(x), [0, self.in_channels, -1])
            else:
                phi_x = paddle.reshape(x, [0, self.in_channels, -1])
        elif self.mode == "concatenation":
            theta_x = paddle.reshape(
                self.theta(x), [0, self.inter_channels, -1, 1])
            phi_x = paddle.reshape(
                self.phi(x), [0, self.inter_channels, 1, -1])
        else:
            theta_x = self.theta(x).reshape([0, self.inter_channels,
                                             -1]).transpose([0, 2, 1])
            phi_x = paddle.reshape(self.phi(x), [0, self.inter_channels, -1])

        theta_x -= paddle.mean(theta_x, axis=-2, keepdim=True)
        phi_x -= paddle.mean(phi_x, axis=-1, keepdim=True)

        pairwise_func = getattr(self, self.mode)
        pairwise_weight = pairwise_func(theta_x, phi_x)
        y = paddle.matmul(pairwise_weight, g_x).transpose([0, 2, 1]).reshape(
            [0, self.inter_channels, x_shape[2], x_shape[3]])

        unary_mask = F.softmax(
            paddle.reshape(self.conv_mask(x), [0, 1, -1]), -1)
        unary_x = paddle.matmul(unary_mask, g_x).transpose([0, 2, 1]).reshape(
            [0, self.inter_channels, 1, 1])
        output = x + self.conv_out(y + unary_x)
        return output
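

# Minimal smoke test (an assumption, not part of the original file): it builds
# the standalone DNL block with the same kind of arguments DNLHead passes and
# checks that the residual output keeps the input shape. `layers.NonLocal2D`
# is assumed to accept (in_channels, reduction, use_scale, mode) keyword
# arguments, following the PaddleSeg implementation this file subclasses.
if __name__ == "__main__":
    block = DisentangledNonLocal2D(
        temperature=0.05,
        in_channels=64,
        reduction=2,
        use_scale=True,
        mode='embedded_gaussian')
    x = paddle.rand([2, 64, 32, 32])
    y = block(x)
    print(y.shape)  # expected: [2, 64, 32, 32]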