# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from paddlex.paddleseg.cvlibs import manager
from paddlex.paddleseg.models import layers
from paddlex.paddleseg.utils import utils

@manager.MODELS.add_component
class ANN(nn.Layer):
    """
    The ANN implementation based on PaddlePaddle.

    The original article refers to
    Zhu, Zhen, et al. "Asymmetric Non-local Neural Networks for Semantic Segmentation"
    (https://arxiv.org/pdf/1908.07678.pdf).

    Args:
        num_classes (int): The unique number of target classes.
        backbone (paddle.nn.Layer): Backbone network; currently ResNet50/101 are supported.
        backbone_indices (tuple, optional): Two values in the tuple indicate the indices of backbone outputs.
        key_value_channels (int, optional): The key and value channels of the self-attention map in both AFNB and APNB modules.
            Default: 256.
        inter_channels (int, optional): Both input and output channels of APNB modules. Default: 512.
        psp_size (tuple, optional): The output sizes of the pooled feature maps. Default: (1, 3, 6, 8).
        enable_auxiliary_loss (bool, optional): Whether to add an auxiliary loss. Default: True.
        align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
            e.g. 1024x512; otherwise it should be True, e.g. 769x769. Default: False.
        pretrained (str, optional): The path or url of the pretrained model. Default: None.
    """
    def __init__(self,
                 num_classes,
                 backbone,
                 backbone_indices=(2, 3),
                 key_value_channels=256,
                 inter_channels=512,
                 psp_size=(1, 3, 6, 8),
                 enable_auxiliary_loss=True,
                 align_corners=False,
                 pretrained=None):
        super().__init__()
        self.backbone = backbone
        backbone_channels = [
            backbone.feat_channels[i] for i in backbone_indices
        ]
        self.head = ANNHead(num_classes, backbone_indices, backbone_channels,
                            key_value_channels, inter_channels, psp_size,
                            enable_auxiliary_loss)
        self.align_corners = align_corners
        self.pretrained = pretrained
        self.init_weight()

    def forward(self, x):
        feat_list = self.backbone(x)
        logit_list = self.head(feat_list)
        return [
            F.interpolate(
                logit,
                paddle.shape(x)[2:],
                mode='bilinear',
                align_corners=self.align_corners) for logit in logit_list
        ]

    def init_weight(self):
        if self.pretrained is not None:
            utils.load_entire_model(self, self.pretrained)

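# A minimal usage sketch (not part of the original file). It assumes a ResNet
# backbone from paddlex.paddleseg.models.backbones that exposes `feat_channels`;
# the exact backbone name and constructor arguments may differ in your install.
#
#   from paddlex.paddleseg.models.backbones import ResNet50_vd
#
#   backbone = ResNet50_vd(output_stride=8)
#   model = ANN(num_classes=19, backbone=backbone, backbone_indices=(2, 3))
#   logits = model(paddle.rand([1, 3, 512, 512]))
#   # `logits` is a list of NCHW tensors resized to the input resolution;
#   # logits[0] is the main prediction and logits[1] the auxiliary one.
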
class ANNHead(nn.Layer):
    """
    The ANNHead implementation.

    It mainly consists of AFNB and APNB modules.

    Args:
        num_classes (int): The unique number of target classes.
        backbone_indices (tuple): Two values in the tuple indicate the indices of backbone outputs.
            The first index will be taken as low-level features and the second one will be
            taken as high-level features in the AFNB module. A backbone usually consists of four
            downsampling stages, such as ResNet, and returns an output of each stage. If it is (2, 3),
            the feature maps of the third and the fourth stage in the backbone are taken.
        backbone_channels (tuple): The same length as "backbone_indices". It indicates the channels of the corresponding indices.
        key_value_channels (int): The key and value channels of the self-attention map in both AFNB and APNB modules.
        inter_channels (int): Both input and output channels of APNB modules.
        psp_size (tuple): The output sizes of the pooled feature maps.
        enable_auxiliary_loss (bool, optional): Whether to add an auxiliary loss. Default: True.
    """
    def __init__(self,
                 num_classes,
                 backbone_indices,
                 backbone_channels,
                 key_value_channels,
                 inter_channels,
                 psp_size,
                 enable_auxiliary_loss=True):
        super().__init__()
        low_in_channels = backbone_channels[0]
        high_in_channels = backbone_channels[1]
        self.fusion = AFNB(
            low_in_channels=low_in_channels,
            high_in_channels=high_in_channels,
            out_channels=high_in_channels,
            key_channels=key_value_channels,
            value_channels=key_value_channels,
            dropout_prob=0.05,
            repeat_sizes=([1]),
            psp_size=psp_size)
        self.context = nn.Sequential(
            layers.ConvBNReLU(
                in_channels=high_in_channels,
                out_channels=inter_channels,
                kernel_size=3,
                padding=1),
            APNB(
                in_channels=inter_channels,
                out_channels=inter_channels,
                key_channels=key_value_channels,
                value_channels=key_value_channels,
                dropout_prob=0.05,
                repeat_sizes=([1]),
                psp_size=psp_size))
        self.cls = nn.Conv2D(
            in_channels=inter_channels,
            out_channels=num_classes,
            kernel_size=1)
        self.auxlayer = layers.AuxLayer(
            in_channels=low_in_channels,
            inter_channels=low_in_channels // 2,
            out_channels=num_classes,
            dropout_prob=0.05)
        self.backbone_indices = backbone_indices
        self.enable_auxiliary_loss = enable_auxiliary_loss

    def forward(self, feat_list):
        logit_list = []
        low_level_x = feat_list[self.backbone_indices[0]]
        high_level_x = feat_list[self.backbone_indices[1]]
        x = self.fusion(low_level_x, high_level_x)
        x = self.context(x)
        logit = self.cls(x)
        logit_list.append(logit)

        if self.enable_auxiliary_loss:
            auxiliary_logit = self.auxlayer(low_level_x)
            logit_list.append(auxiliary_logit)

        return logit_list

class AFNB(nn.Layer):
    """
    Asymmetric Fusion Non-local Block.

    Args:
        low_in_channels (int): Low-level-feature channels.
        high_in_channels (int): High-level-feature channels.
        out_channels (int): Output channels of the AFNB module.
        key_channels (int): The key channels in the self-attention block.
        value_channels (int): The value channels in the self-attention block.
        dropout_prob (float): The dropout rate of the output.
        repeat_sizes (tuple, optional): The pooling scales of the stacked self-attention blocks; one block is created per size. Default: ([1]).
        psp_size (tuple, optional): The output sizes of the pooled feature maps. Default: (1, 3, 6, 8).
    """
    def __init__(self,
                 low_in_channels,
                 high_in_channels,
                 out_channels,
                 key_channels,
                 value_channels,
                 dropout_prob,
                 repeat_sizes=([1]),
                 psp_size=(1, 3, 6, 8)):
        super().__init__()
        self.psp_size = psp_size
        self.stages = nn.LayerList([
            SelfAttentionBlock_AFNB(low_in_channels, high_in_channels,
                                    key_channels, value_channels, out_channels,
                                    size) for size in repeat_sizes
        ])
        self.conv_bn = layers.ConvBN(
            in_channels=out_channels + high_in_channels,
            out_channels=out_channels,
            kernel_size=1)
        self.dropout = nn.Dropout(p=dropout_prob)

    def forward(self, low_feats, high_feats):
        priors = [stage(low_feats, high_feats) for stage in self.stages]
        context = priors[0]
        for i in range(1, len(priors)):
            context += priors[i]

        output = self.conv_bn(paddle.concat([context, high_feats], axis=1))
        output = self.dropout(output)

        return output

class APNB(nn.Layer):
    """
    Asymmetric Pyramid Non-local Block.

    Args:
        in_channels (int): The input channels of the APNB module.
        out_channels (int): Output channels of the APNB module.
        key_channels (int): The key channels in the self-attention block.
        value_channels (int): The value channels in the self-attention block.
        dropout_prob (float): The dropout rate of the output.
        repeat_sizes (tuple, optional): The pooling scales of the stacked self-attention blocks; one block is created per size. Default: ([1]).
        psp_size (tuple, optional): The output sizes of the pooled feature maps. Default: (1, 3, 6, 8).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 key_channels,
                 value_channels,
                 dropout_prob,
                 repeat_sizes=([1]),
                 psp_size=(1, 3, 6, 8)):
        super().__init__()
        self.psp_size = psp_size
        self.stages = nn.LayerList([
            SelfAttentionBlock_APNB(in_channels, out_channels, key_channels,
                                    value_channels, size)
            for size in repeat_sizes
        ])
        self.conv_bn = layers.ConvBNReLU(
            in_channels=in_channels * 2,
            out_channels=out_channels,
            kernel_size=1)
        self.dropout = nn.Dropout(p=dropout_prob)

    def forward(self, x):
        priors = [stage(x) for stage in self.stages]
        context = priors[0]
        for i in range(1, len(priors)):
            context += priors[i]

        output = self.conv_bn(paddle.concat([context, x], axis=1))
        output = self.dropout(output)

        return output

def _pp_module(x, psp_size):
    n, c, h, w = x.shape
    priors = []
    for size in psp_size:
        feat = F.adaptive_avg_pool2d(x, size)
        feat = paddle.reshape(feat, shape=(0, c, -1))
        priors.append(feat)
    center = paddle.concat(priors, axis=-1)
    return center

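# Shape note (added for clarity, not in the original file): with the default
# psp_size=(1, 3, 6, 8), an input of shape (N, C, H, W) is adaptively pooled
# to 1*1 + 3*3 + 6*6 + 8*8 = 110 spatial anchors and flattened to (N, C, 110).
# Attending to these pooled anchors instead of all H*W positions is what makes
# the non-local blocks below "asymmetric" and cheap.
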
class SelfAttentionBlock_AFNB(nn.Layer):
    """
    Self-Attention Block for the AFNB module.

    Args:
        low_in_channels (int): Low-level-feature channels.
        high_in_channels (int): High-level-feature channels.
        key_channels (int): The key channels in the self-attention block.
        value_channels (int): The value channels in the self-attention block.
        out_channels (int, optional): Output channels of the AFNB module. Default: None.
        scale (int, optional): Pooling size. Default: 1.
        psp_size (tuple, optional): The output sizes of the pooled feature maps. Default: (1, 3, 6, 8).
    """
    def __init__(self,
                 low_in_channels,
                 high_in_channels,
                 key_channels,
                 value_channels,
                 out_channels=None,
                 scale=1,
                 psp_size=(1, 3, 6, 8)):
        super().__init__()
        self.scale = scale
        self.in_channels = low_in_channels
        self.out_channels = out_channels
        self.key_channels = key_channels
        self.value_channels = value_channels

        if out_channels is None:
            self.out_channels = high_in_channels
        self.pool = nn.MaxPool2D(scale)
        self.f_key = layers.ConvBNReLU(
            in_channels=low_in_channels,
            out_channels=key_channels,
            kernel_size=1)
        self.f_query = layers.ConvBNReLU(
            in_channels=high_in_channels,
            out_channels=key_channels,
            kernel_size=1)
        self.f_value = nn.Conv2D(
            in_channels=low_in_channels,
            out_channels=value_channels,
            kernel_size=1)
        self.W = nn.Conv2D(
            in_channels=value_channels,
            out_channels=self.out_channels,  # use the resolved value so the None default works
            kernel_size=1)

        self.psp_size = psp_size

    def forward(self, low_feats, high_feats):
        batch_size, _, h, w = high_feats.shape

        value = self.f_value(low_feats)
        value = _pp_module(value, self.psp_size)
        value = paddle.transpose(value, (0, 2, 1))

        query = self.f_query(high_feats)
        query = paddle.reshape(query, shape=(0, self.key_channels, -1))
        query = paddle.transpose(query, perm=(0, 2, 1))

        key = self.f_key(low_feats)
        key = _pp_module(key, self.psp_size)

        sim_map = paddle.matmul(query, key)
        sim_map = (self.key_channels**-.5) * sim_map
        sim_map = F.softmax(sim_map, axis=-1)

        context = paddle.matmul(sim_map, value)
        context = paddle.transpose(context, perm=(0, 2, 1))

        hf_shape = paddle.shape(high_feats)
        context = paddle.reshape(
            context, shape=[0, self.value_channels, hf_shape[2], hf_shape[3]])

        context = self.W(context)

        return context

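# What both self-attention blocks compute (explanatory note, not original
# code): scaled dot-product attention where queries come from the full-
# resolution feature map and keys/values come from the 110 pooled anchors,
#
#   sim = softmax(Q @ K^T / sqrt(key_channels))   # (N, H*W, 110)
#   ctx = sim @ V                                  # (N, H*W, value_channels)
#
# so the cost scales with H*W * 110 instead of (H*W)^2 as in a full non-local
# block. AFNB draws Q from high-level features and K/V from low-level ones;
# APNB draws all three from the same feature map.
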
class SelfAttentionBlock_APNB(nn.Layer):
    """
    Self-Attention Block for the APNB module.

    Args:
        in_channels (int): The input channels of the APNB module.
        out_channels (int): The output channels of the APNB module.
        key_channels (int): The key channels in the self-attention block.
        value_channels (int): The value channels in the self-attention block.
        scale (int, optional): Pooling size. Default: 1.
        psp_size (tuple, optional): The output sizes of the pooled feature maps. Default: (1, 3, 6, 8).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 key_channels,
                 value_channels,
                 scale=1,
                 psp_size=(1, 3, 6, 8)):
        super().__init__()
        self.scale = scale
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.key_channels = key_channels
        self.value_channels = value_channels
        self.pool = nn.MaxPool2D(scale)
        self.f_key = layers.ConvBNReLU(
            in_channels=self.in_channels,
            out_channels=self.key_channels,
            kernel_size=1)
        self.f_query = self.f_key
        self.f_value = nn.Conv2D(
            in_channels=self.in_channels,
            out_channels=self.value_channels,
            kernel_size=1)
        self.W = nn.Conv2D(
            in_channels=self.value_channels,
            out_channels=self.out_channels,
            kernel_size=1)

        self.psp_size = psp_size

    def forward(self, x):
        batch_size, _, h, w = x.shape
        if self.scale > 1:
            x = self.pool(x)

        value = self.f_value(x)
        value = _pp_module(value, self.psp_size)
        value = paddle.transpose(value, perm=(0, 2, 1))

        query = self.f_query(x)
        query = paddle.reshape(query, shape=(0, self.key_channels, -1))
        query = paddle.transpose(query, perm=(0, 2, 1))

        key = self.f_key(x)
        key = _pp_module(key, self.psp_size)

        sim_map = paddle.matmul(query, key)
        sim_map = (self.key_channels**-.5) * sim_map
        sim_map = F.softmax(sim_map, axis=-1)

        context = paddle.matmul(sim_map, value)
        context = paddle.transpose(context, perm=(0, 2, 1))
        x_shape = paddle.shape(x)
        context = paddle.reshape(
            context, shape=[0, self.value_channels, x_shape[2], x_shape[3]])
        context = self.W(context)

        return context
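

# A small smoke test (added as an illustrative sketch, not part of the
# original file). The channel sizes below are arbitrary; it only exercises the
# APNB block defined above and requires a working paddle install.
if __name__ == "__main__":
    feat = paddle.rand([2, 64, 32, 32])
    apnb = APNB(
        in_channels=64,
        out_channels=64,
        key_channels=32,
        value_channels=32,
        dropout_prob=0.05)
    out = apnb(feat)
    # Attention runs against the 110 pooled anchors, but the output keeps the
    # input's spatial resolution and the requested channel count.
    print(out.shape)  # expected: [2, 64, 32, 32]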