# ttf_fpn.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Constant, Uniform, Normal, XavierUniform
from paddle.regularizer import L2Decay

from paddlex.ppdet.core.workspace import register, serializable
from paddlex.ppdet.modeling.layers import DeformableConvV2, ConvNormLayer, LiteConv
from paddlex.ppdet.modeling.ops import batch_norm

from ..shape_spec import ShapeSpec

__all__ = ['TTFFPN']

class Upsample(nn.Layer):
    """3x3 DCNv2 -> norm -> ReLU, followed by 2x bilinear upsampling."""

    def __init__(self, ch_in, ch_out, norm_type='bn'):
        super(Upsample, self).__init__()
        fan_in = ch_in * 3 * 3
        stdv = 1. / math.sqrt(fan_in)
        self.dcn = DeformableConvV2(
            ch_in,
            ch_out,
            kernel_size=3,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(
                initializer=Constant(0),
                regularizer=L2Decay(0.),
                learning_rate=2.),
            lr_scale=2.,
            regularizer=L2Decay(0.))

        self.bn = batch_norm(
            ch_out, norm_type=norm_type, initializer=Constant(1.))

    def forward(self, feat):
        dcn = self.dcn(feat)
        bn = self.bn(dcn)
        relu = F.relu(bn)
        out = F.interpolate(relu, scale_factor=2., mode='bilinear')
        return out
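
# Shape sketch (illustrative note, not in the original file): with ch_in=1024
# and ch_out=256, an input of shape [N, 1024, H, W] passes through
# DCNv2 + norm + ReLU to [N, 256, H, W], and the bilinear interpolation then
# doubles the spatial resolution to [N, 256, 2H, 2W].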

class DeConv(nn.Layer):
    """1x1 conv -> depthwise 4x4 transposed conv (stride 2) -> 1x1 conv."""

    def __init__(self, ch_in, ch_out, norm_type='bn'):
        super(DeConv, self).__init__()
        self.deconv = nn.Sequential()
        conv1 = ConvNormLayer(
            ch_in=ch_in,
            ch_out=ch_out,
            stride=1,
            filter_size=1,
            norm_type=norm_type,
            initializer=XavierUniform())
        conv2 = nn.Conv2DTranspose(
            in_channels=ch_out,
            out_channels=ch_out,
            kernel_size=4,
            padding=1,
            stride=2,
            groups=ch_out,
            weight_attr=ParamAttr(initializer=XavierUniform()),
            bias_attr=False)
        bn = batch_norm(ch_out, norm_type=norm_type, norm_decay=0.)
        conv3 = ConvNormLayer(
            ch_in=ch_out,
            ch_out=ch_out,
            stride=1,
            filter_size=1,
            norm_type=norm_type,
            initializer=XavierUniform())

        self.deconv.add_sublayer('conv1', conv1)
        self.deconv.add_sublayer('relu6_1', nn.ReLU6())
        self.deconv.add_sublayer('conv2', conv2)
        self.deconv.add_sublayer('bn', bn)
        self.deconv.add_sublayer('relu6_2', nn.ReLU6())
        self.deconv.add_sublayer('conv3', conv3)
        self.deconv.add_sublayer('relu6_3', nn.ReLU6())

    def forward(self, inputs):
        return self.deconv(inputs)
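
# Output-size check for the transposed conv above (standard formula, added
# here as a note):
#   H_out = (H_in - 1) * stride - 2 * padding + kernel_size
#         = (H_in - 1) * 2 - 2 + 4 = 2 * H_in,
# so DeConv doubles the spatial resolution, matching the bilinear path of
# LiteUpsample below.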

class LiteUpsample(nn.Layer):
    """Sum of a learned 2x upsampling path (DeConv) and an interpolated one
    (LiteConv followed by 2x bilinear upsampling)."""

    def __init__(self, ch_in, ch_out, norm_type='bn'):
        super(LiteUpsample, self).__init__()
        self.deconv = DeConv(ch_in, ch_out, norm_type=norm_type)
        self.conv = LiteConv(ch_in, ch_out, norm_type=norm_type)

    def forward(self, inputs):
        deconv_up = self.deconv(inputs)
        conv = self.conv(inputs)
        interp_up = F.interpolate(conv, scale_factor=2., mode='bilinear')
        return deconv_up + interp_up

class ShortCut(nn.Layer):
    """Lateral branch: a stack of `layer_num` 3x3 convs (or LiteConvs) with
    ReLU between them and no activation after the last one."""

    def __init__(self,
                 layer_num,
                 ch_in,
                 ch_out,
                 norm_type='bn',
                 lite_neck=False,
                 name=None):
        super(ShortCut, self).__init__()
        shortcut_conv = nn.Sequential()
        for i in range(layer_num):
            fan_out = 3 * 3 * ch_out
            std = math.sqrt(2. / fan_out)  # Kaiming-style std for 3x3 convs
            in_channels = ch_in if i == 0 else ch_out
            shortcut_name = name + '.conv.{}'.format(i)
            if lite_neck:
                shortcut_conv.add_sublayer(
                    shortcut_name,
                    LiteConv(
                        in_channels=in_channels,
                        out_channels=ch_out,
                        with_act=i < layer_num - 1,
                        norm_type=norm_type))
            else:
                shortcut_conv.add_sublayer(
                    shortcut_name,
                    nn.Conv2D(
                        in_channels=in_channels,
                        out_channels=ch_out,
                        kernel_size=3,
                        padding=1,
                        weight_attr=ParamAttr(initializer=Normal(0, std)),
                        bias_attr=ParamAttr(
                            learning_rate=2., regularizer=L2Decay(0.))))
                if i < layer_num - 1:
                    shortcut_conv.add_sublayer(shortcut_name + '.act',
                                               nn.ReLU())
        self.shortcut = self.add_sublayer('shortcut', shortcut_conv)

    def forward(self, feat):
        out = self.shortcut(feat)
        return out
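
# Example wiring (illustrative, not in the original file): with layer_num=3
# and lite_neck=False, the branch is conv-ReLU-conv-ReLU-conv; the final conv
# carries no activation so its output can be summed (or concatenated) with
# the upsample path in TTFFPN.forward below.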

@register
@serializable
class TTFFPN(nn.Layer):
    """
    Args:
        in_channels (list): number of input feature channels from the
            backbone. [128, 256, 512, 1024] by default, i.e. the channels
            returned by a DarkNet53 backbone with return_idx [1, 2, 3, 4].
        planes (list): number of output feature channels of the FPN levels.
            [256, 128, 64] by default.
        shortcut_num (list): number of convolution layers in each shortcut.
            [3, 2, 1] by default, meaning that for the DarkNet53 backbone,
            return_idx 1 has 3 convs in its shortcut, return_idx 2 has 2
            convs and return_idx 3 has 1 conv.
        norm_type (string): norm type, one of 'bn', 'sync_bn' or 'gn'.
            'bn' by default.
        lite_neck (bool): whether to use lite convs in the TTFNet FPN.
            False by default.
        fusion_method (string): method used to fuse the upsampled and
            lateral features, either 'add' or 'concat'. 'add' by default.
    """
    __shared__ = ['norm_type']

    def __init__(self,
                 in_channels,
                 planes=[256, 128, 64],
                 shortcut_num=[3, 2, 1],
                 norm_type='bn',
                 lite_neck=False,
                 fusion_method='add'):
        super(TTFFPN, self).__init__()
        self.planes = planes
        self.shortcut_num = shortcut_num[::-1]
        self.shortcut_len = len(shortcut_num)
        self.ch_in = in_channels[::-1]
        self.fusion_method = fusion_method

        self.upsample_list = []
        self.shortcut_list = []
        self.upper_list = []
        for i, out_c in enumerate(self.planes):
            in_c = self.ch_in[i] if i == 0 else self.upper_list[-1]
            upsample_module = LiteUpsample if lite_neck else Upsample
            upsample = self.add_sublayer(
                'upsample.' + str(i),
                upsample_module(
                    in_c, out_c, norm_type=norm_type))
            self.upsample_list.append(upsample)
            if i < self.shortcut_len:
                shortcut = self.add_sublayer(
                    'shortcut.' + str(i),
                    ShortCut(
                        self.shortcut_num[i],
                        self.ch_in[i + 1],
                        out_c,
                        norm_type=norm_type,
                        lite_neck=lite_neck,
                        name='shortcut.' + str(i)))
                self.shortcut_list.append(shortcut)
                if self.fusion_method == 'add':
                    upper_c = out_c
                elif self.fusion_method == 'concat':
                    upper_c = out_c * 2
                else:
                    raise ValueError(
                        'Illegal fusion method. Expected add or concat, '
                        'but received {}'.format(self.fusion_method))
                self.upper_list.append(upper_c)

    def forward(self, inputs):
        feat = inputs[-1]
        for i, out_c in enumerate(self.planes):
            feat = self.upsample_list[i](feat)
            if i < self.shortcut_len:
                shortcut = self.shortcut_list[i](inputs[-i - 2])
                if self.fusion_method == 'add':
                    feat = feat + shortcut
                else:
                    feat = paddle.concat([feat, shortcut], axis=1)
        return feat

    @classmethod
    def from_config(cls, cfg, input_shape):
        return {'in_channels': [i.channels for i in input_shape]}

    @property
    def out_shape(self):
        return [ShapeSpec(channels=self.upper_list[-1])]
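
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file; shapes assume a 256x256 image
# through a DarkNet53-style backbone with return_idx [1, 2, 3, 4]):
#
#   fpn = TTFFPN(in_channels=[128, 256, 512, 1024])
#   feats = [
#       paddle.rand([1, 128, 64, 64]),   # stride 4
#       paddle.rand([1, 256, 32, 32]),   # stride 8
#       paddle.rand([1, 512, 16, 16]),   # stride 16
#       paddle.rand([1, 1024, 8, 8]),    # stride 32
#   ]
#   out = fpn(feats)  # [1, 64, 64, 64] with the default planes and 'add'
# ---------------------------------------------------------------------------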