# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from numbers import Integral

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from paddle.nn.initializer import KaimingNormal

from paddlex.ppdet.core.workspace import register, serializable
from ..shape_spec import ShapeSpec

__all__ = ['BlazeNet']


class ConvBNLayer(nn.Layer):
    """Conv2D + BatchNorm + optional activation ('relu' or 'relu6')."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 num_groups=1,
                 act='relu',
                 conv_lr=0.1,
                 conv_decay=0.,
                 norm_decay=0.,
                 norm_type='bn',
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.act = act
        # NOTE: conv_decay and norm_decay are accepted but not applied here.
        self._conv = nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            weight_attr=ParamAttr(
                learning_rate=conv_lr,
                initializer=KaimingNormal(),
                name=name + "_weights"),
            bias_attr=False)

        param_attr = ParamAttr(name=name + "_bn_scale")
        bias_attr = ParamAttr(name=name + "_bn_offset")
        if norm_type == 'sync_bn':
            self._batch_norm = nn.SyncBatchNorm(
                out_channels, weight_attr=param_attr, bias_attr=bias_attr)
        else:
            self._batch_norm = nn.BatchNorm(
                out_channels,
                act=None,
                param_attr=param_attr,
                bias_attr=bias_attr,
                use_global_stats=False,
                moving_mean_name=name + '_bn_mean',
                moving_variance_name=name + '_bn_variance')

    def forward(self, x):
        x = self._conv(x)
        x = self._batch_norm(x)
        if self.act == "relu":
            x = F.relu(x)
        elif self.act == "relu6":
            x = F.relu6(x)
        return x
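

# Usage sketch (illustrative, with hypothetical values): a stride-2
# ConvBNLayer halves the spatial resolution, e.g.
#
#     layer = ConvBNLayer(in_channels=3, out_channels=24, kernel_size=3,
#                         stride=2, padding=1, name="demo")
#     y = layer(paddle.rand([1, 3, 128, 128]))  # y.shape == [1, 24, 64, 64]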


class BlazeBlock(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels1,
                 out_channels2,
                 double_channels=None,
                 stride=1,
                 use_5x5kernel=True,
                 name=None):
        super(BlazeBlock, self).__init__()
        assert stride in [1, 2]
        self.use_pool = stride != 1
        self.use_double_block = double_channels is not None

        # depthwise stage: a single 5x5 conv, or two stacked 3x3 convs
        # covering the same receptive field
        self.conv_dw = []
        if use_5x5kernel:
            self.conv_dw.append(
                self.add_sublayer(
                    name + "1_dw",
                    ConvBNLayer(
                        in_channels=in_channels,
                        out_channels=out_channels1,
                        kernel_size=5,
                        stride=stride,
                        padding=2,
                        num_groups=out_channels1,
                        name=name + "1_dw")))
        else:
            self.conv_dw.append(
                self.add_sublayer(
                    name + "1_dw_1",
                    ConvBNLayer(
                        in_channels=in_channels,
                        out_channels=out_channels1,
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        num_groups=out_channels1,
                        name=name + "1_dw_1")))
            self.conv_dw.append(
                self.add_sublayer(
                    name + "1_dw_2",
                    ConvBNLayer(
                        in_channels=out_channels1,
                        out_channels=out_channels1,
                        kernel_size=3,
                        stride=stride,
                        padding=1,
                        num_groups=out_channels1,
                        name=name + "1_dw_2")))
        act = 'relu' if self.use_double_block else None
        self.conv_pw = ConvBNLayer(
            in_channels=out_channels1,
            out_channels=out_channels2,
            kernel_size=1,
            stride=1,
            padding=0,
            act=act,
            name=name + "1_sep")
        if self.use_double_block:
            self.conv_dw2 = []
            if use_5x5kernel:
                self.conv_dw2.append(
                    self.add_sublayer(
                        name + "2_dw",
                        ConvBNLayer(
                            in_channels=out_channels2,
                            out_channels=out_channels2,
                            kernel_size=5,
                            stride=1,
                            padding=2,
                            num_groups=out_channels2,
                            name=name + "2_dw")))
            else:
                self.conv_dw2.append(
                    self.add_sublayer(
                        name + "2_dw_1",
                        ConvBNLayer(
                            in_channels=out_channels2,
                            out_channels=out_channels2,
                            kernel_size=3,
                            stride=1,
                            padding=1,
                            num_groups=out_channels2,
                            name=name + "2_dw_1")))
                self.conv_dw2.append(
                    self.add_sublayer(
                        name + "2_dw_2",
                        ConvBNLayer(
                            in_channels=out_channels2,
                            out_channels=out_channels2,
                            kernel_size=3,
                            stride=1,
                            padding=1,
                            num_groups=out_channels2,
                            name=name + "2_dw_2")))
            self.conv_pw2 = ConvBNLayer(
                in_channels=out_channels2,
                out_channels=double_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                name=name + "2_sep")
        # shortcut: when downsampling, pool and project the input so it
        # matches the main branch before the residual add
        if self.use_pool:
            shortcut_channel = double_channels or out_channels2
            self._shortcut = []
            self._shortcut.append(
                self.add_sublayer(
                    name + '_shortcut_pool',
                    nn.MaxPool2D(
                        kernel_size=stride, stride=stride, ceil_mode=True)))
            self._shortcut.append(
                self.add_sublayer(
                    name + '_shortcut_conv',
                    ConvBNLayer(
                        in_channels=in_channels,
                        out_channels=shortcut_channel,
                        kernel_size=1,
                        stride=1,
                        padding=0,
                        name="shortcut" + name)))

    def forward(self, x):
        y = x
        for conv_dw_block in self.conv_dw:
            y = conv_dw_block(y)
        y = self.conv_pw(y)
        if self.use_double_block:
            for conv_dw2_block in self.conv_dw2:
                y = conv_dw2_block(y)
            y = self.conv_pw2(y)
        if self.use_pool:
            for shortcut in self._shortcut:
                x = shortcut(x)
        return F.relu(paddle.add(x, y))
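

# Shape sketch (illustrative, with hypothetical values): a stride-2 block
# downsamples both branches before the residual add, e.g.
#
#     block = BlazeBlock(24, 24, 48, stride=2, name="demo")
#     y = block(paddle.rand([1, 24, 64, 64]))  # y.shape == [1, 48, 32, 32]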


@register
@serializable
class BlazeNet(nn.Layer):
    """
    BlazeFace backbone, see https://arxiv.org/abs/1907.05047

    Args:
        blaze_filters (list): number of filters for each BlazeBlock;
            an optional third entry is the block's stride.
        double_blaze_filters (list): number of filters for each double
            BlazeBlock; an optional fourth entry is the block's stride.
        use_5x5kernel (bool): whether the depthwise convs use a single 5x5
            kernel or two stacked 3x3 kernels.
    """

    def __init__(self,
                 blaze_filters=[[24, 24], [24, 24], [24, 48, 2], [48, 48],
                                [48, 48]],
                 double_blaze_filters=[[48, 24, 96, 2], [96, 24, 96],
                                       [96, 24, 96], [96, 24, 96, 2],
                                       [96, 24, 96], [96, 24, 96]],
                 use_5x5kernel=True):
        super(BlazeNet, self).__init__()
        conv1_num_filters = blaze_filters[0][0]
        self.conv1 = ConvBNLayer(
            in_channels=3,
            out_channels=conv1_num_filters,
            kernel_size=3,
            stride=2,
            padding=1,
            name="conv1")

        in_channels = conv1_num_filters
        self.blaze_block = []
        self._out_channels = []
        for k, v in enumerate(blaze_filters):
            assert len(v) in [2, 3], \
                "blaze_filters {} not in [2, 3]".format(v)
            if len(v) == 2:
                self.blaze_block.append(
                    self.add_sublayer(
                        'blaze_{}'.format(k),
                        BlazeBlock(
                            in_channels,
                            v[0],
                            v[1],
                            use_5x5kernel=use_5x5kernel,
                            name='blaze_{}'.format(k))))
            elif len(v) == 3:
                self.blaze_block.append(
                    self.add_sublayer(
                        'blaze_{}'.format(k),
                        BlazeBlock(
                            in_channels,
                            v[0],
                            v[1],
                            stride=v[2],
                            use_5x5kernel=use_5x5kernel,
                            name='blaze_{}'.format(k))))
            in_channels = v[1]

        for k, v in enumerate(double_blaze_filters):
            assert len(v) in [3, 4], \
                "double_blaze_filters {} not in [3, 4]".format(v)
            if len(v) == 3:
                self.blaze_block.append(
                    self.add_sublayer(
                        'double_blaze_{}'.format(k),
                        BlazeBlock(
                            in_channels,
                            v[0],
                            v[1],
                            double_channels=v[2],
                            use_5x5kernel=use_5x5kernel,
                            name='double_blaze_{}'.format(k))))
            elif len(v) == 4:
                self.blaze_block.append(
                    self.add_sublayer(
                        'double_blaze_{}'.format(k),
                        BlazeBlock(
                            in_channels,
                            v[0],
                            v[1],
                            double_channels=v[2],
                            stride=v[3],
                            use_5x5kernel=use_5x5kernel,
                            name='double_blaze_{}'.format(k))))
            in_channels = v[2]
            self._out_channels.append(in_channels)

    def forward(self, inputs):
        outs = []
        y = self.conv1(inputs['image'])
        for block in self.blaze_block:
            y = block(y)
            outs.append(y)
        # return the two feature levels consumed by the detection head
        return [outs[-4], outs[-1]]

    @property
    def out_shape(self):
        return [
            ShapeSpec(channels=c)
            for c in [self._out_channels[-4], self._out_channels[-1]]
        ]
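

# Smoke-test sketch (illustrative; assumes the module is imported within its
# package so the relative import of ShapeSpec resolves):
#
#     model = BlazeNet()
#     feats = model({'image': paddle.rand([1, 3, 640, 640])})
#     [f.shape for f in feats]               # [[1, 96, 80, 80], [1, 96, 40, 40]]
#     [s.channels for s in model.out_shape]  # [96, 96]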