# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math

import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay


class MobileNetV3():
    """
    MobileNet v3, see https://arxiv.org/abs/1905.02244

    Args:
        scale (float): scaling factor applied to the channel count of every
            convolution group in MobileNetV3.
        model_name (str): model variant, either 'small' or 'large'.
        norm_type (str): normalization type, 'bn' and 'sync_bn' are supported.
        norm_decay (float): weight decay for normalization layer weights.
        conv_decay (float): weight decay for convolution layer weights.
        with_extra_blocks (bool): whether extra downsampling blocks should be
            appended after the backbone.
        extra_block_filters (list): number of filters for each extra block.
        num_classes (int): number of classes; if set, a classification head
            (global pooling, 1x1 conv, dropout, fc) is appended and its logits
            are returned instead of the feature maps.
        lr_mult_list (list): stage-wise learning rate multipliers for the
            backbone parameters; must contain exactly 5 values.
    """
    def __init__(self,
                 scale=1.0,
                 model_name='small',
                 with_extra_blocks=False,
                 conv_decay=0.0,
                 norm_type='bn',
                 norm_decay=0.0,
                 extra_block_filters=[[256, 512], [128, 256], [128, 256],
                                      [64, 128]],
                 num_classes=None,
                 lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0]):
        assert len(lr_mult_list) == 5, \
            "lr_mult_list length in MobileNetV3 must be 5 but got {}!!".format(
                len(lr_mult_list))
        self.scale = scale
        self.with_extra_blocks = with_extra_blocks
        self.extra_block_filters = extra_block_filters
        self.conv_decay = conv_decay
        self.norm_decay = norm_decay
        self.inplanes = 16
        self.end_points = []
        self.block_stride = 1
        self.num_classes = num_classes
        self.lr_mult_list = lr_mult_list
        self.curr_stage = 0

        if model_name == "large":
            self.cfg = [
                # kernel_size, expand, channel, se_block, act_mode, stride
                [3, 16, 16, False, 'relu', 1],
                [3, 64, 24, False, 'relu', 2],
                [3, 72, 24, False, 'relu', 1],
                [5, 72, 40, True, 'relu', 2],
                [5, 120, 40, True, 'relu', 1],
                [5, 120, 40, True, 'relu', 1],
                [3, 240, 80, False, 'hard_swish', 2],
                [3, 200, 80, False, 'hard_swish', 1],
                [3, 184, 80, False, 'hard_swish', 1],
                [3, 184, 80, False, 'hard_swish', 1],
                [3, 480, 112, True, 'hard_swish', 1],
                [3, 672, 112, True, 'hard_swish', 1],
                [5, 672, 160, True, 'hard_swish', 2],
                [5, 960, 160, True, 'hard_swish', 1],
                [5, 960, 160, True, 'hard_swish', 1],
            ]
            self.cls_ch_squeeze = 960
            self.cls_ch_expand = 1280
            self.lr_interval = 3
        elif model_name == "small":
            self.cfg = [
                # kernel_size, expand, channel, se_block, act_mode, stride
                [3, 16, 16, True, 'relu', 2],
                [3, 72, 24, False, 'relu', 2],
                [3, 88, 24, False, 'relu', 1],
                [5, 96, 40, True, 'hard_swish', 2],
                [5, 240, 40, True, 'hard_swish', 1],
                [5, 240, 40, True, 'hard_swish', 1],
                [5, 120, 48, True, 'hard_swish', 1],
                [5, 144, 48, True, 'hard_swish', 1],
                [5, 288, 96, True, 'hard_swish', 2],
                [5, 576, 96, True, 'hard_swish', 1],
                [5, 576, 96, True, 'hard_swish', 1],
            ]
            self.cls_ch_squeeze = 576
            self.cls_ch_expand = 1280
            self.lr_interval = 2
        else:
            raise NotImplementedError(
                "model_name must be 'large' or 'small', but got {}".format(
                    model_name))

    def _conv_bn_layer(self,
                       input,
                       filter_size,
                       num_filters,
                       stride,
                       padding,
                       num_groups=1,
                       if_act=True,
                       act=None,
                       name=None,
                       use_cudnn=True):
        # Stage-wise learning rate multiplier: every `lr_interval` blocks
        # advance to the next entry of lr_mult_list.
        lr_idx = self.curr_stage // self.lr_interval
        lr_idx = min(lr_idx, len(self.lr_mult_list) - 1)
        lr_mult = self.lr_mult_list[lr_idx]

        conv_param_attr = ParamAttr(
            name=name + '_weights',
            learning_rate=lr_mult,
            regularizer=L2Decay(self.conv_decay))
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=conv_param_attr,
            bias_attr=False)

        bn_name = name + '_bn'
        bn_param_attr = ParamAttr(
            name=bn_name + "_scale", regularizer=L2Decay(self.norm_decay))
        bn_bias_attr = ParamAttr(
            name=bn_name + "_offset", regularizer=L2Decay(self.norm_decay))
        bn = fluid.layers.batch_norm(
            input=conv,
            param_attr=bn_param_attr,
            bias_attr=bn_bias_attr,
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

        if if_act:
            if act == 'relu':
                bn = fluid.layers.relu(bn)
            elif act == 'hard_swish':
                bn = self._hard_swish(bn)
            elif act == 'relu6':
                bn = fluid.layers.relu6(bn)
        return bn

    def _hard_swish(self, x):
        # hard_swish(x) = x * relu6(x + 3) / 6
        return x * fluid.layers.relu6(x + 3) / 6.

    def _se_block(self, input, num_out_filter, ratio=4, name=None):
        lr_idx = self.curr_stage // self.lr_interval
        lr_idx = min(lr_idx, len(self.lr_mult_list) - 1)
        lr_mult = self.lr_mult_list[lr_idx]

        num_mid_filter = int(num_out_filter // ratio)
        # Squeeze: global average pooling down to 1x1.
        pool = fluid.layers.pool2d(
            input=input,
            pool_type='avg',
            global_pooling=True,
            use_cudnn=False)
        # Excitation: 1x1 bottleneck conv (relu) followed by a 1x1 gating
        # conv (hard_sigmoid).
        conv1 = fluid.layers.conv2d(
            input=pool,
            filter_size=1,
            num_filters=num_mid_filter,
            act='relu',
            param_attr=ParamAttr(
                name=name + '_1_weights', learning_rate=lr_mult),
            bias_attr=ParamAttr(
                name=name + '_1_offset', learning_rate=lr_mult))
        conv2 = fluid.layers.conv2d(
            input=conv1,
            filter_size=1,
            num_filters=num_out_filter,
            act='hard_sigmoid',
            param_attr=ParamAttr(
                name=name + '_2_weights', learning_rate=lr_mult),
            bias_attr=ParamAttr(
                name=name + '_2_offset', learning_rate=lr_mult))
        # Re-weight the input channels with the learned gate.
        scale = fluid.layers.elementwise_mul(x=input, y=conv2, axis=0)
        return scale

    def _residual_unit(self,
                       input,
                       num_in_filter,
                       num_mid_filter,
                       num_out_filter,
                       stride,
                       filter_size,
                       act=None,
                       use_se=False,
                       name=None):
        input_data = input
        # 1x1 expansion conv.
        conv0 = self._conv_bn_layer(
            input=input,
            filter_size=1,
            num_filters=num_mid_filter,
            stride=1,
            padding=0,
            if_act=True,
            act=act,
            name=name + '_expand')
        if self.block_stride == 16 and stride == 2:
            self.end_points.append(conv0)
        # Depthwise conv.
        conv1 = self._conv_bn_layer(
            input=conv0,
            filter_size=filter_size,
            num_filters=num_mid_filter,
            stride=stride,
            padding=int((filter_size - 1) // 2),
            if_act=True,
            act=act,
            num_groups=num_mid_filter,
            use_cudnn=False,
            name=name + '_depthwise')
        if use_se:
            conv1 = self._se_block(
                input=conv1, num_out_filter=num_mid_filter, name=name + '_se')
        # 1x1 linear projection conv (no activation).
        conv2 = self._conv_bn_layer(
            input=conv1,
            filter_size=1,
            num_filters=num_out_filter,
            stride=1,
            padding=0,
            if_act=False,
            name=name + '_linear')
        # Residual connection only when the shape is preserved.
        if num_in_filter != num_out_filter or stride != 1:
            return conv2
        else:
            return fluid.layers.elementwise_add(
                x=input_data, y=conv2, act=None)

    def _extra_block_dw(self,
                        input,
                        num_filters1,
                        num_filters2,
                        stride,
                        name=None):
        pointwise_conv = self._conv_bn_layer(
            input=input,
            filter_size=1,
            num_filters=int(num_filters1),
            stride=1,
            padding="SAME",
            act='relu6',
            name=name + "_extra1")
        depthwise_conv = self._conv_bn_layer(
            input=pointwise_conv,
            filter_size=3,
            num_filters=int(num_filters2),
            stride=stride,
            padding="SAME",
            num_groups=int(num_filters1),
            act='relu6',
            use_cudnn=False,
            name=name + "_extra2_dw")
        normal_conv = self._conv_bn_layer(
            input=depthwise_conv,
            filter_size=1,
            num_filters=int(num_filters2),
            stride=1,
            padding="SAME",
            act='relu6',
            name=name + "_extra2_sep")
        return normal_conv

    def __call__(self, input):
        scale = self.scale
        inplanes = self.inplanes
        cfg = self.cfg
        blocks = []

        # conv1
        conv = self._conv_bn_layer(
            input,
            filter_size=3,
            num_filters=inplanes if scale <= 1.0 else int(inplanes * scale),
            stride=2,
            padding=1,
            num_groups=1,
            if_act=True,
            act='hard_swish',
            name='conv1')

        # inverted residual blocks
        i = 0
        for layer_cfg in cfg:
            self.block_stride *= layer_cfg[5]
            if layer_cfg[5] == 2:
                blocks.append(conv)
            conv = self._residual_unit(
                input=conv,
                num_in_filter=inplanes,
                num_mid_filter=int(scale * layer_cfg[1]),
                num_out_filter=int(scale * layer_cfg[2]),
                act=layer_cfg[4],
                stride=layer_cfg[5],
                filter_size=layer_cfg[0],
                use_se=layer_cfg[3],
                name='conv' + str(i + 2))
            inplanes = int(scale * layer_cfg[2])
            i += 1
            self.curr_stage = i
        blocks.append(conv)

        # classification head
        if self.num_classes:
            conv = self._conv_bn_layer(
                input=conv,
                filter_size=1,
                num_filters=int(scale * self.cls_ch_squeeze),
                stride=1,
                padding=0,
                num_groups=1,
                if_act=True,
                act='hard_swish',
                name='conv_last')
            conv = fluid.layers.pool2d(
                input=conv,
                pool_type='avg',
                global_pooling=True,
                use_cudnn=False)
            conv = fluid.layers.conv2d(
                input=conv,
                num_filters=self.cls_ch_expand,
                filter_size=1,
                stride=1,
                padding=0,
                act=None,
                param_attr=ParamAttr(name='last_1x1_conv_weights'),
                bias_attr=False)
            conv = self._hard_swish(conv)
            drop = fluid.layers.dropout(x=conv, dropout_prob=0.2)
            out = fluid.layers.fc(
                input=drop,
                size=self.num_classes,
                param_attr=ParamAttr(name='fc_weights'),
                bias_attr=ParamAttr(name='fc_offset'))
            return out

        if not self.with_extra_blocks:
            return blocks

        # extra block
        conv_extra = self._conv_bn_layer(
            conv,
            filter_size=1,
            num_filters=int(scale * cfg[-1][1]),
            stride=1,
            padding="SAME",
            num_groups=1,
            if_act=True,
            act='hard_swish',
            name='conv' + str(i + 2))
        self.end_points.append(conv_extra)
        i += 1
        for block_filter in self.extra_block_filters:
            conv_extra = self._extra_block_dw(
                conv_extra, block_filter[0], block_filter[1], 2,
                'conv' + str(i + 2))
            self.end_points.append(conv_extra)
            i += 1
        return self.end_points
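
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the original module).
# Assumes a PaddlePaddle 1.x static-graph environment; the input name 'image'
# and the 3x224x224 shape are arbitrary choices for this example. Passing
# num_classes instead would make __call__ return classification logits.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    image = fluid.data(
        name='image', shape=[None, 3, 224, 224], dtype='float32')
    backbone = MobileNetV3(scale=1.0, model_name='large')
    # Without num_classes / with_extra_blocks, __call__ returns the feature
    # maps captured right before each stride-2 block plus the final output.
    feats = backbone(image)
    for idx, feat in enumerate(feats):
        print(idx, feat.shape)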