# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D

from paddlex.ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

MODEL_URLS = {
    "Xception41_deeplab":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_deeplab_pretrained.pdparams",
    "Xception65_deeplab":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_deeplab_pretrained.pdparams",
}

__all__ = list(MODEL_URLS.keys())


def check_data(data, number):
    if isinstance(data, int):
        return [data] * number
    assert len(data) == number
    return data


def check_stride(s, os):
    return s <= os


def check_points(count, points):
    if points is None:
        return False
    if isinstance(points, list):
        return count in points
    return count == points
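
# Hand-traced examples of the helpers above (an illustrative sketch):
#   check_data(2, 3)          -> [2, 2, 2]   # a scalar is broadcast per block
#   check_data([2, 1, 2], 3)  -> [2, 1, 2]   # a list is length-checked and kept
#   check_stride(16, 32)      -> True        # accumulated stride still fits
#   check_points(5, [3, 5])   -> True
#   check_points(5, None)     -> False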


def gen_bottleneck_params(backbone='xception_65'):
    if backbone == 'xception_65':
        bottleneck_params = {
            "entry_flow": (3, [2, 2, 2], [128, 256, 728]),
            "middle_flow": (16, 1, 728),
            "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]])
        }
    elif backbone == 'xception_41':
        bottleneck_params = {
            "entry_flow": (3, [2, 2, 2], [128, 256, 728]),
            "middle_flow": (8, 1, 728),
            "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]])
        }
    elif backbone == 'xception_71':
        bottleneck_params = {
            "entry_flow": (5, [2, 1, 2, 1, 2], [128, 256, 256, 728, 728]),
            "middle_flow": (16, 1, 728),
            "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]])
        }
    else:
        raise Exception(
            "The Xception backbone only supports xception_41/xception_65/xception_71"
        )
    return bottleneck_params
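
# Reading the dict above: each flow maps to (block_count, strides, channels).
# For 'xception_65', for example, the entry flow stacks 3 blocks with stride 2
# and output channels 128/256/728, the middle flow repeats a 728-channel block
# 16 times at stride 1, and the exit flow lists per-block channel triples.
# Scalars (like middle_flow's 1 and 728) are broadcast to every block by
# check_data() inside XceptionDeeplab below.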


class ConvBNLayer(nn.Layer):
    def __init__(self,
                 input_channels,
                 output_channels,
                 filter_size,
                 stride=1,
                 padding=0,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self._conv = Conv2D(
            in_channels=input_channels,
            out_channels=output_channels,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            weight_attr=ParamAttr(name=name + "/weights"),
            bias_attr=False)
        self._bn = BatchNorm(
            num_channels=output_channels,
            act=act,
            epsilon=1e-3,
            momentum=0.99,
            param_attr=ParamAttr(name=name + "/BatchNorm/gamma"),
            bias_attr=ParamAttr(name=name + "/BatchNorm/beta"),
            moving_mean_name=name + "/BatchNorm/moving_mean",
            moving_variance_name=name + "/BatchNorm/moving_variance")

    def forward(self, inputs):
        return self._bn(self._conv(inputs))
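
# Shape sketch (hand-derived, hypothetical input): a [N, 3, 224, 224] tensor
# through ConvBNLayer(3, 32, 3, stride=2, padding=1, act="relu") comes out as
# [N, 32, 112, 112], since (224 + 2*1 - 3) // 2 + 1 = 112, followed by
# BatchNorm with the activation fused into it.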


class Seperate_Conv(nn.Layer):
    def __init__(self,
                 input_channels,
                 output_channels,
                 stride,
                 filter,
                 dilation=1,
                 act=None,
                 name=None):
        super(Seperate_Conv, self).__init__()
        # Depthwise conv: one filter per input channel (groups=input_channels).
        self._conv1 = Conv2D(
            in_channels=input_channels,
            out_channels=input_channels,
            kernel_size=filter,
            stride=stride,
            groups=input_channels,
            padding=(filter // 2) * dilation,
            dilation=dilation,
            weight_attr=ParamAttr(name=name + "/depthwise/weights"),
            bias_attr=False)
        self._bn1 = BatchNorm(
            input_channels,
            act=act,
            epsilon=1e-3,
            momentum=0.99,
            param_attr=ParamAttr(name=name + "/depthwise/BatchNorm/gamma"),
            bias_attr=ParamAttr(name=name + "/depthwise/BatchNorm/beta"),
            moving_mean_name=name + "/depthwise/BatchNorm/moving_mean",
            moving_variance_name=name + "/depthwise/BatchNorm/moving_variance")
        # Pointwise conv: 1x1 projection that mixes channels.
        self._conv2 = Conv2D(
            input_channels,
            output_channels,
            1,
            stride=1,
            groups=1,
            padding=0,
            weight_attr=ParamAttr(name=name + "/pointwise/weights"),
            bias_attr=False)
        self._bn2 = BatchNorm(
            output_channels,
            act=act,
            epsilon=1e-3,
            momentum=0.99,
            param_attr=ParamAttr(name=name + "/pointwise/BatchNorm/gamma"),
            bias_attr=ParamAttr(name=name + "/pointwise/BatchNorm/beta"),
            moving_mean_name=name + "/pointwise/BatchNorm/moving_mean",
            moving_variance_name=name + "/pointwise/BatchNorm/moving_variance")

    def forward(self, inputs):
        x = self._conv1(inputs)
        x = self._bn1(x)
        x = self._conv2(x)
        x = self._bn2(x)
        return x
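
# Why separable (hand arithmetic, bias-free as configured here): a dense 3x3
# conv mapping 728 -> 728 channels needs 728 * 728 * 3 * 3 ≈ 4.77M weights,
# while the depthwise (728 * 3 * 3 = 6,552) plus pointwise (728 * 728 =
# 529,984) pair above needs ≈ 0.54M, roughly a 9x reduction for the same
# receptive field.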


class Xception_Block(nn.Layer):
    def __init__(self,
                 input_channels,
                 output_channels,
                 strides=1,
                 filter_size=3,
                 dilation=1,
                 skip_conv=True,
                 has_skip=True,
                 activation_fn_in_separable_conv=False,
                 name=None):
        super(Xception_Block, self).__init__()

        repeat_number = 3
        output_channels = check_data(output_channels, repeat_number)
        filter_size = check_data(filter_size, repeat_number)
        strides = check_data(strides, repeat_number)

        self.has_skip = has_skip
        self.skip_conv = skip_conv
        self.activation_fn_in_separable_conv = activation_fn_in_separable_conv
        # With activation_fn_in_separable_conv the ReLU lives inside each
        # separable conv (fused into its BatchNorm); otherwise the ReLUs are
        # applied explicitly in forward() before each conv.
        act = "relu" if activation_fn_in_separable_conv else None
        self._conv1 = Seperate_Conv(
            input_channels,
            output_channels[0],
            stride=strides[0],
            filter=filter_size[0],
            dilation=dilation,
            act=act,
            name=name + "/separable_conv1")
        self._conv2 = Seperate_Conv(
            output_channels[0],
            output_channels[1],
            stride=strides[1],
            filter=filter_size[1],
            dilation=dilation,
            act=act,
            name=name + "/separable_conv2")
        self._conv3 = Seperate_Conv(
            output_channels[1],
            output_channels[2],
            stride=strides[2],
            filter=filter_size[2],
            dilation=dilation,
            act=act,
            name=name + "/separable_conv3")

        if has_skip and skip_conv:
            self._short = ConvBNLayer(
                input_channels,
                output_channels[-1],
                1,
                stride=strides[-1],
                padding=0,
                name=name + "/shortcut")

    def forward(self, inputs):
        if not self.activation_fn_in_separable_conv:
            x = F.relu(inputs)
            x = self._conv1(x)
            x = F.relu(x)
            x = self._conv2(x)
            x = F.relu(x)
            x = self._conv3(x)
        else:
            x = self._conv1(inputs)
            x = self._conv2(x)
            x = self._conv3(x)
        if not self.has_skip:
            return x
        skip = self._short(inputs) if self.skip_conv else inputs
        return paddle.add(x, skip)
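
# Residual layout of the block above: three stacked separable convs plus an
# optional shortcut. With skip_conv=True the shortcut is a strided 1x1
# ConvBNLayer that matches channels and resolution; with skip_conv=False
# (the middle flow) the raw input is added, so input and output shapes must
# already agree; with has_skip=False (exit block 2) no residual is added.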


class XceptionDeeplab(nn.Layer):
    def __init__(self, backbone, class_num=1000):
        super(XceptionDeeplab, self).__init__()

        bottleneck_params = gen_bottleneck_params(backbone)
        self.backbone = backbone

        self._conv1 = ConvBNLayer(
            3,
            32,
            3,
            stride=2,
            padding=1,
            act="relu",
            name=self.backbone + "/entry_flow/conv1")
        self._conv2 = ConvBNLayer(
            32,
            64,
            3,
            stride=1,
            padding=1,
            act="relu",
            name=self.backbone + "/entry_flow/conv2")

        # Entry flow.
        self.block_num = bottleneck_params["entry_flow"][0]
        self.strides = bottleneck_params["entry_flow"][1]
        self.chns = bottleneck_params["entry_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)

        self.entry_flow = []
        self.middle_flow = []

        self.stride = 2
        self.output_stride = 32
        s = self.stride

        for i in range(self.block_num):
            # Cap the per-block stride so the accumulated stride never exceeds
            # output_stride (with output_stride=32 the cap never triggers for
            # the configs above).
            stride = self.strides[i] if check_stride(s * self.strides[i],
                                                     self.output_stride) else 1
            xception_block = self.add_sublayer(
                self.backbone + "/entry_flow/block" + str(i + 1),
                Xception_Block(
                    input_channels=64 if i == 0 else self.chns[i - 1],
                    output_channels=self.chns[i],
                    strides=[1, 1, stride],
                    name=self.backbone + "/entry_flow/block" + str(i + 1)))
            self.entry_flow.append(xception_block)
            s = s * stride
        self.stride = s

        # Middle flow.
        self.block_num = bottleneck_params["middle_flow"][0]
        self.strides = bottleneck_params["middle_flow"][1]
        self.chns = bottleneck_params["middle_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)
        s = self.stride

        for i in range(self.block_num):
            stride = self.strides[i] if check_stride(s * self.strides[i],
                                                     self.output_stride) else 1
            xception_block = self.add_sublayer(
                self.backbone + "/middle_flow/block" + str(i + 1),
                Xception_Block(
                    input_channels=728,
                    output_channels=728,
                    strides=[1, 1, stride],
                    skip_conv=False,
                    name=self.backbone + "/middle_flow/block" + str(i + 1)))
            self.middle_flow.append(xception_block)
            s = s * stride
        self.stride = s

        # Exit flow.
        self.block_num = bottleneck_params["exit_flow"][0]
        self.strides = bottleneck_params["exit_flow"][1]
        self.chns = bottleneck_params["exit_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)
        s = self.stride
        stride = self.strides[0] if check_stride(s * self.strides[0],
                                                 self.output_stride) else 1
        self._exit_flow_1 = Xception_Block(
            728,
            self.chns[0], [1, 1, stride],
            name=self.backbone + "/exit_flow/block1")
        s = s * stride
        stride = self.strides[1] if check_stride(s * self.strides[1],
                                                 self.output_stride) else 1
        self._exit_flow_2 = Xception_Block(
            self.chns[0][-1],
            self.chns[1], [1, 1, stride],
            dilation=2,
            has_skip=False,
            activation_fn_in_separable_conv=True,
            name=self.backbone + "/exit_flow/block2")
        s = s * stride
        self.stride = s

        self._drop = Dropout(p=0.5, mode="downscale_in_infer")
        self._pool = AdaptiveAvgPool2D(1)
        self._fc = Linear(
            self.chns[1][-1],
            class_num,
            weight_attr=ParamAttr(name="fc_weights"),
            bias_attr=ParamAttr(name="fc_bias"))

    def forward(self, inputs):
        x = self._conv1(inputs)
        x = self._conv2(x)
        for ef in self.entry_flow:
            x = ef(x)
        for mf in self.middle_flow:
            x = mf(x)
        x = self._exit_flow_1(x)
        x = self._exit_flow_2(x)
        x = self._drop(x)
        x = self._pool(x)
        x = paddle.squeeze(x, axis=[2, 3])
        x = self._fc(x)
        return x
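
# Forward shape trace for a [N, 3, 224, 224] input (a hand-derived sketch of
# the xception_65 config): conv1/conv2 give 112x112x64, the three entry-flow
# blocks (stride 2 each) end at 14x14x728, the middle flow keeps that shape,
# exit block 1 strides to 7x7x1024, exit block 2 widens to 7x7x2048, and
# global pooling + squeeze + FC produce [N, class_num].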


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "Unsupported type for `pretrained`. Please use a `bool` or a `str` path to the weights."
        )


def Xception41_deeplab(pretrained=False, use_ssld=False, **kwargs):
    model = XceptionDeeplab('xception_41', **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["Xception41_deeplab"], use_ssld=use_ssld)
    return model


def Xception65_deeplab(pretrained=False, use_ssld=False, **kwargs):
    model = XceptionDeeplab("xception_65", **kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["Xception65_deeplab"], use_ssld=use_ssld)
    return model
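

# A minimal smoke test (an illustrative sketch, not part of the upstream
# file): build the untrained xception_65 variant and run one random batch.
if __name__ == "__main__":
    net = Xception65_deeplab(pretrained=False, class_num=1000)
    net.eval()
    img = paddle.rand([1, 3, 224, 224])
    with paddle.no_grad():
        out = net(img)
    print(out.shape)  # expected: [1, 1000]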