# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.nn as nn
import paddle.nn.functional as F

from paddlex.paddleseg.cvlibs import manager
from paddlex.paddleseg.utils import utils
from paddlex.paddleseg.models import layers

__all__ = ["Xception41_deeplab", "Xception65_deeplab", "Xception71_deeplab"]

def check_data(data, number):
    """Broadcast an int to a list of length `number`, or validate the length."""
    if isinstance(data, int):
        return [data] * number
    assert len(data) == number
    return data


def check_stride(s, os):
    """Return True while the accumulated stride `s` is still within the target output stride `os`."""
    return s <= os


def check_points(count, points):
    """Return True if `count` matches `points` (an int or a list of ints)."""
    if points is None:
        return False
    if isinstance(points, list):
        return count in points
    return count == points
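
# Illustrative examples of the helpers above (comments only):
#   check_data(728, 3)        -> [728, 728, 728]  broadcast an int to a list
#   check_data([2, 2, 2], 3)  -> [2, 2, 2]        length-checked pass-through
#   check_stride(4, 16)       -> True             accumulated stride 4 is still
#                                                 below the target output stride
#   check_points(5, [3, 5])   -> True             count 5 is one of the requested points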

def gen_bottleneck_params(backbone='xception_65'):
    if backbone == 'xception_65':
        bottleneck_params = {
            "entry_flow": (3, [2, 2, 2], [128, 256, 728]),
            "middle_flow": (16, 1, 728),
            "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]])
        }
    elif backbone == 'xception_41':
        bottleneck_params = {
            "entry_flow": (3, [2, 2, 2], [128, 256, 728]),
            "middle_flow": (8, 1, 728),
            "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]])
        }
    elif backbone == 'xception_71':
        bottleneck_params = {
            "entry_flow": (5, [2, 1, 2, 1, 2], [128, 256, 256, 728, 728]),
            "middle_flow": (16, 1, 728),
            "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]])
        }
    else:
        raise ValueError(
            "Xception backbone only supports xception_41/xception_65/xception_71"
        )
    return bottleneck_params
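
# Each flow entry reads as (block_count, stride_per_block, channels_per_block);
# e.g. for xception_65 the entry flow stacks 3 blocks, each ending with a
# stride-2 separable conv, producing 128/256/728 channels in turn.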

class ConvBNLayer(nn.Layer):
    """Conv2D followed by SyncBatchNorm and an optional activation."""

    def __init__(self,
                 input_channels,
                 output_channels,
                 filter_size,
                 stride=1,
                 padding=0,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = nn.Conv2D(
            in_channels=input_channels,
            out_channels=output_channels,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            bias_attr=False)
        self._bn = layers.SyncBatchNorm(
            num_features=output_channels, epsilon=1e-3, momentum=0.99)
        self._act_op = layers.Activation(act=act)

    def forward(self, inputs):
        return self._act_op(self._bn(self._conv(inputs)))

class Seperate_Conv(nn.Layer):
    """Depthwise-separable convolution: a depthwise conv (groups=input_channels)
    followed by a pointwise 1x1 conv, each with its own BN and activation."""

    def __init__(self,
                 input_channels,
                 output_channels,
                 stride,
                 filter,
                 dilation=1,
                 act=None,
                 name=None):
        super(Seperate_Conv, self).__init__()

        self._conv1 = nn.Conv2D(
            in_channels=input_channels,
            out_channels=input_channels,
            kernel_size=filter,
            stride=stride,
            groups=input_channels,
            padding=(filter // 2) * dilation,
            dilation=dilation,
            bias_attr=False)
        self._bn1 = layers.SyncBatchNorm(
            input_channels, epsilon=1e-3, momentum=0.99)
        self._act_op1 = layers.Activation(act=act)
        self._conv2 = nn.Conv2D(
            input_channels,
            output_channels,
            1,
            stride=1,
            groups=1,
            padding=0,
            bias_attr=False)
        self._bn2 = layers.SyncBatchNorm(
            output_channels, epsilon=1e-3, momentum=0.99)
        self._act_op2 = layers.Activation(act=act)

    def forward(self, inputs):
        x = self._conv1(inputs)
        x = self._bn1(x)
        x = self._act_op1(x)
        x = self._conv2(x)
        x = self._bn2(x)
        x = self._act_op2(x)
        return x
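
# Note on the depthwise padding above: for an odd kernel k and dilation d,
# padding = (k // 2) * d keeps the spatial size unchanged when stride == 1
# (e.g. k=3, d=2 -> padding 2, matching the dilated kernel's effective
# extent of 5).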

class Xception_Block(nn.Layer):
    """A residual block of three separable convolutions with an optional
    identity or 1x1-projected shortcut."""

    def __init__(self,
                 input_channels,
                 output_channels,
                 strides=1,
                 filter_size=3,
                 dilation=1,
                 skip_conv=True,
                 has_skip=True,
                 activation_fn_in_separable_conv=False,
                 name=None):
        super(Xception_Block, self).__init__()

        repeat_number = 3
        output_channels = check_data(output_channels, repeat_number)
        filter_size = check_data(filter_size, repeat_number)
        strides = check_data(strides, repeat_number)

        self.has_skip = has_skip
        self.skip_conv = skip_conv
        self.activation_fn_in_separable_conv = activation_fn_in_separable_conv
        if not activation_fn_in_separable_conv:
            self._conv1 = Seperate_Conv(
                input_channels,
                output_channels[0],
                stride=strides[0],
                filter=filter_size[0],
                dilation=dilation,
                name=name + "/separable_conv1")
            self._conv2 = Seperate_Conv(
                output_channels[0],
                output_channels[1],
                stride=strides[1],
                filter=filter_size[1],
                dilation=dilation,
                name=name + "/separable_conv2")
            self._conv3 = Seperate_Conv(
                output_channels[1],
                output_channels[2],
                stride=strides[2],
                filter=filter_size[2],
                dilation=dilation,
                name=name + "/separable_conv3")
        else:
            self._conv1 = Seperate_Conv(
                input_channels,
                output_channels[0],
                stride=strides[0],
                filter=filter_size[0],
                act="relu",
                dilation=dilation,
                name=name + "/separable_conv1")
            self._conv2 = Seperate_Conv(
                output_channels[0],
                output_channels[1],
                stride=strides[1],
                filter=filter_size[1],
                act="relu",
                dilation=dilation,
                name=name + "/separable_conv2")
            self._conv3 = Seperate_Conv(
                output_channels[1],
                output_channels[2],
                stride=strides[2],
                filter=filter_size[2],
                act="relu",
                dilation=dilation,
                name=name + "/separable_conv3")

        if has_skip and skip_conv:
            self._short = ConvBNLayer(
                input_channels,
                output_channels[-1],
                1,
                stride=strides[-1],
                padding=0,
                name=name + "/shortcut")

    def forward(self, inputs):
        if not self.activation_fn_in_separable_conv:
            # Pre-activation variant: ReLU before each separable conv.
            x = F.relu(inputs)
            x = self._conv1(x)
            x = F.relu(x)
            x = self._conv2(x)
            x = F.relu(x)
            x = self._conv3(x)
        else:
            # Activations live inside the separable convs themselves.
            x = self._conv1(inputs)
            x = self._conv2(x)
            x = self._conv3(x)
        if not self.has_skip:
            return x
        if self.skip_conv:
            skip = self._short(inputs)
        else:
            skip = inputs
        return x + skip
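
# Shortcut variants of Xception_Block:
#   has_skip=False                  -> plain stack of three separable convs
#   has_skip=True, skip_conv=True   -> 1x1 ConvBN projection shortcut
#   has_skip=True, skip_conv=False  -> identity shortcut (used in the middle flow)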

class XceptionDeeplab(nn.Layer):
    """
    The Xception backbone of DeepLabv3+, implemented with PaddlePaddle.

    The original article refers to
    Liang-Chieh Chen, et al. "Encoder-Decoder with Atrous Separable Convolution
    for Semantic Image Segmentation" (https://arxiv.org/abs/1802.02611).

    Args:
        backbone (str): Which variant of Xception_DeepLab to select. It should be
            one of ('xception_41', 'xception_65', 'xception_71').
        pretrained (str, optional): The path of the pretrained model. Default: None.
        output_stride (int, optional): The stride of the output features relative
            to the input images, either 8 or 16. Default: 16.
    """

    def __init__(self, backbone, pretrained=None, output_stride=16):
        super(XceptionDeeplab, self).__init__()

        bottleneck_params = gen_bottleneck_params(backbone)
        self.backbone = backbone
        self.feat_channels = [128, 2048]

        self._conv1 = ConvBNLayer(
            3,
            32,
            3,
            stride=2,
            padding=1,
            act="relu",
            name=self.backbone + "/entry_flow/conv1")
        self._conv2 = ConvBNLayer(
            32,
            64,
            3,
            stride=1,
            padding=1,
            act="relu",
            name=self.backbone + "/entry_flow/conv2")
        """
        bottleneck_params = {
            "entry_flow": (3, [2, 2, 2], [128, 256, 728]),
            "middle_flow": (16, 1, 728),
            "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]])
        }

        if output_stride == 16:
            entry_block3_stride = 2
            middle_block_dilation = 1
            exit_block_dilations = (1, 2)
        elif output_stride == 8:
            entry_block3_stride = 1
            middle_block_dilation = 2
            exit_block_dilations = (2, 4)
        """
        self.block_num = bottleneck_params["entry_flow"][0]
        self.strides = bottleneck_params["entry_flow"][1]
        self.chns = bottleneck_params["entry_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)

        self.entry_flow = []
        self.middle_flow = []

        self.stride = 2
        self.output_stride = output_stride
        s = self.stride

        for i in range(self.block_num):
            # Cap the accumulated stride at output_stride: once it is reached,
            # subsequent blocks fall back to stride 1.
            stride = self.strides[i] if check_stride(s * self.strides[i],
                                                     self.output_stride) else 1
            xception_block = self.add_sublayer(
                self.backbone + "/entry_flow/block" + str(i + 1),
                Xception_Block(
                    input_channels=64 if i == 0 else self.chns[i - 1],
                    output_channels=self.chns[i],
                    # The third separable conv carries the capped per-block
                    # stride computed above.
                    strides=[1, 1, stride],
                    name=self.backbone + "/entry_flow/block" + str(i + 1)))
            self.entry_flow.append(xception_block)
            s = s * stride
        self.stride = s

        self.block_num = bottleneck_params["middle_flow"][0]
        self.strides = bottleneck_params["middle_flow"][1]
        self.chns = bottleneck_params["middle_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)
        s = self.stride

        for i in range(self.block_num):
            stride = self.strides[i] if check_stride(s * self.strides[i],
                                                     self.output_stride) else 1
            xception_block = self.add_sublayer(
                self.backbone + "/middle_flow/block" + str(i + 1),
                Xception_Block(
                    input_channels=728,
                    output_channels=728,
                    strides=[1, 1, stride],
                    skip_conv=False,
                    name=self.backbone + "/middle_flow/block" + str(i + 1)))
            self.middle_flow.append(xception_block)
            s = s * stride
        self.stride = s

        self.block_num = bottleneck_params["exit_flow"][0]
        self.strides = bottleneck_params["exit_flow"][1]
        self.chns = bottleneck_params["exit_flow"][2]
        self.strides = check_data(self.strides, self.block_num)
        self.chns = check_data(self.chns, self.block_num)
        s = self.stride
        stride = self.strides[0] if check_stride(s * self.strides[0],
                                                 self.output_stride) else 1
        self._exit_flow_1 = Xception_Block(
            728,
            self.chns[0], [1, 1, stride],
            name=self.backbone + "/exit_flow/block1")
        s = s * stride
        stride = self.strides[1] if check_stride(s * self.strides[1],
                                                 self.output_stride) else 1
        self._exit_flow_2 = Xception_Block(
            self.chns[0][-1],
            self.chns[1], [1, 1, stride],
            dilation=2,
            has_skip=False,
            activation_fn_in_separable_conv=True,
            name=self.backbone + "/exit_flow/block2")

        self.pretrained = pretrained
        self.init_weight()

    def forward(self, inputs):
        x = self._conv1(inputs)
        x = self._conv2(x)
        feat_list = []
        for i, ef in enumerate(self.entry_flow):
            x = ef(x)
            if i == 0:
                # The first entry-flow block output is the low-level feature
                # (stride 4) consumed by the DeepLabv3+ decoder.
                feat_list.append(x)
        for mf in self.middle_flow:
            x = mf(x)
        x = self._exit_flow_1(x)
        x = self._exit_flow_2(x)
        feat_list.append(x)
        return feat_list

    def init_weight(self):
        if self.pretrained is not None:
            utils.load_pretrained_model(self, self.pretrained)

@manager.BACKBONES.add_component
def Xception41_deeplab(**args):
    model = XceptionDeeplab('xception_41', **args)
    return model


@manager.BACKBONES.add_component
def Xception65_deeplab(**args):
    model = XceptionDeeplab("xception_65", **args)
    return model


@manager.BACKBONES.add_component
def Xception71_deeplab(**args):
    model = XceptionDeeplab("xception_71", **args)
    return model
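
# Minimal usage sketch: build the 65-layer backbone and inspect the two
# feature maps DeepLabv3+ consumes. The 512x512 input size is an arbitrary
# choice for illustration.
if __name__ == "__main__":
    import paddle

    model = Xception65_deeplab(output_stride=16)
    x = paddle.randn([1, 3, 512, 512])
    low_level_feat, high_level_feat = model(x)
    print(low_level_feat.shape)   # [1, 128, 128, 128] -> stride 4
    print(high_level_feat.shape)  # [1, 2048, 32, 32]  -> stride 16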