deeplabv3p.py
# coding: utf8
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import OrderedDict

import paddle.fluid as fluid

from .model_utils.libs import scope, name_scope
from .model_utils.libs import bn, bn_relu, relu
from .model_utils.libs import conv, max_pool, deconv
from .model_utils.libs import separate_conv
from .model_utils.libs import sigmoid_to_softmax
from .model_utils.loss import softmax_with_loss
from .model_utils.loss import dice_loss
from .model_utils.loss import bce_loss

import paddlex.utils.logging as logging
from paddlex.cv.nets.xception import Xception
from paddlex.cv.nets.mobilenet_v2 import MobileNetV2


class DeepLabv3p(object):
    """DeepLabv3+ semantic segmentation model.

    `"Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1802.02611>`

    Args:
        num_classes (int): Number of classes.
        backbone (paddlex.cv.nets): Backbone network that computes the feature maps for
            DeepLabv3+.
        mode (str): Running mode of the network; it determines the inputs and outputs that
            are built.
            When mode is 'train', the inputs are image (-1, 3, -1, -1) and label
            (-1, 1, -1, -1), and the network returns loss.
            When mode is 'eval', the inputs are image (-1, 3, -1, -1) and label
            (-1, 1, -1, -1), and the network returns loss, pred (a prediction of the same
            size as the input label whose values are the predicted classes), label, and
            mask (a boolean mask of the non-ignored pixels, same size as label).
            When mode is 'test', the input is image (-1, 3, -1, -1), and the network returns
            pred (-1, 1, -1, -1) and logit (-1, num_classes, -1, -1), whose channel
            dimension holds the per-class probabilities.
        output_stride (int): Downsampling factor of the backbone output feature map relative
            to the input, usually 8 or 16.
        aspp_with_sep_conv (bool): Whether to use separable convolutions in the ASPP module.
        decoder_use_sep_conv (bool): Whether to use separable convolutions in the decoder
            module.
        encoder_with_aspp (bool): Whether to use the ASPP module in the encoder stage.
        enable_decoder (bool): Whether to use the decoder module.
        use_bce_loss (bool): Whether to use BCE loss as the loss function; only applicable
            to binary segmentation. Can be used together with dice loss.
        use_dice_loss (bool): Whether to use dice loss as the loss function; only applicable
            to binary segmentation. Can be used together with BCE loss. When both
            use_bce_loss and use_dice_loss are False, the cross-entropy loss is used.
        class_weight (list|str): Per-class weights for the cross-entropy loss. When
            class_weight is a list, its length should equal num_classes. When class_weight
            is a str, class_weight.lower() should be 'dynamic'; the weights are then
            computed per batch from the per-class pixel ratios, each class being weighted
            as its ratio * num_classes. When class_weight is None (the default), every
            class has weight 1, i.e. the ordinary cross-entropy loss.
        ignore_index (int): Value to be ignored in the label; pixels whose label equals
            ignore_index do not contribute to the loss.

    Raises:
        ValueError: If use_bce_loss or use_dice_loss is True and num_classes > 2.
        ValueError: If class_weight is a list whose length does not equal num_classes, or
            class_weight is a str and class_weight.lower() is not 'dynamic'.
        TypeError: If class_weight is not None and is neither a list nor a str.
    """

    def __init__(self,
                 num_classes,
                 backbone,
                 mode='train',
                 output_stride=16,
                 aspp_with_sep_conv=True,
                 decoder_use_sep_conv=True,
                 encoder_with_aspp=True,
                 enable_decoder=True,
                 use_bce_loss=False,
                 use_dice_loss=False,
                 class_weight=None,
                 ignore_index=255):
        # dice loss and bce loss are only applicable to binary segmentation
        if num_classes > 2 and (use_bce_loss or use_dice_loss):
            raise ValueError(
                "dice loss and bce loss are only applicable to binary classification"
            )
        if class_weight is not None:
            if isinstance(class_weight, list):
                if len(class_weight) != num_classes:
                    raise ValueError(
                        "Length of class_weight should be equal to number of classes"
                    )
            elif isinstance(class_weight, str):
                if class_weight.lower() != 'dynamic':
                    raise ValueError(
                        "if class_weight is a string, it must be 'dynamic'!")
            else:
                raise TypeError(
                    'Expect class_weight to be a list or string, but received {}'.
                    format(type(class_weight)))
        self.num_classes = num_classes
        self.backbone = backbone
        self.mode = mode
        self.use_bce_loss = use_bce_loss
        self.use_dice_loss = use_dice_loss
        self.class_weight = class_weight
        self.ignore_index = ignore_index
        self.output_stride = output_stride
        self.aspp_with_sep_conv = aspp_with_sep_conv
        self.decoder_use_sep_conv = decoder_use_sep_conv
        self.encoder_with_aspp = encoder_with_aspp
        self.enable_decoder = enable_decoder

    def _encoder(self, input):
        # Encoder using the ASPP architecture: image pooling, a 1x1 conv and three parallel
        # atrous convolutions at different rates, concatenated and fused by a 1x1 conv.
        # aspp_with_sep_conv: defaults to True and uses depthwise separable convolutions,
        #     otherwise ordinary convolutions are used.
        # output_stride: downsampling factor, 8 or 16, which determines aspp_ratios.
        # aspp_ratios: dilation rates of the atrous convolutions in the ASPP module.
        if self.output_stride == 16:
            aspp_ratios = [6, 12, 18]
        elif self.output_stride == 8:
            aspp_ratios = [12, 24, 36]
        else:
            raise Exception("DeepLabv3p only supports stride 8 or 16")
        param_attr = fluid.ParamAttr(
            name=name_scope + 'weights',
            regularizer=None,
            initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.06))
        with scope('encoder'):
            channel = 256
            with scope("image_pool"):
                image_avg = fluid.layers.reduce_mean(
                    input, [2, 3], keep_dim=True)
                image_avg = bn_relu(
                    conv(
                        image_avg,
                        channel,
                        1,
                        1,
                        groups=1,
                        padding=0,
                        param_attr=param_attr))
                input_shape = fluid.layers.shape(input)
                image_avg = fluid.layers.resize_bilinear(
                    image_avg, input_shape[2:])
            with scope("aspp0"):
                aspp0 = bn_relu(
                    conv(
                        input,
                        channel,
                        1,
                        1,
                        groups=1,
                        padding=0,
                        param_attr=param_attr))
            with scope("aspp1"):
                if self.aspp_with_sep_conv:
                    aspp1 = separate_conv(
                        input,
                        channel,
                        1,
                        3,
                        dilation=aspp_ratios[0],
                        act=relu)
                else:
                    aspp1 = bn_relu(
                        conv(
                            input,
                            channel,
                            stride=1,
                            filter_size=3,
                            dilation=aspp_ratios[0],
                            padding=aspp_ratios[0],
                            param_attr=param_attr))
            with scope("aspp2"):
                if self.aspp_with_sep_conv:
                    aspp2 = separate_conv(
                        input,
                        channel,
                        1,
                        3,
                        dilation=aspp_ratios[1],
                        act=relu)
                else:
                    aspp2 = bn_relu(
                        conv(
                            input,
                            channel,
                            stride=1,
                            filter_size=3,
                            dilation=aspp_ratios[1],
                            padding=aspp_ratios[1],
                            param_attr=param_attr))
            with scope("aspp3"):
                if self.aspp_with_sep_conv:
                    aspp3 = separate_conv(
                        input,
                        channel,
                        1,
                        3,
                        dilation=aspp_ratios[2],
                        act=relu)
                else:
                    aspp3 = bn_relu(
                        conv(
                            input,
                            channel,
                            stride=1,
                            filter_size=3,
                            dilation=aspp_ratios[2],
                            padding=aspp_ratios[2],
                            param_attr=param_attr))
            with scope("concat"):
                data = fluid.layers.concat(
                    [image_avg, aspp0, aspp1, aspp2, aspp3], axis=1)
                data = bn_relu(
                    conv(
                        data,
                        channel,
                        1,
                        1,
                        groups=1,
                        padding=0,
                        param_attr=param_attr))
                data = fluid.layers.dropout(data, 0.9)
        return data

    def _decoder(self, encode_data, decode_shortcut):
        # Decoder.
        # encode_data: output of the encoder.
        # decode_shortcut: skip branch taken from the backbone; encode_data is resized and
        #     concatenated with it.
        # decoder_use_sep_conv: defaults to True, in which case the concat is followed by
        #     two separable convolutions; otherwise ordinary convolutions are used.
        param_attr = fluid.ParamAttr(
            name=name_scope + 'weights',
            regularizer=None,
            initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.06))
        with scope('decoder'):
            with scope('concat'):
                decode_shortcut = bn_relu(
                    conv(
                        decode_shortcut,
                        48,
                        1,
                        1,
                        groups=1,
                        padding=0,
                        param_attr=param_attr))
                decode_shortcut_shape = fluid.layers.shape(decode_shortcut)
                encode_data = fluid.layers.resize_bilinear(
                    encode_data, decode_shortcut_shape[2:])
                encode_data = fluid.layers.concat(
                    [encode_data, decode_shortcut], axis=1)
            if self.decoder_use_sep_conv:
                with scope("separable_conv1"):
                    encode_data = separate_conv(
                        encode_data, 256, 1, 3, dilation=1, act=relu)
                with scope("separable_conv2"):
                    encode_data = separate_conv(
                        encode_data, 256, 1, 3, dilation=1, act=relu)
            else:
                with scope("decoder_conv1"):
                    encode_data = bn_relu(
                        conv(
                            encode_data,
                            256,
                            stride=1,
                            filter_size=3,
                            dilation=1,
                            padding=1,
                            param_attr=param_attr))
                with scope("decoder_conv2"):
                    encode_data = bn_relu(
                        conv(
                            encode_data,
                            256,
                            stride=1,
                            filter_size=3,
                            dilation=1,
                            padding=1,
                            param_attr=param_attr))
        return encode_data

    def _get_loss(self, logit, label, mask):
        avg_loss = 0
        if not (self.use_dice_loss or self.use_bce_loss):
            avg_loss += softmax_with_loss(
                logit,
                label,
                mask,
                num_classes=self.num_classes,
                weight=self.class_weight,
                ignore_index=self.ignore_index)
        else:
            if self.use_dice_loss:
                avg_loss += dice_loss(logit, label, mask)
            if self.use_bce_loss:
                avg_loss += bce_loss(
                    logit, label, mask, ignore_index=self.ignore_index)
        return avg_loss

    def generate_inputs(self):
        inputs = OrderedDict()
        inputs['image'] = fluid.data(
            dtype='float32', shape=[None, 3, None, None], name='image')
        if self.mode == 'train':
            inputs['label'] = fluid.data(
                dtype='int32', shape=[None, 1, None, None], name='label')
        elif self.mode == 'eval':
            inputs['label'] = fluid.data(
                dtype='int32', shape=[None, 1, None, None], name='label')
        return inputs

    def build_net(self, inputs):
        # For binary segmentation, when dice_loss or bce_loss is selected the final logit
        # is given a single output channel.
        if self.use_dice_loss or self.use_bce_loss:
            self.num_classes = 1
        image = inputs['image']
        data, decode_shortcuts = self.backbone(image)
        decode_shortcut = decode_shortcuts[self.backbone.decode_points]
        # Encoder / decoder setup.
        if self.encoder_with_aspp:
            data = self._encoder(data)
        if self.enable_decoder:
            data = self._decoder(data, decode_shortcut)
        # Set the output channels of the last conv layer according to the number of classes,
        # then resize the logit back to the original image size.
        param_attr = fluid.ParamAttr(
            name=name_scope + 'weights',
            regularizer=fluid.regularizer.L2DecayRegularizer(
                regularization_coeff=0.0),
            initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.01))
        with scope('logit'):
            with fluid.name_scope('last_conv'):
                logit = conv(
                    data,
                    self.num_classes,
                    1,
                    stride=1,
                    padding=0,
                    bias_attr=True,
                    param_attr=param_attr)
            image_shape = fluid.layers.shape(image)
            logit = fluid.layers.resize_bilinear(logit, image_shape[2:])
            if self.num_classes == 1:
                out = sigmoid_to_softmax(logit)
                out = fluid.layers.transpose(out, [0, 2, 3, 1])
            else:
                out = fluid.layers.transpose(logit, [0, 2, 3, 1])
            pred = fluid.layers.argmax(out, axis=3)
            pred = fluid.layers.unsqueeze(pred, axes=[3])
            if self.mode == 'train':
                label = inputs['label']
                mask = label != self.ignore_index
                return self._get_loss(logit, label, mask)
            elif self.mode == 'eval':
                label = inputs['label']
                mask = label != self.ignore_index
                loss = self._get_loss(logit, label, mask)
                return loss, pred, label, mask
            else:
                if self.num_classes == 1:
                    logit = sigmoid_to_softmax(logit)
                else:
                    logit = fluid.layers.softmax(logit, axis=1)
                return pred, logit
        return logit
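

# A minimal usage sketch, guarded so it never runs on import. It only illustrates the
# interface build_net() expects from a backbone: calling backbone(image) must return a
# feature map plus a collection of shortcut tensors, and the backbone must expose a
# `decode_points` attribute used to pick the shortcut fed to the decoder. `_ToyBackbone`
# below is a hypothetical stand-in built from plain fluid layers; in practice a real
# backbone such as Xception or MobileNetV2 (imported above) would be used. This assumes a
# Paddle 1.x (fluid) environment and that the module is imported from within its package
# so the relative imports above resolve.
if __name__ == '__main__':

    class _ToyBackbone(object):
        decode_points = 'low_level'

        def __call__(self, image):
            # A 1/4-resolution shortcut and a 1/16-resolution feature map, mirroring what
            # DeepLabv3+ expects from a real backbone at output_stride=16.
            low = fluid.layers.conv2d(image, 48, 3, stride=4, padding=1)
            high = fluid.layers.conv2d(low, 320, 3, stride=4, padding=1)
            return high, {'low_level': low}

    model = DeepLabv3p(num_classes=2, backbone=_ToyBackbone(), mode='train')
    inputs = model.generate_inputs()  # OrderedDict with 'image' and 'label' placeholders
    loss = model.build_net(inputs)    # in 'train' mode build_net returns the loss variable
    print(loss)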