rpn_head.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn.initializer import Normal

from paddlex.ppdet.core.workspace import register
from .anchor_generator import AnchorGenerator
from .target_layer import RPNTargetAssign
from .proposal_generator import ProposalGenerator
from ..cls_utils import _get_class_default_kwargs


class RPNFeat(nn.Layer):
    """
    Feature extraction in RPN head

    Args:
        in_channel (int): Input channel
        out_channel (int): Output channel
    """

    def __init__(self, in_channel=1024, out_channel=1024):
        super(RPNFeat, self).__init__()
        # rpn feat is shared with each level
        self.rpn_conv = nn.Conv2D(
            in_channels=in_channel,
            out_channels=out_channel,
            kernel_size=3,
            padding=1,
            weight_attr=paddle.ParamAttr(initializer=Normal(
                mean=0., std=0.01)))
        self.rpn_conv.skip_quant = True

    def forward(self, feats):
        rpn_feats = []
        for feat in feats:
            rpn_feats.append(F.relu(self.rpn_conv(feat)))
        return rpn_feats


@register
class RPNHead(nn.Layer):
    """
    Region Proposal Network

    Args:
        anchor_generator (dict): configuration of anchor generation
        rpn_target_assign (dict): configuration of RPN target assignment
        train_proposal (dict): configuration of proposal generation
            at the training stage
        test_proposal (dict): configuration of proposal generation
            at the prediction stage
        in_channel (int): channel of the input feature maps, which can be
            derived by from_config
    """
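    # A minimal construction sketch (an assumption for illustration only; the
    # valid keys are defined by AnchorGenerator, RPNTargetAssign and
    # ProposalGenerator in this package, and the FPN-style anchor settings
    # below are not required by this head):
    #
    #   head = RPNHead(
    #       anchor_generator={
    #           'anchor_sizes': [[32], [64], [128], [256], [512]],
    #           'aspect_ratios': [0.5, 1.0, 2.0],
    #           'strides': [4., 8., 16., 32., 64.]
    #       },
    #       in_channel=256)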
    __shared__ = ['export_onnx']

    def __init__(self,
                 anchor_generator=_get_class_default_kwargs(AnchorGenerator),
                 rpn_target_assign=_get_class_default_kwargs(RPNTargetAssign),
                 train_proposal=_get_class_default_kwargs(ProposalGenerator,
                                                          12000, 2000),
                 test_proposal=_get_class_default_kwargs(ProposalGenerator),
                 in_channel=1024,
                 export_onnx=False):
        super(RPNHead, self).__init__()
        self.anchor_generator = anchor_generator
        self.rpn_target_assign = rpn_target_assign
        self.train_proposal = train_proposal
        self.test_proposal = test_proposal
        self.export_onnx = export_onnx
        if isinstance(anchor_generator, dict):
            self.anchor_generator = AnchorGenerator(**anchor_generator)
        if isinstance(rpn_target_assign, dict):
            self.rpn_target_assign = RPNTargetAssign(**rpn_target_assign)
        if isinstance(train_proposal, dict):
            self.train_proposal = ProposalGenerator(**train_proposal)
        if isinstance(test_proposal, dict):
            self.test_proposal = ProposalGenerator(**test_proposal)

        num_anchors = self.anchor_generator.num_anchors
        self.rpn_feat = RPNFeat(in_channel, in_channel)
        # rpn head is shared with each level
        # rpn roi classification scores
        self.rpn_rois_score = nn.Conv2D(
            in_channels=in_channel,
            out_channels=num_anchors,
            kernel_size=1,
            padding=0,
            weight_attr=paddle.ParamAttr(initializer=Normal(
                mean=0., std=0.01)))
        self.rpn_rois_score.skip_quant = True

        # rpn roi bbox regression deltas
        self.rpn_rois_delta = nn.Conv2D(
            in_channels=in_channel,
            out_channels=4 * num_anchors,
            kernel_size=1,
            padding=0,
            weight_attr=paddle.ParamAttr(initializer=Normal(
                mean=0., std=0.01)))
        self.rpn_rois_delta.skip_quant = True

    @classmethod
    def from_config(cls, cfg, input_shape):
        # all FPN levels share the same RPN head
        if isinstance(input_shape, (list, tuple)):
            input_shape = input_shape[0]
        return {'in_channel': input_shape.channels}

    def forward(self, feats, inputs):
        rpn_feats = self.rpn_feat(feats)
        scores = []
        deltas = []

        # each level yields objectness logits of shape [N, A, H, W] and box
        # deltas of shape [N, 4 * A, H, W], where A is the number of anchors
        # per location
        for rpn_feat in rpn_feats:
            rrs = self.rpn_rois_score(rpn_feat)
            rrd = self.rpn_rois_delta(rpn_feat)
            scores.append(rrs)
            deltas.append(rrd)

        anchors = self.anchor_generator(rpn_feats)

        rois, rois_num = self._gen_proposal(scores, deltas, anchors, inputs)

        if self.training:
            loss = self.get_loss(scores, deltas, anchors, inputs)
            return rois, rois_num, loss
        else:
            return rois, rois_num, None

    def _gen_proposal(self, scores, bbox_deltas, anchors, inputs):
        """
        scores (list[Tensor]): Multi-level scores prediction
        bbox_deltas (list[Tensor]): Multi-level deltas prediction
        anchors (list[Tensor]): Multi-level anchors
        inputs (dict): ground truth info
        """
        prop_gen = self.train_proposal if self.training else self.test_proposal
        im_shape = inputs['im_shape']

        # Collect multi-level proposals for each image in the batch
        # and keep the top-k of them as the final output
        if self.export_onnx:
            # bs = 1 when exporting onnx
            onnx_rpn_rois_list = []
            onnx_rpn_prob_list = []
            onnx_rpn_rois_num_list = []

            for rpn_score, rpn_delta, anchor in zip(scores, bbox_deltas,
                                                    anchors):
                onnx_rpn_rois, onnx_rpn_rois_prob, onnx_rpn_rois_num, onnx_post_nms_top_n = prop_gen(
                    scores=rpn_score[0:1],
                    bbox_deltas=rpn_delta[0:1],
                    anchors=anchor,
                    im_shape=im_shape[0:1])
                onnx_rpn_rois_list.append(onnx_rpn_rois)
                onnx_rpn_prob_list.append(onnx_rpn_rois_prob)
                onnx_rpn_rois_num_list.append(onnx_rpn_rois_num)

            onnx_rpn_rois = paddle.concat(onnx_rpn_rois_list)
            onnx_rpn_prob = paddle.concat(onnx_rpn_prob_list).flatten()

            onnx_top_n = paddle.to_tensor(onnx_post_nms_top_n).cast('int32')
            onnx_num_rois = paddle.shape(onnx_rpn_prob)[0].cast('int32')
            k = paddle.minimum(onnx_top_n, onnx_num_rois)
            onnx_topk_prob, onnx_topk_inds = paddle.topk(onnx_rpn_prob, k)
            onnx_topk_rois = paddle.gather(onnx_rpn_rois, onnx_topk_inds)
            # TODO(wangguanzhong): bs_rois_collect for export_onnx is kept
            # outside this conditional branch due to dy2static limitations in
            # paddle. Will fix it when updating the paddle framework.
            # bs_rois_collect = [onnx_topk_rois]
            # bs_rois_num_collect = paddle.shape(onnx_topk_rois)[0]
        else:
            bs_rois_collect = []
            bs_rois_num_collect = []

            batch_size = paddle.slice(paddle.shape(im_shape), [0], [0], [1])

            # Generate proposals for each level and each image.
            # Images are processed one at a time to avoid sorting boxes
            # across different images.
            for i in range(batch_size):
                rpn_rois_list = []
                rpn_prob_list = []
                rpn_rois_num_list = []

                for rpn_score, rpn_delta, anchor in zip(scores, bbox_deltas,
                                                        anchors):
                    rpn_rois, rpn_rois_prob, rpn_rois_num, post_nms_top_n = prop_gen(
                        scores=rpn_score[i:i + 1],
                        bbox_deltas=rpn_delta[i:i + 1],
                        anchors=anchor,
                        im_shape=im_shape[i:i + 1])
                    rpn_rois_list.append(rpn_rois)
                    rpn_prob_list.append(rpn_rois_prob)
                    rpn_rois_num_list.append(rpn_rois_num)

                if len(scores) > 1:
                    rpn_rois = paddle.concat(rpn_rois_list)
                    rpn_prob = paddle.concat(rpn_prob_list).flatten()

                    num_rois = paddle.shape(rpn_prob)[0].cast('int32')
                    if num_rois > post_nms_top_n:
                        topk_prob, topk_inds = paddle.topk(rpn_prob,
                                                           post_nms_top_n)
                        topk_rois = paddle.gather(rpn_rois, topk_inds)
                    else:
                        topk_rois = rpn_rois
                        topk_prob = rpn_prob
                else:
                    topk_rois = rpn_rois_list[0]
                    topk_prob = rpn_prob_list[0].flatten()

                bs_rois_collect.append(topk_rois)
                bs_rois_num_collect.append(paddle.shape(topk_rois)[0])

            bs_rois_num_collect = paddle.concat(bs_rois_num_collect)

        if self.export_onnx:
            output_rois = [onnx_topk_rois]
            output_rois_num = paddle.shape(onnx_topk_rois)[0]
        else:
            output_rois = bs_rois_collect
            output_rois_num = bs_rois_num_collect

        return output_rois, output_rois_num

    def get_loss(self, pred_scores, pred_deltas, anchors, inputs):
        """
        pred_scores (list[Tensor]): Multi-level scores prediction
        pred_deltas (list[Tensor]): Multi-level deltas prediction
        anchors (list[Tensor]): Multi-level anchors
        inputs (dict): ground truth info, including im, gt_bbox, gt_score
        """
        anchors = [paddle.reshape(a, shape=(-1, 4)) for a in anchors]
        anchors = paddle.concat(anchors)

        scores = [
            paddle.reshape(
                paddle.transpose(
                    v, perm=[0, 2, 3, 1]),
                shape=(v.shape[0], -1, 1)) for v in pred_scores
        ]
        scores = paddle.concat(scores, axis=1)

        deltas = [
            paddle.reshape(
                paddle.transpose(
                    v, perm=[0, 2, 3, 1]),
                shape=(v.shape[0], -1, 4)) for v in pred_deltas
        ]
        deltas = paddle.concat(deltas, axis=1)

        score_tgt, bbox_tgt, loc_tgt, norm = self.rpn_target_assign(inputs,
                                                                    anchors)

        scores = paddle.reshape(x=scores, shape=(-1, ))
        deltas = paddle.reshape(x=deltas, shape=(-1, 4))

        score_tgt = paddle.concat(score_tgt)
        score_tgt.stop_gradient = True

        pos_mask = score_tgt == 1
        pos_ind = paddle.nonzero(pos_mask)

        valid_mask = score_tgt >= 0
        valid_ind = paddle.nonzero(valid_mask)

        # cls loss
        if valid_ind.shape[0] == 0:
            loss_rpn_cls = paddle.zeros([1], dtype='float32')
        else:
            score_pred = paddle.gather(scores, valid_ind)
            score_label = paddle.gather(score_tgt, valid_ind).cast('float32')
            score_label.stop_gradient = True
            loss_rpn_cls = F.binary_cross_entropy_with_logits(
                logit=score_pred, label=score_label, reduction="sum")

        # reg loss
        if pos_ind.shape[0] == 0:
            loss_rpn_reg = paddle.zeros([1], dtype='float32')
        else:
            loc_pred = paddle.gather(deltas, pos_ind)
            loc_tgt = paddle.concat(loc_tgt)
            loc_tgt = paddle.gather(loc_tgt, pos_ind)
            loc_tgt.stop_gradient = True
            loss_rpn_reg = paddle.abs(loc_pred - loc_tgt).sum()

        return {
            'loss_rpn_cls': loss_rpn_cls / norm,
            'loss_rpn_reg': loss_rpn_reg / norm
        }
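

# A minimal usage sketch (assumptions: eval mode, a single feature level whose
# channel count matches in_channel, and an `inputs` dict that only needs
# 'im_shape' at inference time; the tensor shapes below are made up for
# illustration):
#
#   head = RPNHead(in_channel=1024)
#   head.eval()
#   feats = [paddle.rand([1, 1024, 38, 50])]   # one level of backbone features
#   inputs = {'im_shape': paddle.to_tensor([[608., 800.]])}
#   rois, rois_num, _ = head(feats, inputs)    # loss is None outside training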