# sparsercnn_head.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import copy

import paddle
import paddle.nn as nn

from paddlex.ppdet.core.workspace import register
from paddlex.ppdet.modeling.heads.roi_extractor import RoIAlign
from paddlex.ppdet.modeling.bbox_utils import delta2bbox
from .. import initializer as init

_DEFAULT_SCALE_CLAMP = math.log(100000. / 16)


class DynamicConv(nn.Layer):
    def __init__(
            self,
            head_hidden_dim,
            head_dim_dynamic,
            head_num_dynamic, ):
        super().__init__()

        self.hidden_dim = head_hidden_dim
        self.dim_dynamic = head_dim_dynamic
        self.num_dynamic = head_num_dynamic
        self.num_params = self.hidden_dim * self.dim_dynamic
        self.dynamic_layer = nn.Linear(self.hidden_dim,
                                       self.num_dynamic * self.num_params)

        self.norm1 = nn.LayerNorm(self.dim_dynamic)
        self.norm2 = nn.LayerNorm(self.hidden_dim)

        self.activation = nn.ReLU()

        pooler_resolution = 7
        num_output = self.hidden_dim * pooler_resolution**2
        self.out_layer = nn.Linear(num_output, self.hidden_dim)
        self.norm3 = nn.LayerNorm(self.hidden_dim)

    def forward(self, pro_features, roi_features):
        '''
        pro_features: (1,  N * nr_boxes, self.d_model)
        roi_features: (49, N * nr_boxes, self.d_model)
        '''
        features = roi_features.transpose(perm=[1, 0, 2])
        parameters = self.dynamic_layer(pro_features).transpose(perm=[1, 0, 2])

        param1 = parameters[:, :, :self.num_params].reshape(
            [-1, self.hidden_dim, self.dim_dynamic])
        param2 = parameters[:, :, self.num_params:].reshape(
            [-1, self.dim_dynamic, self.hidden_dim])

        features = paddle.bmm(features, param1)
        features = self.norm1(features)
        features = self.activation(features)

        features = paddle.bmm(features, param2)
        features = self.norm2(features)
        features = self.activation(features)

        features = features.flatten(1)
        features = self.out_layer(features)
        features = self.norm3(features)
        features = self.activation(features)

        return features
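
# Shape flow through DynamicConv.forward, assuming pooler_resolution = 7 (49 RoI
# positions per box) and head_num_dynamic = 2 (the SparseRCNN default, which the
# param1/param2 split above relies on):
#   roi_features (49, N*nr_boxes, hidden)  -> transpose -> (N*nr_boxes, 49, hidden)
#   bmm with param1 (N*nr_boxes, hidden, dim_dynamic)   -> (N*nr_boxes, 49, dim_dynamic)
#   bmm with param2 (N*nr_boxes, dim_dynamic, hidden)   -> (N*nr_boxes, 49, hidden)
#   flatten + out_layer                                 -> (N*nr_boxes, hidden)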


class RCNNHead(nn.Layer):
    def __init__(
            self,
            d_model,
            num_classes,
            dim_feedforward,
            nhead,
            dropout,
            head_cls,
            head_reg,
            head_dim_dynamic,
            head_num_dynamic,
            scale_clamp: float=_DEFAULT_SCALE_CLAMP,
            bbox_weights=(2.0, 2.0, 1.0, 1.0), ):
        super().__init__()

        self.d_model = d_model

        # dynamic.
        self.self_attn = nn.MultiHeadAttention(d_model, nhead, dropout=dropout)
        self.inst_interact = DynamicConv(d_model, head_dim_dynamic,
                                         head_num_dynamic)

        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = nn.ReLU()

        # cls.
        num_cls = head_cls
        cls_module = list()
        for _ in range(num_cls):
            cls_module.append(nn.Linear(d_model, d_model, bias_attr=False))
            cls_module.append(nn.LayerNorm(d_model))
            cls_module.append(nn.ReLU())
        self.cls_module = nn.LayerList(cls_module)

        # reg.
        num_reg = head_reg
        reg_module = list()
        for _ in range(num_reg):
            reg_module.append(nn.Linear(d_model, d_model, bias_attr=False))
            reg_module.append(nn.LayerNorm(d_model))
            reg_module.append(nn.ReLU())
        self.reg_module = nn.LayerList(reg_module)

        # pred.
        self.class_logits = nn.Linear(d_model, num_classes)
        self.bboxes_delta = nn.Linear(d_model, 4)
        self.scale_clamp = scale_clamp
        self.bbox_weights = bbox_weights

    def forward(self, features, bboxes, pro_features, pooler):
        """
        :param bboxes: (N, nr_boxes, 4)
        :param pro_features: (N, nr_boxes, d_model)
        """
        N, nr_boxes = bboxes.shape[:2]

        proposal_boxes = list()
        for b in range(N):
            proposal_boxes.append(bboxes[b])
        roi_num = paddle.full([N], nr_boxes).astype("int32")

        roi_features = pooler(features, proposal_boxes, roi_num)
        roi_features = roi_features.reshape(
            [N * nr_boxes, self.d_model, -1]).transpose(perm=[2, 0, 1])

        # self_att.
        pro_features = pro_features.reshape([N, nr_boxes, self.d_model])
        pro_features2 = self.self_attn(
            pro_features, pro_features, value=pro_features)
        pro_features = pro_features.transpose(perm=[1, 0, 2]) + self.dropout1(
            pro_features2.transpose(perm=[1, 0, 2]))
        pro_features = self.norm1(pro_features)

        # inst_interact.
        pro_features = pro_features.reshape(
            [nr_boxes, N, self.d_model]).transpose(perm=[1, 0, 2]).reshape(
                [1, N * nr_boxes, self.d_model])
        pro_features2 = self.inst_interact(pro_features, roi_features)
        pro_features = pro_features + self.dropout2(pro_features2)
        obj_features = self.norm2(pro_features)

        # obj_feature.
        obj_features2 = self.linear2(
            self.dropout(self.activation(self.linear1(obj_features))))
        obj_features = obj_features + self.dropout3(obj_features2)
        obj_features = self.norm3(obj_features)

        fc_feature = obj_features.transpose(perm=[1, 0, 2]).reshape(
            [N * nr_boxes, -1])
        cls_feature = fc_feature.clone()
        reg_feature = fc_feature.clone()
        for cls_layer in self.cls_module:
            cls_feature = cls_layer(cls_feature)
        for reg_layer in self.reg_module:
            reg_feature = reg_layer(reg_feature)
        class_logits = self.class_logits(cls_feature)
        bboxes_deltas = self.bboxes_delta(reg_feature)
        pred_bboxes = delta2bbox(bboxes_deltas,
                                 bboxes.reshape([-1, 4]), self.bbox_weights)

        return class_logits.reshape([N, nr_boxes, -1]), pred_bboxes.reshape(
            [N, nr_boxes, -1]), obj_features
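

# SparseRCNNHead below stacks several RCNNHead stages (iterative refinement):
# each stage RoIAligns the current boxes, runs self-attention over the proposal
# features, mixes them with the RoI features via DynamicConv, and predicts
# refined class logits and boxes that the next stage takes as input.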
@register
class SparseRCNNHead(nn.Layer):
    '''
    SparseRCNNHead

    Args:
        roi_input_shape (list[ShapeSpec]): The output shape of FPN
        num_classes (int): Number of classes
        head_hidden_dim (int): The param of MultiHeadAttention
        head_dim_feedforward (int): The param of MultiHeadAttention
        nhead (int): The param of MultiHeadAttention
        head_dropout (float): The p of dropout
        head_cls (int): The number of layers in the classification head
        head_reg (int): The number of layers in the regression head
        head_dim_dynamic (int): The channel dim of DynamicConv's dynamic params
        head_num_dynamic (int): The number of DynamicConv's dynamic param groups
        head_num_heads (int): The number of RCNNHead stages
        deep_supervision (bool): whether to supervise the intermediate results
        num_proposals (int): the number of proposal boxes and features
    '''
    __inject__ = ['loss_func']
    __shared__ = ['num_classes']

    def __init__(
            self,
            head_hidden_dim,
            head_dim_feedforward,
            nhead,
            head_dropout,
            head_cls,
            head_reg,
            head_dim_dynamic,
            head_num_dynamic,
            head_num_heads,
            deep_supervision,
            num_proposals,
            num_classes=80,
            loss_func="SparseRCNNLoss",
            roi_input_shape=None, ):
        super().__init__()

        # Build RoI.
        box_pooler = self._init_box_pooler(roi_input_shape)
        self.box_pooler = box_pooler

        # Build heads.
        rcnn_head = RCNNHead(
            head_hidden_dim,
            num_classes,
            head_dim_feedforward,
            nhead,
            head_dropout,
            head_cls,
            head_reg,
            head_dim_dynamic,
            head_num_dynamic, )
        self.head_series = nn.LayerList(
            [copy.deepcopy(rcnn_head) for i in range(head_num_heads)])
        self.return_intermediate = deep_supervision
        self.num_classes = num_classes

        # Build init proposal.
        self.init_proposal_features = nn.Embedding(num_proposals,
                                                   head_hidden_dim)
        self.init_proposal_boxes = nn.Embedding(num_proposals, 4)

        self.lossfunc = loss_func

        # Init parameters.
        init.reset_initialized_parameter(self)
        self._reset_parameters()

    def _reset_parameters(self):
        # init all parameters.
        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)

        for m in self.sublayers():
            if isinstance(m, nn.Linear):
                init.xavier_normal_(m.weight, reverse=True)
            elif not isinstance(m, nn.Embedding) and hasattr(
                    m, "weight") and m.weight.dim() > 1:
                init.xavier_normal_(m.weight, reverse=False)

            if hasattr(m, "bias") and m.bias is not None and m.bias.shape[
                    -1] == self.num_classes:
                # focal-loss-style prior: start classification with low confidence.
                init.constant_(m.bias, bias_value)

        # Initialize proposal boxes to cover the whole image:
        # (cx, cy, w, h) = (0.5, 0.5, 1.0, 1.0) in normalized coordinates.
        init_bboxes = paddle.empty_like(self.init_proposal_boxes.weight)
        init_bboxes[:, :2] = 0.5
        init_bboxes[:, 2:] = 1.0
        self.init_proposal_boxes.weight.set_value(init_bboxes)

    @staticmethod
    def _init_box_pooler(input_shape):
        pooler_resolution = 7
        sampling_ratio = 2

        if input_shape is not None:
            pooler_scales = tuple(1.0 / input_shape[k].stride
                                  for k in range(len(input_shape)))
            in_channels = [
                input_shape[f].channels for f in range(len(input_shape))
            ]
            end_level = len(input_shape) - 1
            # Check all channel counts are equal.
            assert len(set(in_channels)) == 1, in_channels
        else:
            pooler_scales = [1.0 / 4.0, 1.0 / 8.0, 1.0 / 16.0, 1.0 / 32.0]
            end_level = 3

        box_pooler = RoIAlign(
            resolution=pooler_resolution,
            spatial_scale=pooler_scales,
            sampling_ratio=sampling_ratio,
            end_level=end_level,
            aligned=True)
        return box_pooler

    def forward(self, features, input_whwh):
        bs = len(features[0])
        bboxes = box_cxcywh_to_xyxy(self.init_proposal_boxes.weight.clone(
        )).unsqueeze(0)
        bboxes = bboxes * input_whwh.unsqueeze(-2)

        init_features = self.init_proposal_features.weight.unsqueeze(0).tile(
            [1, bs, 1])
        proposal_features = init_features.clone()

        inter_class_logits = []
        inter_pred_bboxes = []

        for rcnn_head in self.head_series:
            class_logits, pred_bboxes, proposal_features = rcnn_head(
                features, bboxes, proposal_features, self.box_pooler)
            if self.return_intermediate:
                inter_class_logits.append(class_logits)
                inter_pred_bboxes.append(pred_bboxes)
            bboxes = pred_bboxes.detach()

        output = {
            'pred_logits': inter_class_logits[-1],
            'pred_boxes': inter_pred_bboxes[-1]
        }
        if self.return_intermediate:
            output['aux_outputs'] = [{
                'pred_logits': a,
                'pred_boxes': b
            } for a, b in zip(inter_class_logits[:-1], inter_pred_bboxes[:-1])]

        return output

    def get_loss(self, outputs, targets):
        losses = self.lossfunc(outputs, targets)
        weight_dict = self.lossfunc.weight_dict

        for k in losses.keys():
            if k in weight_dict:
                losses[k] *= weight_dict[k]

        return losses


def box_cxcywh_to_xyxy(x):
    x_c, y_c, w, h = x.unbind(-1)
    b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
    return paddle.stack(b, axis=-1)
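

# A minimal smoke-test sketch (not part of the original module): it only
# exercises DynamicConv and box_cxcywh_to_xyxy with random tensors, since the
# full SparseRCNNHead also needs FPN features and the ppdet config workspace.
# The hidden_dim / dim_dynamic / num_dynamic values are illustrative defaults,
# and the shapes follow the docstrings above.
if __name__ == "__main__":
    hidden_dim, dim_dynamic, num_dynamic = 256, 64, 2
    n_boxes = 8  # stands in for N * nr_boxes in a toy batch

    conv = DynamicConv(hidden_dim, dim_dynamic, num_dynamic)
    pro = paddle.randn([1, n_boxes, hidden_dim])   # proposal features
    roi = paddle.randn([49, n_boxes, hidden_dim])  # 7x7 RoIAlign features per box
    out = conv(pro, roi)
    print(out.shape)  # expected: [8, 256]

    boxes = paddle.to_tensor([[0.5, 0.5, 1.0, 1.0]])  # (cx, cy, w, h)
    print(box_cxcywh_to_xyxy(boxes))  # expected: [[0., 0., 1., 1.]]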