detr_transformer.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from paddlex.ppdet.core.workspace import register
from ..layers import MultiHeadAttention, _convert_attention_mask
from .position_encoding import PositionEmbedding
from .utils import _get_clones
from ..initializer import linear_init_, conv_init_, xavier_uniform_, normal_

__all__ = ['DETRTransformer']


class TransformerEncoderLayer(nn.Layer):
    def __init__(self,
                 d_model,
                 nhead,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation="relu",
                 attn_dropout=None,
                 act_dropout=None,
                 normalize_before=False):
        super(TransformerEncoderLayer, self).__init__()
        attn_dropout = dropout if attn_dropout is None else attn_dropout
        act_dropout = dropout if act_dropout is None else act_dropout
        self.normalize_before = normalize_before

        self.self_attn = MultiHeadAttention(d_model, nhead, attn_dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(act_dropout, mode="upscale_in_train")
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout, mode="upscale_in_train")
        self.dropout2 = nn.Dropout(dropout, mode="upscale_in_train")
        self.activation = getattr(F, activation)
        self._reset_parameters()

    def _reset_parameters(self):
        linear_init_(self.linear1)
        linear_init_(self.linear2)

    @staticmethod
    def with_pos_embed(tensor, pos_embed):
        return tensor if pos_embed is None else tensor + pos_embed

    def forward(self, src, src_mask=None, pos_embed=None):
        src_mask = _convert_attention_mask(src_mask, src.dtype)

        residual = src
        if self.normalize_before:
            src = self.norm1(src)
        q = k = self.with_pos_embed(src, pos_embed)
        src = self.self_attn(q, k, value=src, attn_mask=src_mask)
        src = residual + self.dropout1(src)
        if not self.normalize_before:
            src = self.norm1(src)

        residual = src
        if self.normalize_before:
            src = self.norm2(src)
        src = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = residual + self.dropout2(src)
        if not self.normalize_before:
            src = self.norm2(src)
        return src
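

# Illustrative usage of TransformerEncoderLayer (not part of the original
# file; the tensor shapes below are assumptions chosen for the example):
#     layer = TransformerEncoderLayer(d_model=256, nhead=8)
#     x = paddle.rand([2, 850, 256])   # flattened feature map, [bs, HxW, d_model]
#     y = layer(x)                     # same shape as x: [2, 850, 256]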


class TransformerEncoder(nn.Layer):
    def __init__(self, encoder_layer, num_layers, norm=None):
        super(TransformerEncoder, self).__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src, src_mask=None, pos_embed=None):
        src_mask = _convert_attention_mask(src_mask, src.dtype)

        output = src
        for layer in self.layers:
            output = layer(output, src_mask=src_mask, pos_embed=pos_embed)

        if self.norm is not None:
            output = self.norm(output)

        return output


class TransformerDecoderLayer(nn.Layer):
    def __init__(self,
                 d_model,
                 nhead,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation="relu",
                 attn_dropout=None,
                 act_dropout=None,
                 normalize_before=False):
        super(TransformerDecoderLayer, self).__init__()
        attn_dropout = dropout if attn_dropout is None else attn_dropout
        act_dropout = dropout if act_dropout is None else act_dropout
        self.normalize_before = normalize_before

        self.self_attn = MultiHeadAttention(d_model, nhead, attn_dropout)
        self.cross_attn = MultiHeadAttention(d_model, nhead, attn_dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(act_dropout, mode="upscale_in_train")
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout, mode="upscale_in_train")
        self.dropout2 = nn.Dropout(dropout, mode="upscale_in_train")
        self.dropout3 = nn.Dropout(dropout, mode="upscale_in_train")
        self.activation = getattr(F, activation)
        self._reset_parameters()

    def _reset_parameters(self):
        linear_init_(self.linear1)
        linear_init_(self.linear2)

    @staticmethod
    def with_pos_embed(tensor, pos_embed):
        return tensor if pos_embed is None else tensor + pos_embed

    def forward(self,
                tgt,
                memory,
                tgt_mask=None,
                memory_mask=None,
                pos_embed=None,
                query_pos_embed=None):
        tgt_mask = _convert_attention_mask(tgt_mask, tgt.dtype)
        memory_mask = _convert_attention_mask(memory_mask, memory.dtype)

        residual = tgt
        if self.normalize_before:
            tgt = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt, query_pos_embed)
        tgt = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask)
        tgt = residual + self.dropout1(tgt)
        if not self.normalize_before:
            tgt = self.norm1(tgt)

        residual = tgt
        if self.normalize_before:
            tgt = self.norm2(tgt)
        q = self.with_pos_embed(tgt, query_pos_embed)
        k = self.with_pos_embed(memory, pos_embed)
        tgt = self.cross_attn(q, k, value=memory, attn_mask=memory_mask)
        tgt = residual + self.dropout2(tgt)
        if not self.normalize_before:
            tgt = self.norm2(tgt)

        residual = tgt
        if self.normalize_before:
            tgt = self.norm3(tgt)
        tgt = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = residual + self.dropout3(tgt)
        if not self.normalize_before:
            tgt = self.norm3(tgt)
        return tgt
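

# Illustrative usage of TransformerDecoderLayer (not part of the original
# file; shapes are assumptions): self-attention runs over the object queries
# (tgt + query_pos_embed), while cross-attention reads from the encoder
# memory (memory + pos_embed).
#     layer = TransformerDecoderLayer(d_model=256, nhead=8)
#     tgt = paddle.zeros([2, 100, 256])     # object queries, [bs, num_queries, d_model]
#     memory = paddle.rand([2, 850, 256])   # encoder output, [bs, HxW, d_model]
#     out = layer(tgt, memory)              # [2, 100, 256]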


class TransformerDecoder(nn.Layer):
    def __init__(self,
                 decoder_layer,
                 num_layers,
                 norm=None,
                 return_intermediate=False):
        super(TransformerDecoder, self).__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self,
                tgt,
                memory,
                tgt_mask=None,
                memory_mask=None,
                pos_embed=None,
                query_pos_embed=None):
        tgt_mask = _convert_attention_mask(tgt_mask, tgt.dtype)
        memory_mask = _convert_attention_mask(memory_mask, memory.dtype)

        output = tgt
        intermediate = []
        for layer in self.layers:
            output = layer(
                output,
                memory,
                tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                pos_embed=pos_embed,
                query_pos_embed=query_pos_embed)
            if self.return_intermediate:
                intermediate.append(self.norm(output))

        if self.norm is not None:
            output = self.norm(output)

        if self.return_intermediate:
            return paddle.stack(intermediate)

        return output.unsqueeze(0)
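

# Note (not in the original file): with return_intermediate=True the decoder
# stacks the normalized output of every layer into
# [num_layers, bs, num_queries, d_model]; otherwise it returns a single
# tensor unsqueezed to [1, bs, num_queries, d_model].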


@register
class DETRTransformer(nn.Layer):
    __shared__ = ['hidden_dim']

    def __init__(self,
                 num_queries=100,
                 position_embed_type='sine',
                 return_intermediate_dec=True,
                 backbone_num_channels=2048,
                 hidden_dim=256,
                 nhead=8,
                 num_encoder_layers=6,
                 num_decoder_layers=6,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation="relu",
                 attn_dropout=None,
                 act_dropout=None,
                 normalize_before=False):
        super(DETRTransformer, self).__init__()
        assert position_embed_type in ['sine', 'learned'],\
            f'ValueError: position_embed_type not supported {position_embed_type}!'
        self.hidden_dim = hidden_dim
        self.nhead = nhead

        encoder_layer = TransformerEncoderLayer(
            hidden_dim, nhead, dim_feedforward, dropout, activation,
            attn_dropout, act_dropout, normalize_before)
        encoder_norm = nn.LayerNorm(hidden_dim) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers,
                                          encoder_norm)

        decoder_layer = TransformerDecoderLayer(
            hidden_dim, nhead, dim_feedforward, dropout, activation,
            attn_dropout, act_dropout, normalize_before)
        decoder_norm = nn.LayerNorm(hidden_dim)
        self.decoder = TransformerDecoder(
            decoder_layer,
            num_decoder_layers,
            decoder_norm,
            return_intermediate=return_intermediate_dec)

        self.input_proj = nn.Conv2D(
            backbone_num_channels, hidden_dim, kernel_size=1)
        self.query_pos_embed = nn.Embedding(num_queries, hidden_dim)
        self.position_embedding = PositionEmbedding(
            hidden_dim // 2,
            normalize=True if position_embed_type == 'sine' else False,
            embed_type=position_embed_type)

        self._reset_parameters()

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)
        conv_init_(self.input_proj)
        normal_(self.query_pos_embed.weight)

    @classmethod
    def from_config(cls, cfg, input_shape):
        return {
            'backbone_num_channels': [i.channels for i in input_shape][-1],
        }
    def forward(self, src, src_mask=None):
        r"""
        Applies a Transformer model on the inputs.

        Parameters:
            src (List(Tensor)): Backbone feature maps with shape [[bs, c, h, w]].
            src_mask (Tensor, optional): A tensor used in multi-head attention
                to prevent attention to some unwanted positions, usually the
                paddings or the subsequent positions. It is a tensor with shape
                [bs, H, W]. When the data type is bool, the unwanted positions
                have `False` values and the others have `True` values. When the
                data type is int, the unwanted positions have 0 values and the
                others have 1 values. When the data type is float, the unwanted
                positions have `-INF` values and the others have 0 values. It
                can be None when no position needs to be masked out.
                Default None.

        Returns:
            output (Tensor): [num_decoder_layers, batch_size, num_queries, hidden_dim]
            memory (Tensor): [batch_size, hidden_dim, h, w]
        """
        # use last level feature map
        src_proj = self.input_proj(src[-1])
        bs, c, h, w = src_proj.shape
        # flatten [B, C, H, W] to [B, HxW, C]
        src_flatten = src_proj.flatten(2).transpose([0, 2, 1])
        if src_mask is not None:
            src_mask = F.interpolate(
                src_mask.unsqueeze(0).astype(src_flatten.dtype),
                size=(h, w))[0].astype('bool')
        else:
            src_mask = paddle.ones([bs, h, w], dtype='bool')
        pos_embed = self.position_embedding(src_mask).flatten(2).transpose(
            [0, 2, 1])

        src_mask = _convert_attention_mask(src_mask, src_flatten.dtype)
        src_mask = src_mask.reshape([bs, 1, 1, -1])

        memory = self.encoder(
            src_flatten, src_mask=src_mask, pos_embed=pos_embed)

        query_pos_embed = self.query_pos_embed.weight.unsqueeze(0).tile(
            [bs, 1, 1])
        tgt = paddle.zeros_like(query_pos_embed)
        output = self.decoder(
            tgt,
            memory,
            memory_mask=src_mask,
            pos_embed=pos_embed,
            query_pos_embed=query_pos_embed)

        return (output, memory.transpose([0, 2, 1]).reshape([bs, c, h, w]),
                src_proj, src_mask.reshape([bs, 1, 1, h, w]))
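

if __name__ == '__main__':
    # Minimal smoke-test sketch, not part of the original file. It assumes a
    # working paddlex/ppdet install and substitutes a random tensor for real
    # backbone features; shapes follow DETRTransformer.forward's docstring.
    transformer = DETRTransformer(
        num_queries=100,
        backbone_num_channels=2048,
        hidden_dim=256,
        nhead=8,
        num_encoder_layers=6,
        num_decoder_layers=6)
    feats = [paddle.rand([2, 2048, 25, 34])]  # last backbone level, [bs, c, h, w]
    hs, memory, src_proj, src_mask = transformer(feats)
    # hs: [num_decoder_layers, bs, num_queries, hidden_dim] because
    # return_intermediate_dec defaults to True.
    print(hs.shape, memory.shape, src_proj.shape, src_mask.shape)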