
clean fluid

FlyingQianMM 3 years ago
parent
commit
e1fbfae787
100 changed files with 5691 additions and 763 deletions
  1. 1 0
      paddlex/cv/datasets/__init__.py
  2. 72 0
      paddlex/cv/datasets/utils.py
  3. 104 0
      paddlex/cv/models/utils/third_party/cocoapi/coco.py
  4. 2 1
      paddlex/cv/transforms/batch_operators.py
  5. 4 4
      paddlex/paddleseg/models/losses/lovasz_loss.py
  6. 0 1
      paddlex/paddleseg/utils/env/sys_env.py
  7. 1 1
      paddlex/ppcls/engine/engine.py
  8. 2 2
      paddlex/ppcls/static/save_load.py
  9. 1 1
      paddlex/ppcls/utils/download.py
  10. 9 0
      paddlex/ppdet/__init__.py
  11. 11 3
      paddlex/ppdet/core/workspace.py
  12. 1 1
      paddlex/ppdet/data/reader.py
  13. 4 1
      paddlex/ppdet/data/shm_utils.py
  14. 1 0
      paddlex/ppdet/data/source/__init__.py
  15. 42 9
      paddlex/ppdet/data/source/category.py
  16. 50 6
      paddlex/ppdet/data/source/dataset.py
  17. 5 0
      paddlex/ppdet/data/source/mot.py
  18. 4 4
      paddlex/ppdet/data/transform/autoaugment_utils.py
  19. 54 0
      paddlex/ppdet/data/transform/batch_operators.py
  20. 2 2
      paddlex/ppdet/data/transform/keypoint_operators.py
  21. 1 1
      paddlex/ppdet/data/transform/mot_operators.py
  22. 435 27
      paddlex/ppdet/data/transform/operators.py
  23. 72 0
      paddlex/ppdet/data/utils.py
  24. 168 6
      paddlex/ppdet/engine/callbacks.py
  25. 25 6
      paddlex/ppdet/engine/export_utils.py
  26. 141 73
      paddlex/ppdet/engine/tracker.py
  27. 194 64
      paddlex/ppdet/engine/trainer.py
  28. 38 0
      paddlex/ppdet/ext_op/README.md
  29. 97 0
      paddlex/ppdet/ext_op/rbox_iou_op.cc
  30. 120 0
      paddlex/ppdet/ext_op/rbox_iou_op.cu
  31. 356 0
      paddlex/ppdet/ext_op/rbox_iou_op.h
  32. 14 0
      paddlex/ppdet/ext_op/setup.py
  33. 156 0
      paddlex/ppdet/ext_op/test.py
  34. 1 1
      paddlex/ppdet/metrics/coco_utils.py
  35. 11 1
      paddlex/ppdet/metrics/json_results.py
  36. 1 1
      paddlex/ppdet/metrics/map_utils.py
  37. 5 4
      paddlex/ppdet/metrics/mcmot_metrics.py
  38. 9 2
      paddlex/ppdet/metrics/metrics.py
  39. 14 8
      paddlex/ppdet/metrics/mot_metrics.py
  40. 1 0
      paddlex/ppdet/model_zoo/.gitignore
  41. 13 0
      paddlex/ppdet/model_zoo/tests/__init__.py
  42. 48 0
      paddlex/ppdet/model_zoo/tests/test_get_model.py
  43. 68 0
      paddlex/ppdet/model_zoo/tests/test_list_model.py
  44. 13 0
      paddlex/ppdet/modeling/architectures/__init__.py
  45. 79 0
      paddlex/ppdet/modeling/architectures/bytetrack.py
  46. 4 4
      paddlex/ppdet/modeling/architectures/cascade_rcnn.py
  47. 3 2
      paddlex/ppdet/modeling/architectures/deepsort.py
  48. 2 2
      paddlex/ppdet/modeling/architectures/faster_rcnn.py
  49. 1 1
      paddlex/ppdet/modeling/architectures/keypoint_hrhrnet.py
  50. 4 4
      paddlex/ppdet/modeling/architectures/mask_rcnn.py
  51. 20 28
      paddlex/ppdet/modeling/architectures/meta_arch.py
  52. 12 8
      paddlex/ppdet/modeling/architectures/picodet.py
  53. 69 0
      paddlex/ppdet/modeling/architectures/retinanet.py
  54. 5 1
      paddlex/ppdet/modeling/architectures/yolo.py
  55. 139 0
      paddlex/ppdet/modeling/architectures/yolox.py
  56. 2 0
      paddlex/ppdet/modeling/assigners/__init__.py
  57. 33 29
      paddlex/ppdet/modeling/assigners/atss_assigner.py
  58. 54 0
      paddlex/ppdet/modeling/assigners/max_iou_assigner.py
  59. 19 17
      paddlex/ppdet/modeling/assigners/task_aligned_assigner.py
  60. 49 19
      paddlex/ppdet/modeling/assigners/utils.py
  61. 6 2
      paddlex/ppdet/modeling/backbones/__init__.py
  62. 439 0
      paddlex/ppdet/modeling/backbones/csp_darknet.py
  63. 283 0
      paddlex/ppdet/modeling/backbones/cspresnet.py
  64. 15 10
      paddlex/ppdet/modeling/backbones/darknet.py
  65. 1 1
      paddlex/ppdet/modeling/backbones/esnet.py
  66. 5 5
      paddlex/ppdet/modeling/backbones/lcnet.py
  67. 2 2
      paddlex/ppdet/modeling/backbones/mobilenet_v3.py
  68. 4 5
      paddlex/ppdet/modeling/backbones/swin_transformer.py
  69. 134 18
      paddlex/ppdet/modeling/bbox_utils.py
  70. 40 0
      paddlex/ppdet/modeling/cls_utils.py
  71. 4 0
      paddlex/ppdet/modeling/heads/__init__.py
  72. 2 1
      paddlex/ppdet/modeling/heads/bbox_head.py
  73. 2 1
      paddlex/ppdet/modeling/heads/cascade_head.py
  74. 10 8
      paddlex/ppdet/modeling/heads/face_head.py
  75. 2 0
      paddlex/ppdet/modeling/heads/fcos_head.py
  76. 36 76
      paddlex/ppdet/modeling/heads/gfl_head.py
  77. 17 16
      paddlex/ppdet/modeling/heads/mask_head.py
  78. 517 11
      paddlex/ppdet/modeling/heads/pico_head.py
  79. 385 0
      paddlex/ppdet/modeling/heads/ppyoloe_head.py
  80. 249 0
      paddlex/ppdet/modeling/heads/retina_head.py
  81. 2 1
      paddlex/ppdet/modeling/heads/s2anet_head.py
  82. 12 10
      paddlex/ppdet/modeling/heads/ssd_head.py
  83. 18 13
      paddlex/ppdet/modeling/heads/tood_head.py
  84. 2 2
      paddlex/ppdet/modeling/heads/ttf_head.py
  85. 295 0
      paddlex/ppdet/modeling/heads/yolo_head.py
  86. 2 1
      paddlex/ppdet/modeling/initializer.py
  87. 98 155
      paddlex/ppdet/modeling/layers.py
  88. 4 2
      paddlex/ppdet/modeling/losses/__init__.py
  89. 1 1
      paddlex/ppdet/modeling/losses/detr_loss.py
  90. 66 0
      paddlex/ppdet/modeling/losses/focal_loss.py
  91. 61 0
      paddlex/ppdet/modeling/losses/smooth_l1_loss.py
  92. 1 1
      paddlex/ppdet/modeling/losses/sparsercnn_loss.py
  93. 1 2
      paddlex/ppdet/modeling/losses/ssd_loss.py
  94. 2 2
      paddlex/ppdet/modeling/losses/yolo_loss.py
  95. 23 17
      paddlex/ppdet/modeling/mot/matching/jde_matching.py
  96. 1 1
      paddlex/ppdet/modeling/mot/tracker/base_jde_tracker.py
  97. 9 7
      paddlex/ppdet/modeling/mot/tracker/deepsort_tracker.py
  98. 50 28
      paddlex/ppdet/modeling/mot/tracker/jde_tracker.py
  99. 6 0
      paddlex/ppdet/modeling/necks/__init__.py
  100. 18 19
      paddlex/ppdet/modeling/necks/csp_pan.py

+ 1 - 0
paddlex/cv/datasets/__init__.py

@@ -16,3 +16,4 @@ from .seg_dataset import SegDataset
 from .imagenet import ImageNet
 from .voc import VOCDetection
 from .coco import CocoDetection
+from .utils import default_collate_fn

+ 72 - 0
paddlex/cv/datasets/utils.py

@@ -0,0 +1,72 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import numbers
+import numpy as np
+
+try:
+    from collections.abc import Sequence, Mapping
+except:
+    from collections import Sequence, Mapping
+
+
+def default_collate_fn(batch):
+    """
+    Default batch collating function for :code:`paddle.io.DataLoader`,
+    get input data as a list of sample datas, each element in list
+    if the data of a sample, and sample data should composed of list,
+    dictionary, string, number, numpy array, this
+    function will parse input data recursively and stack number,
+    numpy array and paddle.Tensor datas as batch datas. e.g. for
+    following input data:
+    [{'image': np.array(shape=[3, 224, 224]), 'label': 1},
+     {'image': np.array(shape=[3, 224, 224]), 'label': 3},
+     {'image': np.array(shape=[3, 224, 224]), 'label': 4},
+     {'image': np.array(shape=[3, 224, 224]), 'label': 5},]
+    
+    
+    This default collate function zipped each number and numpy array
+    field together and stack each field as the batch field as follows:
+    {'image': np.array(shape=[4, 3, 224, 224]), 'label': np.array([1, 3, 4, 5])}
+    Args:  
+        batch(list of sample data): batch should be a list of sample data.
+    
+    Returns:
+        Batched data: batched each number, numpy array and paddle.Tensor
+                      in input data.
+    """
+    sample = batch[0]
+    if isinstance(sample, np.ndarray):
+        batch = np.stack(batch, axis=0)
+        return batch
+    elif isinstance(sample, numbers.Number):
+        batch = np.array(batch)
+        return batch
+    elif isinstance(sample, (str, bytes)):
+        return batch
+    elif isinstance(sample, Mapping):
+        return {
+            key: default_collate_fn([d[key] for d in batch])
+            for key in sample
+        }
+    elif isinstance(sample, Sequence):
+        sample_fields_num = len(sample)
+        if not all(len(sample) == sample_fields_num for sample in iter(batch)):
+            raise RuntimeError(
+                "fileds number not same among samples in a batch")
+        return [default_collate_fn(fields) for fields in zip(*batch)]
+
+    raise TypeError("batch data can only contain: tensor, numpy.ndarray, "
+                    "dict, list, number, but got {}".format(type(sample)))

+ 104 - 0
paddlex/cv/models/utils/third_party/cocoapi/coco.py

@@ -0,0 +1,104 @@
+# This file is made available under the Apache license
+# This file is based on code available under the Simplified BSD License:
+#   https://github.com/cocodataset/cocoapi/blob/8c9bcc3cf640524c4c20a9c40e89cb6a2f2fa0e9/PythonAPI/pycocotools/coco.py#L305
+#
+# Copyright (c) 2014, Piotr Dollar and Tsung-Yi Lin
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met: 
+# 
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions and the following disclaimer. 
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions and the following disclaimer in the documentation
+#    and/or other materials provided with the distribution. 
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+# The views and conclusions contained in the software and documentation are those
+# of the authors and should not be interpreted as representing official policies, 
+# either expressed or implied, of the FreeBSD Project.
+
+
+def loadRes(coco_obj, anns):
+    """
+    Load result annotations and return a result api object.
+    :param   coco_obj (COCO)   : COCO object holding the ground-truth set
+    :param   anns (list)       : list of result annotation dicts
+    :return: res (obj)         : result api object
+    """
+
+    # This function has the same functionality as pycocotools.COCO.loadRes,
+    # except that the input anns is list of results rather than a json file.
+    # Refer to
+    # https://github.com/cocodataset/cocoapi/blob/8c9bcc3cf640524c4c20a9c40e89cb6a2f2fa0e9/PythonAPI/pycocotools/coco.py#L305,
+
+    # matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
+    # or matplotlib.backends is imported for the first time
+    # pycocotools import matplotlib
+    import matplotlib
+    matplotlib.use('Agg')
+    from pycocotools.coco import COCO
+    import pycocotools.mask as maskUtils
+    import time
+    import copy
+    import numpy as np
+    res = COCO()
+    res.dataset['images'] = [img for img in coco_obj.dataset['images']]
+
+    tic = time.time()
+    assert type(anns) == list, 'results is not an array of objects'
+    annsImgIds = [ann['image_id'] for ann in anns]
+    assert set(annsImgIds) == (set(annsImgIds) & set(coco_obj.getImgIds())), \
+        'Results do not correspond to current coco set'
+    if 'caption' in anns[0]:
+        imgIds = set([img['id'] for img in res.dataset['images']]) & set(
+            [ann['image_id'] for ann in anns])
+        res.dataset['images'] = [
+            img for img in res.dataset['images'] if img['id'] in imgIds
+        ]
+        for id, ann in enumerate(anns):
+            ann['id'] = id + 1
+    elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
+        res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
+            'categories'])
+        for id, ann in enumerate(anns):
+            bb = ann['bbox']
+            x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
+            if not 'segmentation' in ann:
+                ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
+            ann['area'] = bb[2] * bb[3]
+            ann['id'] = id + 1
+            ann['iscrowd'] = 0
+    elif 'segmentation' in anns[0]:
+        res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
+            'categories'])
+        for id, ann in enumerate(anns):
+            # now only support compressed RLE format as segmentation results
+            ann['area'] = maskUtils.area(ann['segmentation'])
+            if not 'bbox' in ann:
+                ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
+            ann['id'] = id + 1
+            ann['iscrowd'] = 0
+    elif 'keypoints' in anns[0]:
+        res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
+            'categories'])
+        for id, ann in enumerate(anns):
+            s = ann['keypoints']
+            x = s[0::3]
+            y = s[1::3]
+            x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
+            ann['area'] = (x1 - x0) * (y1 - y0)
+            ann['id'] = id + 1
+            ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]
+
+    res.dataset['annotations'] = anns
+    res.createIndex()
+    return res
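
A sketch of how the helper above might be used with an in-memory list of detection results instead of a result file (the annotation path and result dict here are hypothetical):

    from pycocotools.coco import COCO
    from paddlex.cv.models.utils.third_party.cocoapi.coco import loadRes

    coco_gt = COCO('annotations/instances_val.json')  # hypothetical path
    # Detection results as plain dicts, not a JSON file on disk.
    results = [{'image_id': 1, 'category_id': 1,
                'bbox': [10.0, 20.0, 30.0, 40.0], 'score': 0.9}]
    coco_dt = loadRes(coco_gt, results)  # behaves like COCO.loadRes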

+ 2 - 1
paddlex/cv/transforms/batch_operators.py

@@ -19,10 +19,11 @@ try:
     from collections.abc import Sequence
 except Exception:
     from collections import Sequence
-from paddle.fluid.dataloader.collate import default_collate_fn
+
 from .operators import Transform, Resize, ResizeByShort, _Permute, interp_dict
 from .box_utils import jaccard_overlap
 from paddlex.utils import logging
+from paddlex.cv.datasets.utils import default_collate_fn
 
 
 class BatchCompose(Transform):

+ 4 - 4
paddlex/paddleseg/models/losses/lovasz_loss.py

@@ -124,8 +124,8 @@ def lovasz_hinge_flat(logits, labels):
     signs = 2. * labels - 1.
     signs.stop_gradient = True
     errors = 1. - logits * signs
-    errors_sorted, perm = paddle.fluid.core.ops.argsort(errors, 'axis', 0,
-                                                        'descending', True)
+    errors_sorted, perm = paddle._C_ops.argsort(errors, 'axis', 0,
+                                                'descending', True)
     errors_sorted.stop_gradient = False
     gt_sorted = paddle.gather(labels, perm)
     grad = lovasz_grad(gt_sorted)
@@ -181,8 +181,8 @@ def lovasz_softmax_flat(probas, labels, classes='present'):
         else:
             class_pred = probas[:, c]
         errors = paddle.abs(fg - class_pred)
-        errors_sorted, perm = paddle.fluid.core.ops.argsort(errors, 'axis', 0,
-                                                            'descending', True)
+        errors_sorted, perm = paddle._C_ops.argsort(errors, 'axis', 0,
+                                                    'descending', True)
         errors_sorted.stop_gradient = False
 
         fg_sorted = paddle.gather(fg, perm)
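
For reference, the private `paddle._C_ops.argsort` call above returns both the sorted values and the permutation in one call. A sketch of the same computation using only the public API, assuming `paddle.sort` and `paddle.argsort` keep their documented behavior:

    import paddle

    errors = paddle.to_tensor([0.2, 0.9, 0.5])
    # Sorted values and the permutation indices, computed separately.
    errors_sorted = paddle.sort(errors, axis=0, descending=True)
    perm = paddle.argsort(errors, axis=0, descending=True)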

+ 0 - 1
paddlex/paddleseg/utils/env/sys_env.py

@@ -93,7 +93,6 @@ def get_sys_env():
     if compiled_with_cuda:
         cuda_home = _find_cuda_home()
         env_info['NVCC'] = _get_nvcc_info(cuda_home)
-        # refer to https://github.com/PaddlePaddle/Paddle/blob/release/2.0-rc/paddle/fluid/platform/device_context.cc#L327
         v = paddle.get_cudnn_version()
         v = str(v // 1000) + '.' + str(v % 1000 // 100)
         env_info['cudnn'] = v

+ 1 - 1
paddlex/ppcls/engine/engine.py

@@ -110,7 +110,7 @@ class Engine(object):
                 'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
                 'FLAGS_max_inplace_grad_add': 8,
             }
-            paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING)
+            paddle.set_flags(AMP_RELATED_FLAGS_SETTING)
 
         #TODO(gaotingquan): support rec
         class_num = config["Arch"].get("class_num", None)

+ 2 - 2
paddlex/ppcls/static/save_load.py

@@ -62,8 +62,8 @@ def load_params(exe, prog, path, ignore_params=None):
     """
     Load model from the given path.
     Args:
-        exe (fluid.Executor): The fluid.Executor object.
-        prog (fluid.Program): load weight to which Program object.
+        exe (paddle.static.Executor): The paddle.static.Executor object.
+        prog (paddle.static.Program): load weight to which Program object.
         path (string): URL string or local model path.
         ignore_params (list): ignore variable to load when finetuning.
             It can be specified by finetune_exclude_pretrained_params

+ 1 - 1
paddlex/ppcls/utils/download.py

@@ -112,7 +112,7 @@ def get_path_from_url(url,
         str: a local path to save downloaded models & weights & datasets.
     """
 
-    from paddle.fluid.dygraph.parallel import ParallelEnv
+    from paddle.distributed import ParallelEnv
 
     assert is_url(url), "downloading from {} not a url".format(url)
     # parse path after download to decompress under root_dir

+ 9 - 0
paddlex/ppdet/__init__.py

@@ -14,3 +14,12 @@
 
 from . import (core, data, engine, modeling, model_zoo, optimizer, metrics,
                utils, slim)
+
+try:
+    from .version import full_version as __version__
+    from .version import commit as __git_commit__
+except ImportError:
+    import sys
+    sys.stderr.write("Warning: import ppdet from source directory " \
+            "without installing, run 'python setup.py install' to " \
+            "install ppdet firstly\n")

+ 11 - 3
paddlex/ppdet/core/workspace.py

@@ -213,9 +213,17 @@ def create(cls_or_name, **kwargs):
     assert type(cls_or_name) in [type, str
                                  ], "should be a class or name of a class"
     name = type(cls_or_name) == str and cls_or_name or cls_or_name.__name__
-    assert name in global_config and \
-        isinstance(global_config[name], SchemaDict), \
-        "the module {} is not registered".format(name)
+    if name in global_config:
+        if isinstance(global_config[name], SchemaDict):
+            pass
+        elif hasattr(global_config[name], "__dict__"):
+            # support instance return directly
+            return global_config[name]
+        else:
+            raise ValueError("The module {} is not registered".format(name))
+    else:
+        raise ValueError("The module {} is not registered".format(name))
+
     config = global_config[name]
     cls = getattr(config.pymodule, name)
     cls_kwargs = {}
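
A sketch of what the relaxed check above allows (the `MyReader` name and instance are hypothetical): `create` still builds registered `SchemaDict` modules from config, but it can now also return a plain object instance that was placed into `global_config` directly.

    from paddlex.ppdet.core.workspace import global_config, create

    class MyReader:                   # hypothetical, not a registered module
        pass

    global_config['MyReader'] = MyReader()
    reader = create('MyReader')       # returned as-is via the __dict__ branch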

+ 1 - 1
paddlex/ppdet/data/reader.py

@@ -23,7 +23,7 @@ else:
 import numpy as np
 
 from paddle.io import DataLoader, DistributedBatchSampler
-from paddle.fluid.dataloader.collate import default_collate_fn
+from .utils import default_collate_fn
 
 from paddlex.ppdet.core.workspace import register
 from . import transform

+ 4 - 1
paddlex/ppdet/data/shm_utils.py

@@ -34,7 +34,10 @@ SHM_DEFAULT_MOUNT = '/dev/shm'
 
 
 def _parse_size_in_M(size_str):
-    num, unit = size_str[:-1], size_str[-1]
+    if size_str[-1] == 'B':
+        num, unit = size_str[:-2], size_str[-2]
+    else:
+        num, unit = size_str[:-1], size_str[-1]
     assert unit in SIZE_UNIT, \
             "unknown shm size unit {}".format(unit)
     return float(num) * \
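
A sketch of what the new branch handles: shared-memory sizes reported with a trailing 'B' (the unit table and megabyte conversion are assumed from `SIZE_UNIT` in this module):

    _parse_size_in_M('1024M')   # num='1024', unit='M'
    _parse_size_in_M('2G')      # num='2',    unit='G'
    _parse_size_in_M('2GB')     # the 'B' suffix is now stripped: num='2', unit='G'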

+ 1 - 0
paddlex/ppdet/data/source/__init__.py

@@ -27,3 +27,4 @@ from .category import *
 from .keypoint_coco import *
 from .mot import *
 from .sniper_coco import SniperCOCODataSet
+from .dataset import ImageFolder

+ 42 - 9
paddlex/ppdet/data/source/category.py

@@ -39,24 +39,49 @@ def get_categories(metric_type, anno_file=None, arch=None):
     if arch == 'keypoint_arch':
         return (None, {'id': 'keypoint'})
 
+    if anno_file == None or (not os.path.isfile(anno_file)):
+        logger.warning(
+            "anno_file '{}' is None or not set or not exist, "
+            "please recheck TrainDataset/EvalDataset/TestDataset.anno_path, "
+            "otherwise the default categories will be used by metric_type.".
+            format(anno_file))
+
     if metric_type.lower() == 'coco' or metric_type.lower(
     ) == 'rbox' or metric_type.lower() == 'snipercoco':
         if anno_file and os.path.isfile(anno_file):
-            # lazy import pycocotools here
-            from pycocotools.coco import COCO
-
-            coco = COCO(anno_file)
-            cats = coco.loadCats(coco.getCatIds())
-
-            clsid2catid = {i: cat['id'] for i, cat in enumerate(cats)}
-            catid2name = {cat['id']: cat['name'] for cat in cats}
+            if anno_file.endswith('json'):
+                # lazy import pycocotools here
+                from pycocotools.coco import COCO
+                coco = COCO(anno_file)
+                cats = coco.loadCats(coco.getCatIds())
+
+                clsid2catid = {i: cat['id'] for i, cat in enumerate(cats)}
+                catid2name = {cat['id']: cat['name'] for cat in cats}
+
+            elif anno_file.endswith('txt'):
+                cats = []
+                with open(anno_file) as f:
+                    for line in f.readlines():
+                        cats.append(line.strip())
+                if cats[0] == 'background': cats = cats[1:]
+
+                clsid2catid = {i: i for i in range(len(cats))}
+                catid2name = {i: name for i, name in enumerate(cats)}
+
+            else:
+                raise ValueError("anno_file {} should be json or txt.".format(
+                    anno_file))
             return clsid2catid, catid2name
 
         # anno file not exist, load default categories of COCO17
         else:
             if metric_type.lower() == 'rbox':
+                logger.warning(
+                    "metric_type: {}, load default categories of DOTA.".format(
+                        metric_type))
                 return _dota_category()
-
+            logger.warning("metric_type: {}, load default categories of COCO.".
+                           format(metric_type))
             return _coco17_category()
 
     elif metric_type.lower() == 'voc':
@@ -77,6 +102,8 @@ def get_categories(metric_type, anno_file=None, arch=None):
         # anno file not exist, load default categories of
         # VOC all 20 categories
         else:
+            logger.warning("metric_type: {}, load default categories of VOC.".
+                           format(metric_type))
             return _vocall_category()
 
     elif metric_type.lower() == 'oid':
@@ -104,6 +131,9 @@ def get_categories(metric_type, anno_file=None, arch=None):
             return clsid2catid, catid2name
         # anno file not exist, load default category 'pedestrian'.
         else:
+            logger.warning(
+                "metric_type: {}, load default categories of pedestrian MOT.".
+                format(metric_type))
             return _mot_category(category='pedestrian')
 
     elif metric_type.lower() in ['kitti', 'bdd100kmot']:
@@ -122,6 +152,9 @@ def get_categories(metric_type, anno_file=None, arch=None):
             return clsid2catid, catid2name
         # anno file not exist, load default categories of visdrone all 10 categories
         else:
+            logger.warning(
+                "metric_type: {}, load default categories of VisDrone.".format(
+                    metric_type))
             return _visdrone_category()
 
     else:
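
A sketch of the new plain-text label list path (file contents are illustrative): each non-empty line becomes one category, and an optional leading 'background' line is dropped.

    # labels.txt (hypothetical):
    #   background
    #   person
    #   car
    clsid2catid, catid2name = get_categories('COCO', anno_file='labels.txt')
    # clsid2catid -> {0: 0, 1: 1}
    # catid2name  -> {0: 'person', 1: 'car'}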

+ 50 - 6
paddlex/ppdet/data/source/dataset.py

@@ -23,6 +23,7 @@ from paddle.io import Dataset
 from paddlex.ppdet.core.workspace import register, serializable
 from paddlex.ppdet.utils.download import get_dataset_path
 import copy
+from paddlex.ppdet.data import source
 
 
 @serializable
@@ -60,6 +61,9 @@ class DetDataset(Dataset):
     def __len__(self, ):
         return len(self.roidbs)
 
+    def __call__(self, *args, **kwargs):
+        return self
+
     def __getitem__(self, idx):
         # data batch
         roidb = copy.deepcopy(self.roidbs[idx])
@@ -75,7 +79,7 @@ class DetDataset(Dataset):
             n = len(self.roidbs)
             roidb = [roidb, ] + [
                 copy.deepcopy(self.roidbs[np.random.randint(n)])
-                for _ in range(3)
+                for _ in range(4)
             ]
         if isinstance(roidb, Sequence):
             for r in roidb:
@@ -149,12 +153,15 @@ class ImageFolder(DetDataset):
         self.sample_num = sample_num
 
     def check_or_download_dataset(self):
+        return
+
+    def get_anno(self):
+        if self.anno_path is None:
+            return
         if self.dataset_dir:
-            # NOTE: ImageFolder is only used for prediction, in
-            #       infer mode, image_dir is set by set_images
-            #       so we only check anno_path here
-            self.dataset_dir = get_dataset_path(self.dataset_dir,
-                                                self.anno_path, None)
+            return os.path.join(self.dataset_dir, self.anno_path)
+        else:
+            return self.anno_path
 
     def parse_dataset(self, ):
         if not self.roidbs:
@@ -195,3 +202,40 @@ class ImageFolder(DetDataset):
     def set_images(self, images):
         self.image_dir = images
         self.roidbs = self._load_images()
+
+
+@register
+class CommonDataset(object):
+    def __init__(self, **dataset_args):
+        super(CommonDataset, self).__init__()
+        dataset_args = copy.deepcopy(dataset_args)
+        type = dataset_args.pop("name")
+        self.dataset = getattr(source, type)(**dataset_args)
+
+    def __call__(self):
+        return self.dataset
+
+
+@register
+class TrainDataset(CommonDataset):
+    pass
+
+
+@register
+class EvalMOTDataset(CommonDataset):
+    pass
+
+
+@register
+class TestMOTDataset(CommonDataset):
+    pass
+
+
+@register
+class EvalDataset(CommonDataset):
+    pass
+
+
+@register
+class TestDataset(CommonDataset):
+    pass
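
A sketch of the config-driven wrapper added above: the `name` key selects a dataset class from `ppdet.data.source` via `getattr`, and calling the wrapper yields the underlying dataset (paths here are hypothetical).

    train = TrainDataset(
        name='COCODataSet',           # resolved via getattr(source, name)
        dataset_dir='data/coco',      # hypothetical paths
        image_dir='train2017',
        anno_path='annotations/instances_train2017.json')
    dataset = train()                 # __call__ returns the wrapped dataset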

+ 5 - 0
paddlex/ppdet/data/source/mot.py

@@ -480,6 +480,7 @@ class MOTImageFolder(DetDataset):
                  image_dir=None,
                  sample_num=-1,
                  keep_ori_im=False,
+                 anno_path=None,
                  **kwargs):
         super(MOTImageFolder, self).__init__(
             dataset_dir, image_dir, sample_num=sample_num)
@@ -489,6 +490,7 @@ class MOTImageFolder(DetDataset):
         self._imid2path = {}
         self.roidbs = None
         self.frame_rate = frame_rate
+        self.anno_path = anno_path
 
     def check_or_download_dataset(self):
         return
@@ -579,6 +581,9 @@ class MOTImageFolder(DetDataset):
                 "wrong or unsupported file format: {}".format(self.video_file)
         self.roidbs = self._load_video_images()
 
+    def get_anno(self):
+        return self.anno_path
+
 
 def _is_valid_video(f, extensions=('.mp4', '.avi', '.mov', '.rmvb', 'flv')):
     return f.lower().endswith(extensions)

+ 4 - 4
paddlex/ppdet/data/transform/autoaugment_utils.py

@@ -111,8 +111,8 @@ def policy_v2():
         [('Cutout', 0.8, 8), ('Brightness', 0.8, 8), ('Cutout', 0.2, 2)],
         [('Color', 0.8, 4), ('TranslateY_BBox', 1.0, 6),
          ('Rotate_BBox', 0.6, 6)],
-        [('Rotate_BBox', 0.6, 10), ('BBox_Cutout', 1.0, 4),
-         ('Cutout', 0.2, 8)],
+        [('Rotate_BBox', 0.6, 10), ('BBox_Cutout', 1.0, 4), ('Cutout', 0.2, 8)
+         ],
         [('Rotate_BBox', 0.0, 0), ('Equalize', 0.6, 6),
          ('ShearY_BBox', 0.6, 8)],
         [('Brightness', 0.8, 8), ('AutoContrast', 0.4, 2),
@@ -1392,8 +1392,8 @@ def _translate_level_to_arg(level, translate_const):
 def _bbox_cutout_level_to_arg(level, hparams):
     cutout_pad_fraction = (
         level / _MAX_LEVEL) * 0.75  # hparams.cutout_max_pad_fraction
-    return (cutout_pad_fraction,
-            False)  # hparams.cutout_bbox_replace_with_mean
+    return (cutout_pad_fraction, False
+            )  # hparams.cutout_bbox_replace_with_mean
 
 
 def level_to_arg(hparams):

+ 54 - 0
paddlex/ppdet/data/transform/batch_operators.py

@@ -47,6 +47,7 @@ __all__ = [
     'PadMaskBatch',
     'Gt2GFLTarget',
     'Gt2CenterNetTarget',
+    'PadGT',
 ]
 
 
@@ -1078,3 +1079,56 @@ class Gt2CenterNetTarget(BaseOperator):
         sample['size'] = wh
         sample['offset'] = reg
         return sample
+
+
+@register_op
+class PadGT(BaseOperator):
+    """
+    Pad 0 to `gt_class`, `gt_bbox`, `gt_score`...
+    num_max_boxes is the largest box number in the batch.
+    Args:
+        return_gt_mask (bool): If true, return `pad_gt_mask`,
+                                1 means bbox, 0 means no bbox.
+    """
+
+    def __init__(self, return_gt_mask=True):
+        super(PadGT, self).__init__()
+        self.return_gt_mask = return_gt_mask
+
+    def __call__(self, samples, context=None):
+        num_max_boxes = max([len(s['gt_bbox']) for s in samples])
+        for sample in samples:
+            if self.return_gt_mask:
+                sample['pad_gt_mask'] = np.zeros(
+                    (num_max_boxes, 1), dtype=np.float32)
+            if num_max_boxes == 0:
+                continue
+
+            num_gt = len(sample['gt_bbox'])
+            pad_gt_class = np.zeros((num_max_boxes, 1), dtype=np.int32)
+            pad_gt_bbox = np.zeros((num_max_boxes, 4), dtype=np.float32)
+            if num_gt > 0:
+                pad_gt_class[:num_gt] = sample['gt_class']
+                pad_gt_bbox[:num_gt] = sample['gt_bbox']
+            sample['gt_class'] = pad_gt_class
+            sample['gt_bbox'] = pad_gt_bbox
+            # pad_gt_mask
+            if 'pad_gt_mask' in sample:
+                sample['pad_gt_mask'][:num_gt] = 1
+            # gt_score
+            if 'gt_score' in sample:
+                pad_gt_score = np.zeros((num_max_boxes, 1), dtype=np.float32)
+                if num_gt > 0:
+                    pad_gt_score[:num_gt] = sample['gt_score']
+                sample['gt_score'] = pad_gt_score
+            if 'is_crowd' in sample:
+                pad_is_crowd = np.zeros((num_max_boxes, 1), dtype=np.int32)
+                if num_gt > 0:
+                    pad_is_crowd[:num_gt] = sample['is_crowd']
+                sample['is_crowd'] = pad_is_crowd
+            if 'difficult' in sample:
+                pad_diff = np.zeros((num_max_boxes, 1), dtype=np.int32)
+                if num_gt > 0:
+                    pad_diff[:num_gt] = sample['difficult']
+                sample['difficult'] = pad_diff
+        return samples
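
A sketch of the padding behavior on a two-sample batch (shapes only; values illustrative):

    import numpy as np

    samples = [
        {'gt_bbox': np.zeros((1, 4), np.float32), 'gt_class': np.zeros((1, 1), np.int32)},
        {'gt_bbox': np.zeros((3, 4), np.float32), 'gt_class': np.zeros((3, 1), np.int32)},
    ]
    samples = PadGT()(samples)
    # Both samples now have gt_bbox of shape (3, 4) and gt_class of shape (3, 1);
    # pad_gt_mask is (3, 1) with ones for real boxes and zeros for padding.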

+ 2 - 2
paddlex/ppdet/data/transform/keypoint_operators.py

@@ -700,8 +700,8 @@ class ToHeatmapsTopDown(object):
         tmp_size = self.sigma * 3
         feat_stride = image_size / self.hmsize
         for joint_id in range(num_joints):
-            mu_x = int(joints[joint_id][0] + 0.5) / feat_stride[0]
-            mu_y = int(joints[joint_id][1] + 0.5) / feat_stride[1]
+            mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)
+            mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)
             # Check that any part of the gaussian is in-bounds
             ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
             br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
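
A worked number for the fix above: the old code rounded before mapping into heatmap space, while the new code rounds after.

    feat_stride = 4.0
    x = 37.0
    old_mu_x = int(x + 0.5) / feat_stride      # 9.25 -- a float heatmap coordinate
    new_mu_x = int(x / feat_stride + 0.5)      # 9    -- a proper integer heatmap bin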

+ 1 - 1
paddlex/ppdet/data/transform/mot_operators.py

@@ -530,7 +530,7 @@ class Gt2FairMOTTarget(Gt2TTFTarget):
     Generate FairMOT targets by ground truth data.
     Difference between Gt2FairMOTTarget and Gt2TTFTarget are:
         1. the gaussian kernel radius to generate a heatmap.
-        2. the targets needed during traing.
+        2. the targets needed during training.
 
     Args:
         num_classes(int): the number of classes.

+ 435 - 27
paddlex/ppdet/data/transform/operators.py

@@ -830,7 +830,7 @@ class Resize(BaseOperator):
             im_scale_x = resize_w / im_shape[1]
 
         im = self.apply_image(sample['image'], [im_scale_x, im_scale_y])
-        sample['image'] = im
+        sample['image'] = im.astype(np.float32)
         sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
         if 'scale_factor' in sample:
             scale_factor = sample['scale_factor']
@@ -1060,7 +1060,7 @@ class CropWithSampling(BaseOperator):
            [max sample, max trial, min scale, max scale,
             min aspect ratio, max aspect ratio,
             min overlap, max overlap]
-            avoid_no_bbox (bool): whether to to avoid the
+            avoid_no_bbox (bool): whether to avoid the
                                   situation where the box does not appear.
         """
         super(CropWithSampling, self).__init__()
@@ -1151,7 +1151,7 @@ class CropWithDataAchorSampling(BaseOperator):
             das_anchor_scales (list[float]): a list of anchor scales in data
                 anchor smapling.
             min_size (float): minimum size of sampled bbox.
-            avoid_no_bbox (bool): whether to to avoid the
+            avoid_no_bbox (bool): whether to avoid the
                                   situation where the box does not appear.
         """
         super(CropWithDataAchorSampling, self).__init__()
@@ -1511,6 +1511,11 @@ class RandomCrop(BaseOperator):
                 if 'is_crowd' in sample:
                     sample['is_crowd'] = np.take(
                         sample['is_crowd'], valid_ids, axis=0)
+
+                if 'difficult' in sample:
+                    sample['difficult'] = np.take(
+                        sample['difficult'], valid_ids, axis=0)
+
                 return sample
 
         return sample
@@ -1754,7 +1759,7 @@ class Mixup(BaseOperator):
             gt_score2 = np.ones_like(sample[1]['gt_class'])
             gt_score = np.concatenate(
                 (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
-            result['gt_score'] = gt_score
+            result['gt_score'] = gt_score.astype('float32')
         if 'is_crowd' in sample[0]:
             is_crowd1 = sample[0]['is_crowd']
             is_crowd2 = sample[1]['is_crowd']
@@ -2038,13 +2043,14 @@ class Pad(BaseOperator):
         if self.size:
             h, w = self.size
             assert (
-                im_h < h and im_w < w
+                im_h <= h and im_w <= w
             ), '(h, w) of target size should be no less than (im_h, im_w)'
         else:
-            h = np.ceil(im_h / self.size_divisor) * self.size_divisor
-            w = np.ceil(im_w / self.size_divisor) * self.size_divisor
+            h = int(np.ceil(im_h / self.size_divisor) * self.size_divisor)
+            w = int(np.ceil(im_w / self.size_divisor) * self.size_divisor)
 
         if h == im_h and w == im_w:
+            sample['image'] = im.astype(np.float32)
             return sample
 
         if self.pad_mode == -1:
@@ -2143,16 +2149,29 @@ class Rbox2Poly(BaseOperator):
 
 @register_op
 class AugmentHSV(BaseOperator):
-    def __init__(self, fraction=0.50, is_bgr=True):
-        """
-        Augment the SV channel of image data.
-        Args:
-            fraction (float): the fraction for augment. Default: 0.5.
-            is_bgr (bool): whether the image is BGR mode. Default: True.
-        """
+    """
+    Augment the SV channel of image data.
+    Args:
+        fraction (float): the fraction for augment. Default: 0.5.
+        is_bgr (bool): whether the image is BGR mode. Default: True.
+        hgain (float): H channel gain. Default: None.
+        sgain (float): S channel gain. Default: None.
+        vgain (float): V channel gain. Default: None.
+    """
+
+    def __init__(self,
+                 fraction=0.50,
+                 is_bgr=True,
+                 hgain=None,
+                 sgain=None,
+                 vgain=None):
         super(AugmentHSV, self).__init__()
         self.fraction = fraction
         self.is_bgr = is_bgr
+        self.hgain = hgain
+        self.sgain = sgain
+        self.vgain = vgain
+        self.use_hsvgain = False if hgain is None else True
 
     def apply(self, sample, context=None):
         img = sample['image']
@@ -2160,27 +2179,39 @@ class AugmentHSV(BaseOperator):
             img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
         else:
             img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
-        S = img_hsv[:, :, 1].astype(np.float32)
-        V = img_hsv[:, :, 2].astype(np.float32)
 
-        a = (random.random() * 2 - 1) * self.fraction + 1
-        S *= a
-        if a > 1:
-            np.clip(S, a_min=0, a_max=255, out=S)
+        if self.use_hsvgain:
+            hsv_augs = np.random.uniform(
+                -1, 1, 3) * [self.hgain, self.sgain, self.vgain]
+            # random selection of h, s, v
+            hsv_augs *= np.random.randint(0, 2, 3)
+            img_hsv[..., 0] = (img_hsv[..., 0] + hsv_augs[0]) % 180
+            img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_augs[1], 0, 255)
+            img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_augs[2], 0, 255)
+
+        else:
+            S = img_hsv[:, :, 1].astype(np.float32)
+            V = img_hsv[:, :, 2].astype(np.float32)
+
+            a = (random.random() * 2 - 1) * self.fraction + 1
+            S *= a
+            if a > 1:
+                np.clip(S, a_min=0, a_max=255, out=S)
+
+            a = (random.random() * 2 - 1) * self.fraction + 1
+            V *= a
+            if a > 1:
+                np.clip(V, a_min=0, a_max=255, out=V)
 
-        a = (random.random() * 2 - 1) * self.fraction + 1
-        V *= a
-        if a > 1:
-            np.clip(V, a_min=0, a_max=255, out=V)
+            img_hsv[:, :, 1] = S.astype(np.uint8)
+            img_hsv[:, :, 2] = V.astype(np.uint8)
 
-        img_hsv[:, :, 1] = S.astype(np.uint8)
-        img_hsv[:, :, 2] = V.astype(np.uint8)
         if self.is_bgr:
             cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
         else:
             cv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB, dst=img)
 
-        sample['image'] = img
+        sample['image'] = img.astype(np.float32)
         return sample
 
 
@@ -3023,3 +3054,380 @@ class CenterRandColor(BaseOperator):
             img = func(img, img_gray)
         sample['image'] = img
         return sample
+
+
+@register_op
+class Mosaic(BaseOperator):
+    """ Mosaic operator for image and gt_bboxes
+    The code is based on https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/data/datasets/mosaicdetection.py
+
+    1. get mosaic coords
+    2. clip bbox and get mosaic_labels
+    3. random_affine augment
+    4. Mixup augment as copypaste (optional), not used in tiny/nano
+
+    Args:
+        prob (float): probability of using Mosaic, 1.0 as default
+        input_dim (list[int]): input shape
+        degrees (list[2]): the rotate range to apply, transform range is [min, max]
+        translate (list[2]): the translate range to apply, transform range is [min, max]
+        scale (list[2]): the scale range to apply, transform range is [min, max]
+        shear (list[2]): the shear range to apply, transform range is [min, max]
+        enable_mixup (bool): whether to enable Mixup or not
+        mixup_prob (float): probability of using Mixup, 1.0 as default
+        mixup_scale (list[int]): scale range of Mixup
+        remove_outside_box (bool): whether to remove boxes outside the image,
+            False as default in COCO dataset, True in MOT dataset
+    """
+
+    def __init__(self,
+                 prob=1.0,
+                 input_dim=[640, 640],
+                 degrees=[-10, 10],
+                 translate=[-0.1, 0.1],
+                 scale=[0.1, 2],
+                 shear=[-2, 2],
+                 enable_mixup=True,
+                 mixup_prob=1.0,
+                 mixup_scale=[0.5, 1.5],
+                 remove_outside_box=False):
+        super(Mosaic, self).__init__()
+        self.prob = prob
+        if isinstance(input_dim, Integral):
+            input_dim = [input_dim, input_dim]
+        self.input_dim = input_dim
+        self.degrees = degrees
+        self.translate = translate
+        self.scale = scale
+        self.shear = shear
+        self.enable_mixup = enable_mixup
+        self.mixup_prob = mixup_prob
+        self.mixup_scale = mixup_scale
+        self.remove_outside_box = remove_outside_box
+
+    def get_mosaic_coords(self, mosaic_idx, xc, yc, w, h, input_h, input_w):
+        # (x1, y1, x2, y2) means coords in large image,
+        # small_coords means coords in small image in mosaic aug.
+        if mosaic_idx == 0:
+            # top left
+            x1, y1, x2, y2 = max(xc - w, 0), max(yc - h, 0), xc, yc
+            small_coords = w - (x2 - x1), h - (y2 - y1), w, h
+        elif mosaic_idx == 1:
+            # top right
+            x1, y1, x2, y2 = xc, max(yc - h, 0), min(xc + w, input_w * 2), yc
+            small_coords = 0, h - (y2 - y1), min(w, x2 - x1), h
+        elif mosaic_idx == 2:
+            # bottom left
+            x1, y1, x2, y2 = max(xc - w, 0), yc, xc, min(input_h * 2, yc + h)
+            small_coords = w - (x2 - x1), 0, w, min(y2 - y1, h)
+        elif mosaic_idx == 3:
+            # bottom right
+            x1, y1, x2, y2 = xc, yc, min(xc + w, input_w * 2), min(input_h * 2,
+                                                                   yc + h)
+            small_coords = 0, 0, min(w, x2 - x1), min(y2 - y1, h)
+
+        return (x1, y1, x2, y2), small_coords
+
+    def random_affine_augment(self,
+                              img,
+                              labels=[],
+                              input_dim=[640, 640],
+                              degrees=[-10, 10],
+                              scales=[0.1, 2],
+                              shears=[-2, 2],
+                              translates=[-0.1, 0.1]):
+        # random rotation and scale
+        degree = random.uniform(degrees[0], degrees[1])
+        scale = random.uniform(scales[0], scales[1])
+        assert scale > 0, "Argument scale should be positive."
+        R = cv2.getRotationMatrix2D(angle=degree, center=(0, 0), scale=scale)
+        M = np.ones([2, 3])
+
+        # random shear
+        shear = random.uniform(shears[0], shears[1])
+        shear_x = math.tan(shear * math.pi / 180)
+        shear_y = math.tan(shear * math.pi / 180)
+        M[0] = R[0] + shear_y * R[1]
+        M[1] = R[1] + shear_x * R[0]
+
+        # random translation
+        translate = random.uniform(translates[0], translates[1])
+        translation_x = translate * input_dim[0]
+        translation_y = translate * input_dim[1]
+        M[0, 2] = translation_x
+        M[1, 2] = translation_y
+
+        # warpAffine
+        img = cv2.warpAffine(
+            img, M, dsize=tuple(input_dim), borderValue=(114, 114, 114))
+
+        num_gts = len(labels)
+        if num_gts > 0:
+            # warp corner points
+            corner_points = np.ones((4 * num_gts, 3))
+            corner_points[:, :2] = labels[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
+                4 * num_gts, 2)  # x1y1, x2y2, x1y2, x2y1
+            # apply affine transform
+            corner_points = corner_points @ M.T
+            corner_points = corner_points.reshape(num_gts, 8)
+
+            # create new boxes
+            corner_xs = corner_points[:, 0::2]
+            corner_ys = corner_points[:, 1::2]
+            new_bboxes = np.concatenate((corner_xs.min(1), corner_ys.min(1),
+                                         corner_xs.max(1), corner_ys.max(1)))
+            new_bboxes = new_bboxes.reshape(4, num_gts).T
+
+            # clip boxes
+            new_bboxes[:, 0::2] = np.clip(new_bboxes[:, 0::2], 0, input_dim[0])
+            new_bboxes[:, 1::2] = np.clip(new_bboxes[:, 1::2], 0, input_dim[1])
+            labels[:, :4] = new_bboxes
+
+        return img, labels
+
+    def __call__(self, sample, context=None):
+        if not isinstance(sample, Sequence):
+            return sample
+
+        assert len(
+            sample
+        ) == 5, "Mosaic needs 5 samples, 4 for mosaic and 1 for mixup."
+        if np.random.uniform(0., 1.) > self.prob:
+            return sample[0]
+
+        mosaic_gt_bbox, mosaic_gt_class, mosaic_is_crowd = [], [], []
+        input_h, input_w = self.input_dim
+        yc = int(random.uniform(0.5 * input_h, 1.5 * input_h))
+        xc = int(random.uniform(0.5 * input_w, 1.5 * input_w))
+        mosaic_img = np.full(
+            (input_h * 2, input_w * 2, 3), 114, dtype=np.uint8)
+
+        # 1. get mosaic coords
+        for mosaic_idx, sp in enumerate(sample[:4]):
+            img = sp['image']
+            gt_bbox = sp['gt_bbox']
+            h0, w0 = img.shape[:2]
+            scale = min(1. * input_h / h0, 1. * input_w / w0)
+            img = cv2.resize(
+                img, (int(w0 * scale), int(h0 * scale)),
+                interpolation=cv2.INTER_LINEAR)
+            (h, w, c) = img.shape[:3]
+
+            # suffix l means large image, while s means small image in mosaic aug.
+            (l_x1, l_y1, l_x2, l_y2), (
+                s_x1, s_y1, s_x2, s_y2) = self.get_mosaic_coords(
+                    mosaic_idx, xc, yc, w, h, input_h, input_w)
+
+            mosaic_img[l_y1:l_y2, l_x1:l_x2] = img[s_y1:s_y2, s_x1:s_x2]
+            padw, padh = l_x1 - s_x1, l_y1 - s_y1
+
+            # Normalized xywh to pixel xyxy format
+            _gt_bbox = gt_bbox.copy()
+            if len(gt_bbox) > 0:
+                _gt_bbox[:, 0] = scale * gt_bbox[:, 0] + padw
+                _gt_bbox[:, 1] = scale * gt_bbox[:, 1] + padh
+                _gt_bbox[:, 2] = scale * gt_bbox[:, 2] + padw
+                _gt_bbox[:, 3] = scale * gt_bbox[:, 3] + padh
+
+            is_crowd = sp['is_crowd'] if 'is_crowd' in sp else np.zeros(
+                (len(_gt_bbox), 1), dtype=np.int32)
+            mosaic_gt_bbox.append(_gt_bbox)
+            mosaic_gt_class.append(sp['gt_class'])
+            mosaic_is_crowd.append(is_crowd)
+
+        # 2. clip bbox and get mosaic_labels([gt_bbox, gt_class, is_crowd])
+        if len(mosaic_gt_bbox):
+            mosaic_gt_bbox = np.concatenate(mosaic_gt_bbox, 0)
+            mosaic_gt_class = np.concatenate(mosaic_gt_class, 0)
+            mosaic_is_crowd = np.concatenate(mosaic_is_crowd, 0)
+            mosaic_labels = np.concatenate([
+                mosaic_gt_bbox, mosaic_gt_class.astype(mosaic_gt_bbox.dtype),
+                mosaic_is_crowd.astype(mosaic_gt_bbox.dtype)
+            ], 1)
+            if self.remove_outside_box:
+                # for MOT dataset
+                flag1 = mosaic_gt_bbox[:, 0] < 2 * input_w
+                flag2 = mosaic_gt_bbox[:, 2] > 0
+                flag3 = mosaic_gt_bbox[:, 1] < 2 * input_h
+                flag4 = mosaic_gt_bbox[:, 3] > 0
+                flag_all = flag1 * flag2 * flag3 * flag4
+                mosaic_labels = mosaic_labels[flag_all]
+            else:
+                mosaic_labels[:, 0] = np.clip(mosaic_labels[:, 0], 0,
+                                              2 * input_w)
+                mosaic_labels[:, 1] = np.clip(mosaic_labels[:, 1], 0,
+                                              2 * input_h)
+                mosaic_labels[:, 2] = np.clip(mosaic_labels[:, 2], 0,
+                                              2 * input_w)
+                mosaic_labels[:, 3] = np.clip(mosaic_labels[:, 3], 0,
+                                              2 * input_h)
+        else:
+            mosaic_labels = np.zeros((1, 6))
+
+        # 3. random_affine augment
+        mosaic_img, mosaic_labels = self.random_affine_augment(
+            mosaic_img,
+            mosaic_labels,
+            input_dim=self.input_dim,
+            degrees=self.degrees,
+            translates=self.translate,
+            scales=self.scale,
+            shears=self.shear)
+
+        # 4. Mixup augment as copypaste, https://arxiv.org/abs/2012.07177
+    # optional, not used (enable_mixup=False) in tiny/nano
+        if (self.enable_mixup and not len(mosaic_labels) == 0 and
+                random.random() < self.mixup_prob):
+            sample_mixup = sample[4]
+            mixup_img = sample_mixup['image']
+            cp_labels = np.concatenate([
+                sample_mixup['gt_bbox'],
+                sample_mixup['gt_class'].astype(mosaic_labels.dtype),
+                sample_mixup['is_crowd'].astype(mosaic_labels.dtype)
+            ], 1)
+            mosaic_img, mosaic_labels = self.mixup_augment(
+                mosaic_img, mosaic_labels, self.input_dim, cp_labels,
+                mixup_img)
+
+        sample0 = sample[0]
+        sample0['image'] = mosaic_img.astype(np.uint8)  # can not be float32
+        sample0['h'] = float(mosaic_img.shape[0])
+        sample0['w'] = float(mosaic_img.shape[1])
+        sample0['im_shape'][0] = sample0['h']
+        sample0['im_shape'][1] = sample0['w']
+        sample0['gt_bbox'] = mosaic_labels[:, :4].astype(np.float32)
+        sample0['gt_class'] = mosaic_labels[:, 4:5].astype(np.float32)
+        sample0['is_crowd'] = mosaic_labels[:, 5:6].astype(np.float32)
+        return sample0
+
+    def mixup_augment(self, origin_img, origin_labels, input_dim, cp_labels,
+                      img):
+        jit_factor = random.uniform(*self.mixup_scale)
+        FLIP = random.uniform(0, 1) > 0.5
+        if len(img.shape) == 3:
+            cp_img = np.ones(
+                (input_dim[0], input_dim[1], 3), dtype=np.uint8) * 114
+        else:
+            cp_img = np.ones(input_dim, dtype=np.uint8) * 114
+
+        cp_scale_ratio = min(input_dim[0] / img.shape[0],
+                             input_dim[1] / img.shape[1])
+        resized_img = cv2.resize(
+            img, (int(img.shape[1] * cp_scale_ratio),
+                  int(img.shape[0] * cp_scale_ratio)),
+            interpolation=cv2.INTER_LINEAR)
+
+        cp_img[:int(img.shape[0] * cp_scale_ratio), :int(img.shape[
+            1] * cp_scale_ratio)] = resized_img
+
+        cp_img = cv2.resize(cp_img, (int(cp_img.shape[1] * jit_factor),
+                                     int(cp_img.shape[0] * jit_factor)))
+        cp_scale_ratio *= jit_factor
+
+        if FLIP:
+            cp_img = cp_img[:, ::-1, :]
+
+        origin_h, origin_w = cp_img.shape[:2]
+        target_h, target_w = origin_img.shape[:2]
+        padded_img = np.zeros(
+            (max(origin_h, target_h), max(origin_w, target_w), 3),
+            dtype=np.uint8)
+        padded_img[:origin_h, :origin_w] = cp_img
+
+        x_offset, y_offset = 0, 0
+        if padded_img.shape[0] > target_h:
+            y_offset = random.randint(0, padded_img.shape[0] - target_h - 1)
+        if padded_img.shape[1] > target_w:
+            x_offset = random.randint(0, padded_img.shape[1] - target_w - 1)
+        padded_cropped_img = padded_img[y_offset:y_offset + target_h, x_offset:
+                                        x_offset + target_w]
+
+        # adjust boxes
+        cp_bboxes_origin_np = cp_labels[:, :4].copy()
+        cp_bboxes_origin_np[:, 0::2] = np.clip(cp_bboxes_origin_np[:, 0::2] *
+                                               cp_scale_ratio, 0, origin_w)
+        cp_bboxes_origin_np[:, 1::2] = np.clip(cp_bboxes_origin_np[:, 1::2] *
+                                               cp_scale_ratio, 0, origin_h)
+
+        if FLIP:
+            cp_bboxes_origin_np[:, 0::2] = (
+                origin_w - cp_bboxes_origin_np[:, 0::2][:, ::-1])
+        cp_bboxes_transformed_np = cp_bboxes_origin_np.copy()
+        if self.remove_outside_box:
+            # for MOT dataset
+            cp_bboxes_transformed_np[:, 0::2] -= x_offset
+            cp_bboxes_transformed_np[:, 1::2] -= y_offset
+        else:
+            cp_bboxes_transformed_np[:, 0::2] = np.clip(
+                cp_bboxes_transformed_np[:, 0::2] - x_offset, 0, target_w)
+            cp_bboxes_transformed_np[:, 1::2] = np.clip(
+                cp_bboxes_transformed_np[:, 1::2] - y_offset, 0, target_h)
+
+        cls_labels = cp_labels[:, 4:5].copy()
+        crd_labels = cp_labels[:, 5:6].copy()
+        box_labels = cp_bboxes_transformed_np
+        labels = np.hstack((box_labels, cls_labels, crd_labels))
+        if self.remove_outside_box:
+            labels = labels[labels[:, 0] < target_w]
+            labels = labels[labels[:, 2] > 0]
+            labels = labels[labels[:, 1] < target_h]
+            labels = labels[labels[:, 3] > 0]
+
+        origin_labels = np.vstack((origin_labels, labels))
+        origin_img = origin_img.astype(np.float32)
+        origin_img = 0.5 * origin_img + 0.5 * padded_cropped_img.astype(
+            np.float32)
+
+        return origin_img.astype(np.uint8), origin_labels
+
+
+@register_op
+class PadResize(BaseOperator):
+    """ PadResize for image and gt_bbbox
+
+    Args:
+        target_size (list[int]): input shape
+        fill_value (float): pixel value of padded image
+    """
+
+    def __init__(self, target_size, fill_value=114):
+        super(PadResize, self).__init__()
+        if isinstance(target_size, Integral):
+            target_size = [target_size, target_size]
+        self.target_size = target_size
+        self.fill_value = fill_value
+
+    def _resize(self, img, bboxes, labels):
+        ratio = min(self.target_size[0] / img.shape[0],
+                    self.target_size[1] / img.shape[1])
+        w, h = int(img.shape[1] * ratio), int(img.shape[0] * ratio)
+        resized_img = cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
+
+        if len(bboxes) > 0:
+            bboxes *= ratio
+            mask = np.minimum(bboxes[:, 2] - bboxes[:, 0],
+                              bboxes[:, 3] - bboxes[:, 1]) > 1
+            bboxes = bboxes[mask]
+            labels = labels[mask]
+        return resized_img, bboxes, labels
+
+    def _pad(self, img):
+        h, w, _ = img.shape
+        if h == self.target_size[0] and w == self.target_size[1]:
+            return img
+        padded_img = np.full(
+            (self.target_size[0], self.target_size[1], 3),
+            self.fill_value,
+            dtype=np.uint8)
+        padded_img[:h, :w] = img
+        return padded_img
+
+    def apply(self, sample, context=None):
+        image = sample['image']
+        bboxes = sample['gt_bbox']
+        labels = sample['gt_class']
+        image, bboxes, labels = self._resize(image, bboxes, labels)
+        sample['image'] = self._pad(image).astype(np.float32)
+        sample['gt_bbox'] = bboxes
+        sample['gt_class'] = labels
+        return sample
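
A sketch of the new operator's effect (shapes illustrative): the image is resized with aspect ratio kept, boxes that degenerate to a min side of 1 px or less after scaling are dropped, and the canvas is padded with the fill value to the target size.

    import numpy as np

    op = PadResize(target_size=[640, 640], fill_value=114)
    sample = {
        'image': np.zeros((480, 320, 3), np.uint8),
        'gt_bbox': np.array([[10., 10., 100., 100.]], np.float32),
        'gt_class': np.array([[0]], np.int32),
    }
    sample = op.apply(sample)
    # image -> (640, 640, 3) float32: resized by ratio min(640/480, 640/320) = 4/3,
    # then padded; gt_bbox is scaled by the same ratio.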

+ 72 - 0
paddlex/ppdet/data/utils.py

@@ -0,0 +1,72 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import numbers
+import numpy as np
+
+try:
+    from collections.abc import Sequence, Mapping
+except:
+    from collections import Sequence, Mapping
+
+
+def default_collate_fn(batch):
+    """
+    Default batch collating function for :code:`paddle.io.DataLoader`,
+    get input data as a list of sample datas, each element in list
+    if the data of a sample, and sample data should composed of list,
+    dictionary, string, number, numpy array, this
+    function will parse input data recursively and stack number,
+    numpy array and paddle.Tensor datas as batch datas. e.g. for
+    following input data:
+    [{'image': np.array(shape=[3, 224, 224]), 'label': 1},
+     {'image': np.array(shape=[3, 224, 224]), 'label': 3},
+     {'image': np.array(shape=[3, 224, 224]), 'label': 4},
+     {'image': np.array(shape=[3, 224, 224]), 'label': 5},]
+
+
+    This default collate function zipped each number and numpy array
+    field together and stack each field as the batch field as follows:
+    {'image': np.array(shape=[4, 3, 224, 224]), 'label': np.array([1, 3, 4, 5])}
+    Args:
+        batch(list of sample data): batch should be a list of sample data.
+
+    Returns:
+        Batched data: each number, numpy array and paddle.Tensor in the
+                      input data, stacked along a new batch dimension.
+    """
+    sample = batch[0]
+    if isinstance(sample, np.ndarray):
+        batch = np.stack(batch, axis=0)
+        return batch
+    elif isinstance(sample, numbers.Number):
+        batch = np.array(batch)
+        return batch
+    elif isinstance(sample, (str, bytes)):
+        return batch
+    elif isinstance(sample, Mapping):
+        return {
+            key: default_collate_fn([d[key] for d in batch])
+            for key in sample
+        }
+    elif isinstance(sample, Sequence):
+        sample_fields_num = len(sample)
+        if not all(len(item) == sample_fields_num for item in batch):
+            raise RuntimeError(
+                "fields number not same among samples in a batch")
+        return [default_collate_fn(fields) for fields in zip(*batch)]
+
+    raise TypeError("batch data con only contains: tensor, numpy.ndarray, "
+                    "dict, list, number, but got {}".format(type(sample)))

+ 168 - 6
paddlex/ppdet/engine/callbacks.py

@@ -184,7 +184,7 @@ class Checkpointer(Callback):
                     save_name = str(
                         epoch_id
                     ) if epoch_id != end_epoch - 1 else "model_final"
-                    weight = self.weight
+                    weight = self.weight.state_dict()
             elif mode == 'eval':
                 if 'save_best_model' in status and status['save_best_model']:
                     for metric in self.model._metrics:
@@ -200,15 +200,25 @@ class Checkpointer(Callback):
                                         "training iterations being too few or not " \
                                         "loading the correct weights.")
                             return
-                        if map_res[key][0] > self.best_ap:
+                        if map_res[key][0] >= self.best_ap:
                             self.best_ap = map_res[key][0]
                             save_name = 'best_model'
-                            weight = self.weight
+                            weight = self.weight.state_dict()
                         logger.info("Best test {} ap is {:0.3f}.".format(
                             key, self.best_ap))
             if weight:
-                save_model(weight, self.model.optimizer, self.save_dir,
-                           save_name, epoch_id + 1)
+                if self.model.use_ema:
+                    # save model and ema_model
+                    save_model(
+                        status['weight'],
+                        self.model.optimizer,
+                        self.save_dir,
+                        save_name,
+                        epoch_id + 1,
+                        ema_model=weight)
+                else:
+                    save_model(weight, self.model.optimizer, self.save_dir,
+                               save_name, epoch_id + 1)
 
 
 class WiferFaceEval(Callback):
@@ -253,7 +263,7 @@ class VisualDLWriter(Callback):
                 for loss_name, loss_value in training_staus.get().items():
                     self.vdl_writer.add_scalar(loss_name, loss_value,
                                                self.vdl_loss_step)
-                    self.vdl_loss_step += 1
+                self.vdl_loss_step += 1
             elif mode == 'test':
                 ori_image = status['original_image']
                 result_image = status['result_image']
@@ -281,6 +291,158 @@ class VisualDLWriter(Callback):
                 self.vdl_mAP_step += 1
 
 
+class WandbCallback(Callback):
+    def __init__(self, model):
+        super(WandbCallback, self).__init__(model)
+
+        try:
+            import wandb
+            self.wandb = wandb
+        except Exception as e:
+            logger.error('wandb not found, please install wandb. '
+                         'Use: `pip install wandb`.')
+            raise e
+
+        self.wandb_params = model.cfg.get('wandb', None)
+        self.save_dir = os.path.join(self.model.cfg.save_dir,
+                                     self.model.cfg.filename)
+        if self.wandb_params is None:
+            self.wandb_params = {}
+        for k, v in model.cfg.items():
+            if k.startswith("wandb_"):
+                # strip the "wandb_" prefix (str.lstrip would drop characters, not the prefix)
+                self.wandb_params.update({k[len("wandb_"):]: v})
+
+        self._run = None
+        if dist.get_world_size() < 2 or dist.get_rank() == 0:
+            _ = self.run
+            self.run.config.update(self.model.cfg)
+            self.run.define_metric("epoch")
+            self.run.define_metric("eval/*", step_metric="epoch")
+
+        self.best_ap = 0
+
+    @property
+    def run(self):
+        if self._run is None:
+            if self.wandb.run is not None:
+                logger.info(
+                    "There is an ongoing wandb run which will be used"
+                    "for logging. Please use `wandb.finish()` to end that"
+                    "if the behaviour is not intended")
+                self._run = self.wandb.run
+            else:
+                self._run = self.wandb.init(**self.wandb_params)
+        return self._run
+
+    def save_model(self,
+                   optimizer,
+                   save_dir,
+                   save_name,
+                   last_epoch,
+                   ema_model=None,
+                   ap=None,
+                   tags=None):
+        if dist.get_world_size() < 2 or dist.get_rank() == 0:
+            model_path = os.path.join(save_dir, save_name)
+            metadata = {}
+            metadata["last_epoch"] = last_epoch
+            if ap:
+                metadata["ap"] = ap
+            if ema_model:  # EMA in use: log both the EMA weights and the raw weights
+                ema_artifact = self.wandb.Artifact(
+                    name="ema_model-{}".format(self.run.id),
+                    type="model",
+                    metadata=metadata)
+                model_artifact = self.wandb.Artifact(
+                    name="model-{}".format(self.run.id),
+                    type="model",
+                    metadata=metadata)
+
+                ema_artifact.add_file(model_path + ".pdema", name="model_ema")
+                model_artifact.add_file(model_path + ".pdparams", name="model")
+
+                self.run.log_artifact(ema_artifact, aliases=tags)
+                self.run.log_artifact(model_artifact, aliases=tags)
+            else:
+                model_artifact = self.wandb.Artifact(
+                    name="model-{}".format(self.run.id),
+                    type="model",
+                    metadata=metadata)
+                model_artifact.add_file(model_path + ".pdparams", name="model")
+                self.run.log_artifact(model_artifact, aliases=tags)
+
+    def on_step_end(self, status):
+        mode = status['mode']
+        if dist.get_world_size() < 2 or dist.get_rank() == 0:
+            if mode == 'train':
+                training_status = status['training_staus'].get()
+                for k, v in training_status.items():
+                    training_status[k] = float(v)
+                metrics = {"train/" + k: v for k, v in training_status.items()}
+                self.run.log(metrics)
+
+    def on_epoch_end(self, status):
+        mode = status['mode']
+        epoch_id = status['epoch_id']
+        save_name = None
+        if dist.get_world_size() < 2 or dist.get_rank() == 0:
+            if mode == 'train':
+                end_epoch = self.model.cfg.epoch
+                if (
+                        epoch_id + 1
+                ) % self.model.cfg.snapshot_epoch == 0 or epoch_id == end_epoch - 1:
+                    save_name = str(
+                        epoch_id
+                    ) if epoch_id != end_epoch - 1 else "model_final"
+                    tags = ["latest", "epoch_{}".format(epoch_id)]
+                    self.save_model(
+                        self.model.optimizer,
+                        self.save_dir,
+                        save_name,
+                        epoch_id + 1,
+                        self.model.use_ema,
+                        tags=tags)
+            if mode == 'eval':
+                merged_dict = {}
+                for metric in self.model._metrics:
+                    for key, map_value in metric.get_results().items():
+                        merged_dict["eval/{}-mAP".format(key)] = map_value[0]
+                merged_dict["epoch"] = status["epoch_id"]
+                self.run.log(merged_dict)
+
+                if 'save_best_model' in status and status['save_best_model']:
+                    for metric in self.model._metrics:
+                        map_res = metric.get_results()
+                        if 'bbox' in map_res:
+                            key = 'bbox'
+                        elif 'keypoint' in map_res:
+                            key = 'keypoint'
+                        else:
+                            key = 'mask'
+                        if key not in map_res:
+                            logger.warning("Evaluation results empty, this may be due to " \
+                                        "training iterations being too few or not " \
+                                        "loading the correct weights.")
+                            return
+                        if map_res[key][0] >= self.best_ap:
+                            self.best_ap = map_res[key][0]
+                            save_name = 'best_model'
+                            tags = ["best", "epoch_{}".format(epoch_id)]
+
+                            self.save_model(
+                                self.model.optimizer,
+                                self.save_dir,
+                                save_name,
+                                last_epoch=epoch_id + 1,
+                                ema_model=self.model.use_ema,
+                                ap=self.best_ap,
+                                tags=tags)
+
+    def on_train_end(self, status):
+        self.run.finish()
+
+
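
The trainer (see the trainer.py hunk below) enables this callback when the config carries a `wandb` section or `use_wandb: true`; a hypothetical config fragment, written as the dict it amounts to (key names under `wandb` are just `wandb.init` kwargs, and any top-level `wandb_*` key is merged into them):
```
cfg_fragment = {
    'use_wandb': True,
    'wandb': {'project': 'ppdet-demo', 'entity': 'my-team'},  # illustrative names
    'wandb_tags': ['yolox', 'exp1'],  # becomes init kwarg 'tags'
}
```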
 class SniperProposalsGenerator(Callback):
     def __init__(self, model):
         super(SniperProposalsGenerator, self).__init__(model)

+ 25 - 6
paddlex/ppdet/engine/export_utils.py

@@ -41,16 +41,18 @@ TRT_MIN_SUBGRAPH = {
     'HigherHRNet': 3,
     'HRNet': 3,
     'DeepSORT': 3,
+    'ByteTrack': 10,
     'JDE': 10,
     'FairMOT': 5,
     'GFL': 16,
     'PicoDet': 3,
     'CenterNet': 5,
     'TOOD': 5,
+    'YOLOX': 8,
 }
 
 KEYPOINT_ARCH = ['HigherHRNet', 'TopDownHRNet']
-MOT_ARCH = ['DeepSORT', 'JDE', 'FairMOT']
+MOT_ARCH = ['DeepSORT', 'JDE', 'FairMOT', 'ByteTrack']
 
 
 def _prune_input_spec(input_spec, program, targets):
@@ -120,12 +122,18 @@ def _dump_infer_config(config, path, image_shape, model):
     setup_orderdict()
     use_dynamic_shape = True if image_shape[2] == -1 else False
     infer_cfg = OrderedDict({
-        'mode': 'fluid',
+        'mode': 'paddle',
         'draw_threshold': 0.5,
         'metric': config['metric'],
         'use_dynamic_shape': use_dynamic_shape
     })
+    export_onnx = config.get('export_onnx', False)
+
     infer_arch = config['architecture']
+    if 'RCNN' in infer_arch and export_onnx:
+        logger.warning(
+            "Exporting RCNN model to ONNX only support batch_size = 1")
+        infer_cfg['export_onnx'] = True
 
     if infer_arch in MOT_ARCH:
         if infer_arch == 'DeepSORT':
@@ -140,6 +148,12 @@ def _dump_infer_config(config, path, image_shape, model):
             infer_cfg['min_subgraph_size'] = min_subgraph_size
             arch_state = True
             break
+
+    if infer_arch == 'YOLOX':
+        infer_cfg['arch'] = infer_arch
+        infer_cfg['min_subgraph_size'] = TRT_MIN_SUBGRAPH[infer_arch]
+        arch_state = True
+
     if not arch_state:
         logger.error(
             'Architecture: {} is not supported for exporting model now.\n'.
@@ -165,12 +179,17 @@ def _dump_infer_config(config, path, image_shape, model):
         reader_cfg, dataset_cfg, config['metric'], label_arch, image_shape[1:])
 
     if infer_arch == 'PicoDet':
-        infer_cfg['NMS'] = config['PicoHead']['nms']
+        if hasattr(config, 'export') and config['export'].get(
+                'post_process',
+                False) and not config['export'].get('benchmark', False):
+            infer_cfg['arch'] = 'GFL'
+        head_name = 'PicoHeadV2' if config['PicoHeadV2'] else 'PicoHead'
+        infer_cfg['NMS'] = config[head_name]['nms']
         # In order to speed up the prediction, the threshold of nms
         # is adjusted here, which can be changed in infer_cfg.yml
-        config['PicoHead']['nms']["score_threshold"] = 0.3
-        config['PicoHead']['nms']["nms_threshold"] = 0.5
-        infer_cfg['fpn_stride'] = config['PicoHead']['fpn_stride']
+        config[head_name]['nms']["score_threshold"] = 0.3
+        config[head_name]['nms']["nms_threshold"] = 0.5
+        infer_cfg['fpn_stride'] = config[head_name]['fpn_stride']
 
     yaml.dump(infer_cfg, open(path, 'w'))
     logger.info("Export inference config file to {}".format(

+ 141 - 73
paddlex/ppdet/engine/tracker.py

@@ -17,27 +17,32 @@ from __future__ import division
 from __future__ import print_function
 
 import os
-import cv2
 import glob
 import re
 import paddle
 import numpy as np
-import os.path as osp
+from tqdm import tqdm
 from collections import defaultdict
 
 from paddlex.ppdet.core.workspace import create
 from paddlex.ppdet.utils.checkpoint import load_weight, load_pretrain_weight
 from paddlex.ppdet.modeling.mot.utils import Detection, get_crops, scale_coords, clip_box
 from paddlex.ppdet.modeling.mot.utils import MOTTimer, load_det_results, write_mot_results, save_vis_results
+from paddlex.ppdet.modeling.mot.tracker import JDETracker, DeepSORTTracker
 
-from paddlex.ppdet.metrics import Metric, MOTMetric, KITTIMOTMetric
-from paddlex.ppdet.metrics import MCMOTMetric
+from paddlex.ppdet.metrics import Metric, MOTMetric, KITTIMOTMetric, MCMOTMetric
+import paddlex.ppdet.utils.stats as stats
 
 from .callbacks import Callback, ComposeCallback
 
 from paddlex.ppdet.utils.logger import setup_logger
 logger = setup_logger(__name__)
 
+MOT_ARCH = ['DeepSORT', 'JDE', 'FairMOT', 'ByteTrack']
+MOT_ARCH_JDE = ['JDE', 'FairMOT']
+MOT_ARCH_SDE = ['DeepSORT', 'ByteTrack']
+MOT_DATA_TYPE = ['mot', 'mcmot', 'kitti']
+
 __all__ = ['Tracker']
 
 
@@ -108,11 +113,15 @@ class Tracker(object):
         load_weight(self.model, weights, self.optimizer)
 
     def load_weights_sde(self, det_weights, reid_weights):
-        if self.model.detector:
+        with_detector = self.model.detector is not None
+        with_reid = self.model.reid is not None
+
+        if with_detector:
             load_weight(self.model.detector, det_weights)
-            load_weight(self.model.reid, reid_weights)
+            if with_reid:
+                load_weight(self.model.reid, reid_weights)
         else:
-            load_weight(self.model.reid, reid_weights, self.optimizer)
+            load_weight(self.model.reid, reid_weights)
 
     def _eval_seq_jde(self,
                       dataloader,
@@ -131,11 +140,8 @@ class Tracker(object):
         self.model.eval()
         results = defaultdict(list)  # support single class and multi classes
 
-        for step_id, data in enumerate(dataloader):
+        for step_id, data in enumerate(tqdm(dataloader)):
             self.status['step_id'] = step_id
-            if frame_id % 40 == 0:
-                logger.info('Processing frame {} ({:.2f} fps)'.format(
-                    frame_id, 1. / max(1e-5, timer.average_time)))
             # forward
             timer.tic()
             pred_dets, pred_embs = self.model(data)
@@ -184,24 +190,23 @@ class Tracker(object):
         if save_dir:
             if not os.path.exists(save_dir): os.makedirs(save_dir)
         use_detector = False if not self.model.detector else True
+        use_reid = False if not self.model.reid else True
 
         timer = MOTTimer()
         results = defaultdict(list)
         frame_id = 0
         self.status['mode'] = 'track'
         self.model.eval()
-        self.model.reid.eval()
+        if use_reid:
+            self.model.reid.eval()
         if not use_detector:
             dets_list = load_det_results(det_file, len(dataloader))
             logger.info('Finish loading detection results file {}.'.format(
                 det_file))
 
-        for step_id, data in enumerate(dataloader):
+        tracker = self.model.tracker
+        for step_id, data in enumerate(tqdm(dataloader)):
             self.status['step_id'] = step_id
-            if frame_id % 40 == 0:
-                logger.info('Processing frame {} ({:.2f} fps)'.format(
-                    frame_id, 1. / max(1e-5, timer.average_time)))
-
             ori_image = data['ori_image']  # [bs, H, W, 3]
             ori_image_shape = data['ori_image'].shape[1:3]
             # ori_image_shape: [H, W]
@@ -240,7 +245,7 @@ class Tracker(object):
                 outs['bbox'] = outs['bbox'].numpy()
                 outs['bbox_num'] = outs['bbox_num'].numpy()
 
-                if outs['bbox_num'] > 0 and empty_detections == False:
+                if len(outs['bbox']) > 0 and not empty_detections:
                     # detector outputs: pred_cls_ids, pred_scores, pred_bboxes
                     pred_cls_ids = outs['bbox'][:, 0:1]
                     pred_scores = outs['bbox'][:, 1:2]
@@ -256,6 +261,8 @@ class Tracker(object):
                                                    scale_factor)
                     else:
                         pred_bboxes = outs['bbox'][:, 2:]
+                    pred_dets_old = np.concatenate(
+                        (pred_cls_ids, pred_scores, pred_bboxes), axis=1)
                 else:
                     logger.warning(
                         'Frame {} has not detected object, try to modify score threshold.'.
@@ -281,52 +288,82 @@ class Tracker(object):
                 # thus will not inference reid model
                 continue
 
-            pred_scores = pred_scores[keep_idx[0]]
             pred_cls_ids = pred_cls_ids[keep_idx[0]]
-            pred_tlwhs = np.concatenate(
-                (pred_xyxys[:, 0:2],
-                 pred_xyxys[:, 2:4] - pred_xyxys[:, 0:2] + 1),
-                axis=1)
+            pred_scores = pred_scores[keep_idx[0]]
             pred_dets = np.concatenate(
-                (pred_tlwhs, pred_scores, pred_cls_ids), axis=1)
-
-            tracker = self.model.tracker
-            crops = get_crops(
-                pred_xyxys,
-                ori_image,
-                w=tracker.input_size[0],
-                h=tracker.input_size[1])
-            crops = paddle.to_tensor(crops)
-
-            data.update({'crops': crops})
-            pred_embs = self.model(data).numpy()
-
-            tracker.predict()
-            online_targets = tracker.update(pred_dets, pred_embs)
-
-            online_tlwhs, online_scores, online_ids = [], [], []
-            for t in online_targets:
-                if not t.is_confirmed() or t.time_since_update > 1:
-                    continue
-                tlwh = t.to_tlwh()
-                tscore = t.score
-                tid = t.track_id
-                if tscore < draw_threshold: continue
-                if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
-                if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
-                        3] > tracker.vertical_ratio:
-                    continue
-                online_tlwhs.append(tlwh)
-                online_scores.append(tscore)
-                online_ids.append(tid)
-            timer.toc()
+                (pred_cls_ids, pred_scores, pred_xyxys), axis=1)
+
+            if use_reid:
+                crops = get_crops(
+                    pred_xyxys,
+                    ori_image,
+                    w=tracker.input_size[0],
+                    h=tracker.input_size[1])
+                crops = paddle.to_tensor(crops)
+
+                data.update({'crops': crops})
+                pred_embs = self.model(data)['embeddings'].numpy()
+            else:
+                pred_embs = None
+
+            if isinstance(tracker, DeepSORTTracker):
+                online_tlwhs, online_scores, online_ids = [], [], []
+                tracker.predict()
+                online_targets = tracker.update(pred_dets, pred_embs)
+                for t in online_targets:
+                    if not t.is_confirmed() or t.time_since_update > 1:
+                        continue
+                    tlwh = t.to_tlwh()
+                    tscore = t.score
+                    tid = t.track_id
+                    if tscore < draw_threshold: continue
+                    if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
+                    if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
+                            3] > tracker.vertical_ratio:
+                        continue
+                    online_tlwhs.append(tlwh)
+                    online_scores.append(tscore)
+                    online_ids.append(tid)
+                timer.toc()
+
+                # save results
+                results[0].append(
+                    (frame_id + 1, online_tlwhs, online_scores, online_ids))
+                save_vis_results(data, frame_id, online_ids, online_tlwhs,
+                                 online_scores, timer.average_time, show_image,
+                                 save_dir, self.cfg.num_classes)
+
+            elif isinstance(tracker, JDETracker):
+                # trick hyperparams only used for MOTChallenge (MOT17, MOT20) Test-set
+                tracker.track_buffer, tracker.conf_thres = get_trick_hyperparams(
+                    seq_name, tracker.track_buffer, tracker.conf_thres)
+
+                online_targets_dict = tracker.update(pred_dets_old, pred_embs)
+                online_tlwhs = defaultdict(list)
+                online_scores = defaultdict(list)
+                online_ids = defaultdict(list)
+                for cls_id in range(self.cfg.num_classes):
+                    online_targets = online_targets_dict[cls_id]
+                    for t in online_targets:
+                        tlwh = t.tlwh
+                        tid = t.track_id
+                        tscore = t.score
+                        if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
+                        if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
+                                3] > tracker.vertical_ratio:
+                            continue
+                        online_tlwhs[cls_id].append(tlwh)
+                        online_ids[cls_id].append(tid)
+                        online_scores[cls_id].append(tscore)
+                    # save results
+                    results[cls_id].append(
+                        (frame_id + 1, online_tlwhs[cls_id],
+                         online_scores[cls_id], online_ids[cls_id]))
+                timer.toc()
+                save_vis_results(data, frame_id, online_ids, online_tlwhs,
+                                 online_scores, timer.average_time, show_image,
+                                 save_dir, self.cfg.num_classes)
 
-            # save results
-            results[0].append(
-                (frame_id + 1, online_tlwhs, online_scores, online_ids))
-            save_vis_results(data, frame_id, online_ids, online_tlwhs,
-                             online_scores, timer.average_time, show_image,
-                             save_dir, self.cfg.num_classes)
             frame_id += 1
 
         return results, frame_id, timer.average_time, timer.calls
@@ -345,10 +382,10 @@ class Tracker(object):
         if not os.path.exists(output_dir): os.makedirs(output_dir)
         result_root = os.path.join(output_dir, 'mot_results')
         if not os.path.exists(result_root): os.makedirs(result_root)
-        assert data_type in ['mot', 'mcmot', 'kitti'], \
+        assert data_type in MOT_DATA_TYPE, \
             "data_type should be 'mot', 'mcmot' or 'kitti'"
-        assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
-            "model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"
+        assert model_type in MOT_ARCH, \
+            "model_type should be 'JDE', 'DeepSORT', 'FairMOT' or 'ByteTrack'"
 
         # run tracking
         n_frame = 0
@@ -372,7 +409,7 @@ class Tracker(object):
             save_dir = os.path.join(
                 output_dir, 'mot_outputs',
                 seq) if save_images or save_videos else None
-            logger.info('start seq: {}'.format(seq))
+            logger.info('Evaluate seq: {}'.format(seq))
 
             self.dataset.set_images(self.get_infer_images(infer_dir))
             dataloader = create('EvalMOTReader')(self.dataset, 0)
@@ -380,13 +417,13 @@ class Tracker(object):
             result_filename = os.path.join(result_root, '{}.txt'.format(seq))
 
             with paddle.no_grad():
-                if model_type in ['JDE', 'FairMOT']:
+                if model_type in MOT_ARCH_JDE:
                     results, nf, ta, tc = self._eval_seq_jde(
                         dataloader,
                         save_dir=save_dir,
                         show_image=show_image,
                         frame_rate=frame_rate)
-                elif model_type in ['DeepSORT']:
+                elif model_type in MOT_ARCH_SDE:
                     results, nf, ta, tc = self._eval_seq_sde(
                         dataloader,
                         save_dir=save_dir,
@@ -413,7 +450,6 @@ class Tracker(object):
                 os.system(cmd_str)
                 logger.info('Save video in {}.'.format(output_video_path))
 
-            logger.info('Evaluate seq: {}'.format(seq))
             # update metrics
             for metric in self._metrics:
                 metric.update(data_root, seq, data_type, result_root,
@@ -472,10 +508,10 @@ class Tracker(object):
         if not os.path.exists(output_dir): os.makedirs(output_dir)
         result_root = os.path.join(output_dir, 'mot_results')
         if not os.path.exists(result_root): os.makedirs(result_root)
-        assert data_type in ['mot', 'mcmot', 'kitti'], \
+        assert data_type in MOT_DATA_TYPE, \
             "data_type should be 'mot', 'mcmot' or 'kitti'"
-        assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
-            "model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"
+        assert model_type in MOT_ARCH, \
+            "model_type should be 'JDE', 'DeepSORT', 'FairMOT' or 'ByteTrack'"
 
         # run tracking
         if video_file:
@@ -505,14 +541,14 @@ class Tracker(object):
             frame_rate = self.dataset.frame_rate
 
         with paddle.no_grad():
-            if model_type in ['JDE', 'FairMOT']:
+            if model_type in MOT_ARCH_JDE:
                 results, nf, ta, tc = self._eval_seq_jde(
                     dataloader,
                     save_dir=save_dir,
                     show_image=show_image,
                     frame_rate=frame_rate,
                     draw_threshold=draw_threshold)
-            elif model_type in ['DeepSORT']:
+            elif model_type in MOT_ARCH_SDE:
                 results, nf, ta, tc = self._eval_seq_sde(
                     dataloader,
                     save_dir=save_dir,
@@ -536,3 +572,35 @@ class Tracker(object):
 
         write_mot_results(result_filename, results, data_type,
                           self.cfg.num_classes)
+
+
+def get_trick_hyperparams(video_name, ori_buffer, ori_thresh):
+    if video_name[:3] != 'MOT':
+        # only used for MOTChallenge (MOT17, MOT20) Test-set
+        return ori_buffer, ori_thresh
+
+    video_name = video_name[:8]
+    if 'MOT17-05' in video_name:
+        track_buffer = 14
+    elif 'MOT17-13' in video_name:
+        track_buffer = 25
+    else:
+        track_buffer = ori_buffer
+
+    if 'MOT17-01' in video_name:
+        track_thresh = 0.65
+    elif 'MOT17-06' in video_name:
+        track_thresh = 0.65
+    elif 'MOT17-12' in video_name:
+        track_thresh = 0.7
+    elif 'MOT17-14' in video_name:
+        track_thresh = 0.67
+    else:
+        track_thresh = ori_thresh
+
+    if 'MOT20-06' in video_name or 'MOT20-08' in video_name:
+        track_thresh = 0.3
+
+    return track_buffer, track_thresh
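
For instance, with the corrected return above, a MOT17-05 sequence shortens the track buffer but keeps the caller's threshold, while MOT20-06 lowers the threshold; a small sketch:
```
buf, thresh = get_trick_hyperparams('MOT17-05-SDP', ori_buffer=30, ori_thresh=0.6)
assert (buf, thresh) == (14, 0.6)

buf, thresh = get_trick_hyperparams('MOT20-06', ori_buffer=30, ori_thresh=0.6)
assert (buf, thresh) == (30, 0.3)
```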

+ 194 - 64
paddlex/ppdet/engine/trainer.py

@@ -20,30 +20,33 @@ import os
 import sys
 import copy
 import time
+from tqdm import tqdm
 
 import numpy as np
 import typing
-from PIL import Image, ImageOps
+from PIL import Image, ImageOps, ImageFile
+
+ImageFile.LOAD_TRUNCATED_IMAGES = True
 
 import paddle
+import paddle.nn as nn
 import paddle.distributed as dist
 from paddle.distributed import fleet
-from paddle import amp
 from paddle.static import InputSpec
 from paddlex.ppdet.optimizer import ModelEMA
 
 from paddlex.ppdet.core.workspace import create
-from paddlex.ppdet.modeling.architectures.meta_arch import BaseArch
 from paddlex.ppdet.utils.checkpoint import load_weight, load_pretrain_weight
 from paddlex.ppdet.utils.visualizer import visualize_results, save_result
 from paddlex.ppdet.metrics import Metric, COCOMetric, VOCMetric, WiderFaceMetric, get_infer_results, KeyPointTopDownCOCOEval, KeyPointTopDownMPIIEval
 from paddlex.ppdet.metrics import RBoxMetric, JDEDetMetric, SNIPERCOCOMetric
 from paddlex.ppdet.data.source.sniper_coco import SniperCOCODataSet
 from paddlex.ppdet.data.source.category import get_categories
-from paddlex.ppdet.utils import stats
+import paddlex.ppdet.utils.stats as stats
+from paddlex.ppdet.utils.fuse_utils import fuse_conv_bn
 from paddlex.ppdet.utils import profiler
 
-from .callbacks import Callback, ComposeCallback, LogPrinter, Checkpointer, WiferFaceEval, VisualDLWriter, SniperProposalsGenerator
+from .callbacks import Callback, ComposeCallback, LogPrinter, Checkpointer, WiferFaceEval, VisualDLWriter, SniperProposalsGenerator, WandbCallback
 from .export_utils import _dump_infer_config, _prune_input_spec
 
 from paddlex.ppdet.utils.logger import setup_logger
@@ -51,7 +54,7 @@ logger = setup_logger('ppdet.engine')
 
 __all__ = ['Trainer']
 
-MOT_ARCH = ['DeepSORT', 'JDE', 'FairMOT']
+MOT_ARCH = ['DeepSORT', 'JDE', 'FairMOT', 'ByteTrack']
 
 
 class Trainer(object):
@@ -64,17 +67,24 @@ class Trainer(object):
         self.is_loaded_weights = False
 
         # build data loader
+        capital_mode = self.mode.capitalize()
         if cfg.architecture in MOT_ARCH and self.mode in ['eval', 'test']:
-            self.dataset = cfg['{}MOTDataset'.format(self.mode.capitalize())]
+            self.dataset = self.cfg['{}MOTDataset'.format(
+                capital_mode)] = create('{}MOTDataset'.format(capital_mode))()
         else:
-            self.dataset = cfg['{}Dataset'.format(self.mode.capitalize())]
+            self.dataset = self.cfg['{}Dataset'.format(capital_mode)] = create(
+                '{}Dataset'.format(capital_mode))()
 
         if cfg.architecture == 'DeepSORT' and self.mode == 'train':
             logger.error('DeepSORT has no need of training on mot dataset.')
             sys.exit(1)
 
+        if cfg.architecture == 'FairMOT' and self.mode == 'eval':
+            images = self.parse_mot_images(cfg)
+            self.dataset.set_images(images)
+
         if self.mode == 'train':
-            self.loader = create('{}Reader'.format(self.mode.capitalize()))(
+            self.loader = create('{}Reader'.format(capital_mode))(
                 self.dataset, cfg.worker_num)
 
         if cfg.architecture == 'JDE' and self.mode == 'train':
@@ -94,30 +104,51 @@ class Trainer(object):
             self.model = self.cfg.model
             self.is_loaded_weights = True
 
+        if cfg.architecture == 'YOLOX':
+            for k, m in self.model.named_sublayers():
+                if isinstance(m, nn.BatchNorm2D):
+                    m._epsilon = 1e-3  # for amp(fp16)
+                    m._momentum = 0.97  # Paddle momentum 0.97 == 1 - 0.03 (PyTorch convention)
+
         #normalize params for deploy
-        self.model.load_meanstd(cfg['TestReader']['sample_transforms'])
+        if 'slim' in cfg and cfg['slim_type'] == 'OFA':
+            self.model.model.load_meanstd(cfg['TestReader'][
+                'sample_transforms'])
+        elif 'slim' in cfg and cfg['slim_type'] == 'Distill':
+            self.model.student_model.load_meanstd(cfg['TestReader'][
+                'sample_transforms'])
+        elif 'slim' in cfg and cfg[
+                'slim_type'] == 'DistillPrune' and self.mode == 'train':
+            self.model.student_model.load_meanstd(cfg['TestReader'][
+                'sample_transforms'])
+        else:
+            self.model.load_meanstd(cfg['TestReader']['sample_transforms'])
 
         self.use_ema = ('use_ema' in cfg and cfg['use_ema'])
         if self.use_ema:
             ema_decay = self.cfg.get('ema_decay', 0.9998)
             cycle_epoch = self.cfg.get('cycle_epoch', -1)
+            ema_decay_type = self.cfg.get('ema_decay_type', 'threshold')
             self.ema = ModelEMA(
                 self.model,
                 decay=ema_decay,
-                use_thres_step=True,
+                ema_decay_type=ema_decay_type,
                 cycle_epoch=cycle_epoch)
 
         # EvalDataset build with BatchSampler to evaluate in single device
         # TODO: multi-device evaluate
         if self.mode == 'eval':
-            self._eval_batch_sampler = paddle.io.BatchSampler(
-                self.dataset, batch_size=self.cfg.EvalReader['batch_size'])
-            reader_name = '{}Reader'.format(self.mode.capitalize())
-            # If metric is VOC, need to be set collate_batch=False.
-            if cfg.metric == 'VOC':
-                cfg[reader_name]['collate_batch'] = False
-            self.loader = create(reader_name)(self.dataset, cfg.worker_num,
-                                              self._eval_batch_sampler)
+            if cfg.architecture == 'FairMOT':
+                self.loader = create('EvalMOTReader')(self.dataset, 0)
+            else:
+                self._eval_batch_sampler = paddle.io.BatchSampler(
+                    self.dataset, batch_size=self.cfg.EvalReader['batch_size'])
+                reader_name = '{}Reader'.format(self.mode.capitalize())
+                # If metric is VOC, need to be set collate_batch=False.
+                if cfg.metric == 'VOC':
+                    cfg[reader_name]['collate_batch'] = False
+                self.loader = create(reader_name)(self.dataset, cfg.worker_num,
+                                                  self._eval_batch_sampler)
         # TestDataset build after user set images, skip loader creation here
 
         # build optimizer in train mode
@@ -126,9 +157,10 @@ class Trainer(object):
             self.lr = create('LearningRate')(steps_per_epoch)
             self.optimizer = create('OptimizerBuilder')(self.lr, self.model)
 
-        if self.cfg.get('unstructured_prune'):
-            self.pruner = create('UnstructuredPruner')(self.model,
-                                                       steps_per_epoch)
+            # Unstructured pruner is only enabled in the train mode.
+            if self.cfg.get('unstructured_prune'):
+                self.pruner = create('UnstructuredPruner')(self.model,
+                                                           steps_per_epoch)
 
         self._nranks = dist.get_world_size()
         self._local_rank = dist.get_rank()
@@ -152,6 +184,8 @@ class Trainer(object):
                 self._callbacks.append(VisualDLWriter(self))
             if self.cfg.get('save_proposals', False):
                 self._callbacks.append(SniperProposalsGenerator(self))
+            if self.cfg.get('use_wandb', False) or 'wandb' in self.cfg:
+                self._callbacks.append(WandbCallback(self))
             self._compose_callback = ComposeCallback(self._callbacks)
         elif self.mode == 'eval':
             self._callbacks = [LogPrinter(self)]
@@ -172,7 +206,7 @@ class Trainer(object):
         classwise = self.cfg['classwise'] if 'classwise' in self.cfg else False
         if self.cfg.metric == 'COCO' or self.cfg.metric == "SNIPERCOCO":
             # TODO: bias should be unified
-            bias = self.cfg['bias'] if 'bias' in self.cfg else 0
+            bias = 1 if self.cfg.get('bias', False) else 0
             output_eval = self.cfg['output_eval'] \
                 if 'output_eval' in self.cfg else None
             save_prediction_only = self.cfg.get('save_prediction_only', False)
@@ -184,13 +218,14 @@ class Trainer(object):
 
             # when do validation in train, annotation file should be get from
             # EvalReader instead of self.dataset(which is TrainReader)
-            anno_file = self.dataset.get_anno()
-            dataset = self.dataset
             if self.mode == 'train' and validate:
                 eval_dataset = self.cfg['EvalDataset']
                 eval_dataset.check_or_download_dataset()
                 anno_file = eval_dataset.get_anno()
                 dataset = eval_dataset
+            else:
+                dataset = self.dataset
+                anno_file = dataset.get_anno()
 
             IouType = self.cfg['IouType'] if 'IouType' in self.cfg else 'bbox'
             if self.cfg.metric == "COCO":
@@ -334,20 +369,32 @@ class Trainer(object):
             self.start_epoch = load_weight(self.model.student_model, weights,
                                            self.optimizer)
         else:
-            self.start_epoch = load_weight(self.model, weights, self.optimizer)
+            self.start_epoch = load_weight(self.model, weights, self.optimizer,
+                                           self.ema if self.use_ema else None)
         logger.debug("Resume weights of epoch {}".format(self.start_epoch))
 
     def train(self, validate=False):
         assert self.mode == 'train', "Model not in 'train' mode"
         Init_mark = False
-
-        sync_bn = (
-            getattr(self.cfg, 'norm_type', None) in [None, 'sync_bn'] and
-            self.cfg.use_gpu and self._nranks > 1)
-        if sync_bn:
-            self.model = BaseArch.convert_sync_batchnorm(self.model)
+        if validate:
+            self.cfg['EvalDataset'] = self.cfg.EvalDataset = create(
+                "EvalDataset")()
 
         model = self.model
+        sync_bn = (getattr(self.cfg, 'norm_type', None) == 'sync_bn' and
+                   self.cfg.use_gpu and self._nranks > 1)
+        if sync_bn:
+            model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model)
+
+        # enable auto mixed precision (AMP) mode
+        use_amp = self.cfg.get('amp', False)
+        amp_level = self.cfg.get('amp_level', 'O1')
+        if use_amp:
+            scaler = paddle.amp.GradScaler(
+                enable=self.cfg.use_gpu or self.cfg.use_npu,
+                init_loss_scaling=self.cfg.get('init_loss_scaling', 1024))
+            model = paddle.amp.decorate(models=model, level=amp_level)
+        # get distributed model
         if self.cfg.get('fleet', False):
             model = fleet.distributed_model(model)
             self.optimizer = fleet.distributed_optimizer(self.optimizer)
@@ -355,12 +402,7 @@ class Trainer(object):
             find_unused_parameters = self.cfg[
                 'find_unused_parameters'] if 'find_unused_parameters' in self.cfg else False
             model = paddle.DataParallel(
-                self.model, find_unused_parameters=find_unused_parameters)
-
-        # initial fp16
-        if self.cfg.get('fp16', False):
-            scaler = amp.GradScaler(
-                enable=self.cfg.use_gpu, init_loss_scaling=1024)
+                model, find_unused_parameters=find_unused_parameters)
 
         self.status.update({
             'epoch_id': self.start_epoch,
@@ -396,12 +438,12 @@ class Trainer(object):
                 self._compose_callback.on_step_begin(self.status)
                 data['epoch_id'] = epoch_id
 
-                if self.cfg.get('fp16', False):
-                    with amp.auto_cast(enable=self.cfg.use_gpu):
+                if use_amp:
+                    with paddle.amp.auto_cast(
+                            enable=self.cfg.use_gpu, level=amp_level):
                         # model forward
                         outputs = model(data)
                         loss = outputs['loss']
-
                     # model backward
                     scaled_loss = scaler.scale(loss)
                     scaled_loss.backward()
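
The AMP wiring here follows the standard `paddle.amp` pattern; a self-contained sketch of one training step under that pattern (not ppdet-specific, assuming Paddle >= 2.2):
```
import paddle

model = paddle.nn.Linear(8, 1)
opt = paddle.optimizer.SGD(parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
model = paddle.amp.decorate(models=model, level='O1')

x = paddle.randn([4, 8])
with paddle.amp.auto_cast(level='O1'):
    loss = model(x).mean()
scaled = scaler.scale(loss)   # scale the loss to avoid fp16 gradient underflow
scaled.backward()
scaler.minimize(opt, scaled)  # unscales gradients, then runs the optimizer step
opt.clear_grad()
```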
@@ -427,21 +469,23 @@ class Trainer(object):
                 self.status['batch_time'].update(time.time() - iter_tic)
                 self._compose_callback.on_step_end(self.status)
                 if self.use_ema:
-                    self.ema.update(self.model)
+                    self.ema.update()
                 iter_tic = time.time()
 
-            # apply ema weight on model
-            if self.use_ema:
-                weight = copy.deepcopy(self.model.state_dict())
-                self.model.set_dict(self.ema.apply())
             if self.cfg.get('unstructured_prune'):
                 self.pruner.update_params()
 
+            is_snapshot = (self._nranks < 2 or self._local_rank == 0) \
+                       and ((epoch_id + 1) % self.cfg.snapshot_epoch == 0 or epoch_id == self.end_epoch - 1)
+            if is_snapshot and self.use_ema:
+                # apply ema weight on model
+                weight = copy.deepcopy(self.model.state_dict())
+                self.model.set_dict(self.ema.apply())
+                self.status['weight'] = weight
+
             self._compose_callback.on_epoch_end(self.status)
 
-            if validate and (self._nranks < 2 or self._local_rank == 0) \
-                    and ((epoch_id + 1) % self.cfg.snapshot_epoch == 0 \
-                             or epoch_id == self.end_epoch - 1):
+            if validate and is_snapshot:
                 if not hasattr(self, '_eval_loader'):
                     # build evaluation dataset and loader
                     self._eval_dataset = self.cfg.EvalDataset
@@ -462,13 +506,15 @@ class Trainer(object):
                     Init_mark = True
                     self._init_metrics(validate=validate)
                     self._reset_metrics()
+
                 with paddle.no_grad():
                     self.status['save_best_model'] = True
                     self._eval_with_loader(self._eval_loader)
 
-            # restore origin weight on model
-            if self.use_ema:
+            if is_snapshot and self.use_ema:
+                # reset original weight
                 self.model.set_dict(weight)
+                self.status.pop('weight')
 
         self._compose_callback.on_train_end(self.status)
 
@@ -518,10 +564,45 @@ class Trainer(object):
                 images,
                 draw_threshold=0.5,
                 output_dir='output',
-                save_txt=False):
+                save_results=False):
         self.dataset.set_images(images)
         loader = create('TestReader')(self.dataset, 0)
 
+        def setup_metrics_for_loader():
+            # remember the current metrics and config state
+            metrics = copy.deepcopy(self._metrics)
+            mode = self.mode
+            save_prediction_only = self.cfg[
+                'save_prediction_only'] if 'save_prediction_only' in self.cfg else None
+            output_eval = self.cfg[
+                'output_eval'] if 'output_eval' in self.cfg else None
+
+            # temporarily switch config so metrics only save predictions
+            self.mode = '_test'
+            self.cfg['save_prediction_only'] = True
+            self.cfg['output_eval'] = output_dir
+            self._init_metrics()
+
+            # restore the original mode and config
+            self.mode = mode
+            self.cfg.pop('save_prediction_only')
+            if save_prediction_only is not None:
+                self.cfg['save_prediction_only'] = save_prediction_only
+
+            self.cfg.pop('output_eval')
+            if output_eval is not None:
+                self.cfg['output_eval'] = output_eval
+
+            _metrics = copy.deepcopy(self._metrics)
+            self._metrics = metrics
+
+            return _metrics
+
+        if save_results:
+            metrics = setup_metrics_for_loader()
+        else:
+            metrics = []
+
         imid2path = self.dataset.get_imid2path()
 
         anno_file = self.dataset.get_anno()
@@ -535,11 +616,14 @@ class Trainer(object):
             flops_loader = create('TestReader')(self.dataset, 0)
             self._flops(flops_loader)
         results = []
-        for step_id, data in enumerate(loader):
+        for step_id, data in enumerate(tqdm(loader)):
             self.status['step_id'] = step_id
             # forward
             outs = self.model(data)
 
+            for _m in metrics:
+                _m.update(data, outs)
+
             for key in ['im_shape', 'scale_factor', 'im_id']:
                 if isinstance(data, typing.Sequence):
                     outs[key] = data[0][key]
@@ -549,11 +633,16 @@ class Trainer(object):
                 if hasattr(value, 'numpy'):
                     outs[key] = value.numpy()
             results.append(outs)
+
         # sniper
         if type(self.dataset) == SniperCOCODataSet:
             results = self.dataset.anno_cropper.aggregate_chips_detections(
                 results)
 
+        for _m in metrics:
+            _m.accumulate()
+            _m.reset()
+
         for outs in results:
             batch_res = get_infer_results(outs, clsid2catid)
             bbox_num = outs['bbox_num']
@@ -585,15 +674,7 @@ class Trainer(object):
                 logger.info("Detection bbox results save in {}".format(
                     save_name))
                 image.save(save_name, quality=95)
-                if save_txt:
-                    save_path = os.path.splitext(save_name)[0] + '.txt'
-                    results = {}
-                    results["im_id"] = im_id
-                    if bbox_res:
-                        results["bbox_res"] = bbox_res
-                    if keypoint_res:
-                        results["keypoint_res"] = keypoint_res
-                    save_result(save_path, results, catid2name, draw_threshold)
+
                 start = end
 
     def _get_save_image_name(self, output_dir, image_path):
@@ -629,9 +710,27 @@ class Trainer(object):
 
         if hasattr(self.model, 'deploy'):
             self.model.deploy = True
+
+        if 'slim' not in self.cfg:
+            for layer in self.model.sublayers():
+                if hasattr(layer, 'convert_to_deploy'):
+                    layer.convert_to_deploy()
+
+        export_post_process = self.cfg['export'].get(
+            'post_process', False) if hasattr(self.cfg, 'export') else True
+        export_nms = self.cfg['export'].get('nms', False) if hasattr(
+            self.cfg, 'export') else True
+        export_benchmark = self.cfg['export'].get(
+            'benchmark', False) if hasattr(self.cfg, 'export') else False
         if hasattr(self.model, 'fuse_norm'):
             self.model.fuse_norm = self.cfg['TestReader'].get('fuse_normalize',
                                                               False)
+        if hasattr(self.model, 'export_post_process'):
+            self.model.export_post_process = export_post_process if not export_benchmark else False
+        if hasattr(self.model, 'export_nms'):
+            self.model.export_nms = export_nms if not export_benchmark else False
+        if export_post_process and not export_benchmark:
+            image_shape = [None] + image_shape[1:]
 
         # Save infer cfg
         _dump_infer_config(self.cfg,
@@ -664,7 +763,7 @@ class Trainer(object):
             pruned_input_spec = input_spec
 
         # TODO: Hard code, delete it when support prune input_spec.
-        if self.cfg.architecture == 'PicoDet':
+        if self.cfg.architecture == 'PicoDet' and not export_post_process:
             pruned_input_spec = [{
                 "image": InputSpec(
                     shape=image_shape, name='image')
@@ -674,6 +773,11 @@ class Trainer(object):
 
     def export(self, output_dir='output_inference'):
         self.model.eval()
+
+        if hasattr(self.cfg, 'export') and 'fuse_conv_bn' in self.cfg[
+                'export'] and self.cfg['export']['fuse_conv_bn']:
+            self.model = fuse_conv_bn(self.model)
+
         model_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]
         save_dir = os.path.join(output_dir, model_name)
         if not os.path.exists(save_dir):
@@ -683,7 +787,7 @@ class Trainer(object):
             save_dir)
 
         # dy2st and save model
-        if 'slim' not in self.cfg or self.cfg['slim_type'] != 'QAT':
+        if 'slim' not in self.cfg or 'QAT' not in self.cfg['slim_type']:
             paddle.jit.save(
                 static_model,
                 os.path.join(save_dir, 'model'),
@@ -740,3 +844,29 @@ class Trainer(object):
         flops = flops(self.model, input_spec) / (1000**3)
         logger.info(" Model FLOPs : {:.6f}G. (image shape is {})".format(
             flops, input_data['image'][0].unsqueeze(0).shape))
+
+    def parse_mot_images(self, cfg):
+        import glob
+        # collect all evaluation images (used e.g. for quantization)
+        dataset_dir = cfg['EvalMOTDataset'].dataset_dir
+        data_root = cfg['EvalMOTDataset'].data_root
+        data_root = '{}/{}'.format(dataset_dir, data_root)
+        seqs = os.listdir(data_root)
+        seqs.sort()
+        all_images = []
+        for seq in seqs:
+            infer_dir = os.path.join(data_root, seq)
+            assert infer_dir is None or os.path.isdir(infer_dir), \
+                "{} is not a directory".format(infer_dir)
+            images = set()
+            exts = ['jpg', 'jpeg', 'png', 'bmp']
+            exts += [ext.upper() for ext in exts]
+            for ext in exts:
+                images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
+            images = list(images)
+            images.sort()
+            assert len(images) > 0, "no image found in {}".format(infer_dir)
+            all_images.extend(images)
+            logger.info("Found {} inference images in total.".format(
+                len(images)))
+        return all_images

+ 38 - 0
paddlex/ppdet/ext_op/README.md

@@ -0,0 +1,38 @@
+# Building the Custom OP
+The rotated-box IoU OP follows the [custom external operator](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/07_new_op/new_custom_op.html) mechanism.
+
+## 1. Requirements
+- Paddle >= 2.0.1
+- gcc 8.2
+
+## 2. Installation
+```
+python3.7 setup.py install
+```
+
+Use it as follows:
+```
+import numpy as np
+import paddle
+
+# import the custom op
+from rbox_iou_ops import rbox_iou
+
+paddle.set_device('gpu:0')
+paddle.disable_static()
+
+rbox1 = np.random.rand(13000, 5)
+rbox2 = np.random.rand(7, 5)
+
+pd_rbox1 = paddle.to_tensor(rbox1)
+pd_rbox2 = paddle.to_tensor(rbox2)
+
+iou = rbox_iou(pd_rbox1, pd_rbox2)
+print('iou', iou)
+```
+
+## 3. Unit Test
+The unit test in `test.py` verifies the custom op by comparing its output with a pure-Python implementation.
+
+Since the Python computation differs slightly from the C++ one in numerical details, the error tolerance is set to 0.02.
+```
+python3.7 test.py
+```
+The message `rbox_iou OP compute right!` means the OP passed the test.

+ 97 - 0
paddlex/ppdet/ext_op/rbox_iou_op.cc

@@ -0,0 +1,97 @@
+//   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated
+
+#include "rbox_iou_op.h"
+#include "paddle/extension.h"
+
+
+template <typename T>
+void rbox_iou_cpu_kernel(
+    const int rbox1_num,
+    const int rbox2_num,
+    const T* rbox1_data_ptr,
+    const T* rbox2_data_ptr,
+    T* output_data_ptr) {
+
+    int i, j;
+    for (i = 0; i < rbox1_num; i++) {
+        for (j = 0; j < rbox2_num; j++) {
+		int offset = i * rbox2_num + j;
+		output_data_ptr[offset] = rbox_iou_single<T>(rbox1_data_ptr + i * 5, rbox2_data_ptr + j * 5);
+        }
+    }
+}
+
+
+#define CHECK_INPUT_CPU(x) PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+
+std::vector<paddle::Tensor> RboxIouCPUForward(const paddle::Tensor& rbox1, const paddle::Tensor& rbox2) {
+    CHECK_INPUT_CPU(rbox1);
+    CHECK_INPUT_CPU(rbox2);
+
+    auto rbox1_num = rbox1.shape()[0];
+    auto rbox2_num = rbox2.shape()[0];
+
+    auto output = paddle::Tensor(paddle::PlaceType::kCPU, {rbox1_num, rbox2_num});
+
+    PD_DISPATCH_FLOATING_TYPES(
+        rbox1.type(),
+        "rbox_iou_cpu_kernel",
+        ([&] {
+            rbox_iou_cpu_kernel<data_t>(
+                rbox1_num,
+                rbox2_num,
+                rbox1.data<data_t>(),
+                rbox2.data<data_t>(),
+                output.mutable_data<data_t>());
+        }));
+    
+    return {output};
+}
+
+
+#ifdef PADDLE_WITH_CUDA
+std::vector<paddle::Tensor> RboxIouCUDAForward(const paddle::Tensor& rbox1, const paddle::Tensor& rbox2);
+#endif
+
+
+#define CHECK_INPUT_SAME(x1, x2) PD_CHECK(x1.place() == x2.place(), "inputs must be on the same place.")
+
+std::vector<paddle::Tensor> RboxIouForward(const paddle::Tensor& rbox1, const paddle::Tensor& rbox2) {
+    CHECK_INPUT_SAME(rbox1, rbox2);
+    if (rbox1.place() == paddle::PlaceType::kCPU) {
+        return RboxIouCPUForward(rbox1, rbox2);
+#ifdef PADDLE_WITH_CUDA
+    } else if (rbox1.place() == paddle::PlaceType::kGPU) {
+        return RboxIouCUDAForward(rbox1, rbox2);
+#endif
+    }
+    PD_THROW("rbox_iou only supports CPU and GPU tensors.");
+}
+
+std::vector<std::vector<int64_t>> InferShape(std::vector<int64_t> rbox1_shape, std::vector<int64_t> rbox2_shape) {
+    return {{rbox1_shape[0], rbox2_shape[0]}};
+}
+
+std::vector<paddle::DataType> InferDtype(paddle::DataType t1, paddle::DataType t2) {
+    return {t1};
+}
+
+PD_BUILD_OP(rbox_iou)
+    .Inputs({"RBOX1", "RBOX2"})
+    .Outputs({"Output"})
+    .SetKernelFn(PD_KERNEL(RboxIouForward))
+    .SetInferShapeFn(PD_INFER_SHAPE(InferShape))
+    .SetInferDtypeFn(PD_INFER_DTYPE(InferDtype));

+ 120 - 0
paddlex/ppdet/ext_op/rbox_iou_op.cu

@@ -0,0 +1,120 @@
+//   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated
+
+#include "rbox_iou_op.h"
+#include "paddle/extension.h"
+
+// 2D block with 32 * 16 = 512 threads per block
+const int BLOCK_DIM_X = 32;
+const int BLOCK_DIM_Y = 16;
+
+/**
+   Computes ceil(a / b)
+*/
+
+static inline int CeilDiv(const int a, const int b) {
+  return (a + b - 1)  / b;
+}
+
+template <typename T>
+__global__ void rbox_iou_cuda_kernel(
+    const int rbox1_num,
+    const int rbox2_num,
+    const T* rbox1_data_ptr,
+    const T* rbox2_data_ptr,
+    T* output_data_ptr) {
+
+  // get row_start and col_start
+  const int rbox1_block_idx = blockIdx.x * blockDim.x;
+  const int rbox2_block_idx = blockIdx.y * blockDim.y;
+
+  const int rbox1_thread_num = min(rbox1_num - rbox1_block_idx, blockDim.x);
+  const int rbox2_thread_num = min(rbox2_num - rbox2_block_idx, blockDim.y);
+
+  __shared__ T block_boxes1[BLOCK_DIM_X * 5];
+  __shared__ T block_boxes2[BLOCK_DIM_Y * 5];
+
+
+  // It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y
+  if (threadIdx.x < rbox1_thread_num && threadIdx.y == 0) {
+    block_boxes1[threadIdx.x * 5 + 0] =
+        rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 0];
+    block_boxes1[threadIdx.x * 5 + 1] =
+        rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 1];
+    block_boxes1[threadIdx.x * 5 + 2] =
+        rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 2];
+    block_boxes1[threadIdx.x * 5 + 3] =
+        rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 3];
+    block_boxes1[threadIdx.x * 5 + 4] =
+        rbox1_data_ptr[(rbox1_block_idx + threadIdx.x) * 5 + 4];
+  }
+
+  // rbox2_thread_num <= BLOCK_DIM_Y <= BLOCK_DIM_X, so threadIdx.x can index rbox2 as well; use the same condition as above: threadIdx.y == 0
+  if (threadIdx.x < rbox2_thread_num && threadIdx.y == 0) {
+    block_boxes2[threadIdx.x * 5 + 0] =
+        rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 0];
+    block_boxes2[threadIdx.x * 5 + 1] =
+        rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 1];
+    block_boxes2[threadIdx.x * 5 + 2] =
+        rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 2];
+    block_boxes2[threadIdx.x * 5 + 3] =
+        rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 3];
+    block_boxes2[threadIdx.x * 5 + 4] =
+        rbox2_data_ptr[(rbox2_block_idx + threadIdx.x) * 5 + 4];
+  }
+
+  // sync
+  __syncthreads();
+
+  if (threadIdx.x < rbox1_thread_num && threadIdx.y < rbox2_thread_num) {
+    int offset = (rbox1_block_idx + threadIdx.x) * rbox2_num + rbox2_block_idx + threadIdx.y;
+    output_data_ptr[offset] = rbox_iou_single<T>(block_boxes1 + threadIdx.x * 5, block_boxes2 + threadIdx.y * 5);
+  }
+}
+
+#define CHECK_INPUT_GPU(x) PD_CHECK(x.place() == paddle::PlaceType::kGPU, #x " must be a GPU Tensor.")
+
+std::vector<paddle::Tensor> RboxIouCUDAForward(const paddle::Tensor& rbox1, const paddle::Tensor& rbox2) {
+    CHECK_INPUT_GPU(rbox1);
+    CHECK_INPUT_GPU(rbox2);
+
+    auto rbox1_num = rbox1.shape()[0];
+    auto rbox2_num = rbox2.shape()[0];
+
+    auto output = paddle::Tensor(paddle::PlaceType::kGPU, {rbox1_num, rbox2_num});
+
+    const int blocks_x = CeilDiv(rbox1_num, BLOCK_DIM_X);
+    const int blocks_y = CeilDiv(rbox2_num, BLOCK_DIM_Y);
+
+    dim3 blocks(blocks_x, blocks_y);
+    dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
+
+    PD_DISPATCH_FLOATING_TYPES(
+        rbox1.type(),
+        "rbox_iou_cuda_kernel",
+        ([&] {
+            rbox_iou_cuda_kernel<data_t><<<blocks, threads, 0, rbox1.stream()>>>(
+                rbox1_num,
+                rbox2_num,
+                rbox1.data<data_t>(),
+                rbox2.data<data_t>(),
+                output.mutable_data<data_t>());
+        }));
+
+    return {output};
+}
+
+
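
A quick sanity check of the launch geometry above (a Python sketch; N and M match the shapes exercised in test.py further down): each thread block computes a 32 x 16 tile of the (N, M) IoU matrix, caching the corresponding 32 rbox1 and 16 rbox2 boxes in shared memory.

def ceil_div(a, b):  # mirrors CeilDiv in rbox_iou_op.cu
    return (a + b - 1) // b

N, M = 13000, 7                    # rbox counts, as in test.py below
BLOCK_DIM_X, BLOCK_DIM_Y = 32, 16
print(ceil_div(N, BLOCK_DIM_X), ceil_div(M, BLOCK_DIM_Y))  # 407 1, i.e. a 407 x 1 grid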

+ 356 - 0
paddlex/ppdet/ext_op/rbox_iou_op.h

@@ -0,0 +1,356 @@
+//   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated
+
+#pragma once
+
+#include <cassert>
+#include <cmath>
+#include <vector>
+
+#ifdef __CUDACC__
+// Designates functions callable from the host (CPU) and the device (GPU)
+#define HOST_DEVICE __host__ __device__
+#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__
+#else
+#include <algorithm>
+#define HOST_DEVICE
+#define HOST_DEVICE_INLINE HOST_DEVICE inline
+#endif
+
+namespace {
+
+template <typename T>
+struct RotatedBox {
+  T x_ctr, y_ctr, w, h, a;
+};
+
+template <typename T>
+struct Point {
+  T x, y;
+  HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {}
+  HOST_DEVICE_INLINE Point operator+(const Point& p) const {
+    return Point(x + p.x, y + p.y);
+  }
+  HOST_DEVICE_INLINE Point& operator+=(const Point& p) {
+    x += p.x;
+    y += p.y;
+    return *this;
+  }
+  HOST_DEVICE_INLINE Point operator-(const Point& p) const {
+    return Point(x - p.x, y - p.y);
+  }
+  HOST_DEVICE_INLINE Point operator*(const T coeff) const {
+    return Point(x * coeff, y * coeff);
+  }
+};
+
+template <typename T>
+HOST_DEVICE_INLINE T dot_2d(const Point<T>& A, const Point<T>& B) {
+  return A.x * B.x + A.y * B.y;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE T cross_2d(const Point<T>& A, const Point<T>& B) {
+  return A.x * B.y - B.x * A.y;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE void get_rotated_vertices(
+    const RotatedBox<T>& box,
+    Point<T> (&pts)[4]) {
+  // M_PI / 180. == 0.01745329251
+  //double theta = box.a * 0.01745329251;
+  //MODIFIED
+  double theta = box.a;
+  T cosTheta2 = (T)cos(theta) * 0.5f;
+  T sinTheta2 = (T)sin(theta) * 0.5f;
+
+  // y: top --> down; x: left --> right
+  pts[0].x = box.x_ctr - sinTheta2 * box.h - cosTheta2 * box.w;
+  pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w;
+  pts[1].x = box.x_ctr + sinTheta2 * box.h - cosTheta2 * box.w;
+  pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w;
+  pts[2].x = 2 * box.x_ctr - pts[0].x;
+  pts[2].y = 2 * box.y_ctr - pts[0].y;
+  pts[3].x = 2 * box.x_ctr - pts[1].x;
+  pts[3].y = 2 * box.y_ctr - pts[1].y;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE int get_intersection_points(
+    const Point<T> (&pts1)[4],
+    const Point<T> (&pts2)[4],
+    Point<T> (&intersections)[24]) {
+  // Line vector
+  // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1]
+  Point<T> vec1[4], vec2[4];
+  for (int i = 0; i < 4; i++) {
+    vec1[i] = pts1[(i + 1) % 4] - pts1[i];
+    vec2[i] = pts2[(i + 1) % 4] - pts2[i];
+  }
+
+  // Line test - test all line combos for intersection
+  int num = 0; // number of intersections
+  for (int i = 0; i < 4; i++) {
+    for (int j = 0; j < 4; j++) {
+      // Solve for 2x2 Ax=b
+      T det = cross_2d<T>(vec2[j], vec1[i]);
+
+      // This takes care of parallel lines
+      if (fabs(det) <= 1e-14) {
+        continue;
+      }
+
+      auto vec12 = pts2[j] - pts1[i];
+
+      T t1 = cross_2d<T>(vec2[j], vec12) / det;
+      T t2 = cross_2d<T>(vec1[i], vec12) / det;
+
+      if (t1 >= 0.0f && t1 <= 1.0f && t2 >= 0.0f && t2 <= 1.0f) {
+        intersections[num++] = pts1[i] + vec1[i] * t1;
+      }
+    }
+  }
+
+  // Check for vertices of rect1 inside rect2
+  {
+    const auto& AB = vec2[0];
+    const auto& DA = vec2[3];
+    auto ABdotAB = dot_2d<T>(AB, AB);
+    auto ADdotAD = dot_2d<T>(DA, DA);
+    for (int i = 0; i < 4; i++) {
+      // assume ABCD is the rectangle, and P is the point to be judged
+      // P is inside ABCD iff. P's projection on AB lies within AB
+      // and P's projection on AD lies within AD
+
+      auto AP = pts1[i] - pts2[0];
+
+      auto APdotAB = dot_2d<T>(AP, AB);
+      auto APdotAD = -dot_2d<T>(AP, DA);
+
+      if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) &&
+          (APdotAD <= ADdotAD)) {
+        intersections[num++] = pts1[i];
+      }
+    }
+  }
+
+  // Reverse the check - check for vertices of rect2 inside rect1
+  {
+    const auto& AB = vec1[0];
+    const auto& DA = vec1[3];
+    auto ABdotAB = dot_2d<T>(AB, AB);
+    auto ADdotAD = dot_2d<T>(DA, DA);
+    for (int i = 0; i < 4; i++) {
+      auto AP = pts2[i] - pts1[0];
+
+      auto APdotAB = dot_2d<T>(AP, AB);
+      auto APdotAD = -dot_2d<T>(AP, DA);
+
+      if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) &&
+          (APdotAD <= ADdotAD)) {
+        intersections[num++] = pts2[i];
+      }
+    }
+  }
+
+  return num;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE int convex_hull_graham(
+    const Point<T> (&p)[24],
+    const int& num_in,
+    Point<T> (&q)[24],
+    bool shift_to_zero = false) {
+  assert(num_in >= 2);
+
+  // Step 1:
+  // Find point with minimum y
+  // if more than 1 points have the same minimum y,
+  // pick the one with the minimum x.
+  int t = 0;
+  for (int i = 1; i < num_in; i++) {
+    if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) {
+      t = i;
+    }
+  }
+  auto& start = p[t]; // starting point
+
+  // Step 2:
+  // Subtract starting point from every points (for sorting in the next step)
+  for (int i = 0; i < num_in; i++) {
+    q[i] = p[i] - start;
+  }
+
+  // Swap the starting point to position 0
+  auto tmp = q[0];
+  q[0] = q[t];
+  q[t] = tmp;
+
+  // Step 3:
+  // Sort point 1 ~ num_in according to their relative cross-product values
+  // (essentially sorting according to angles)
+  // If the angles are the same, sort according to their distance to origin
+  T dist[24];
+  for (int i = 0; i < num_in; i++) {
+    dist[i] = dot_2d<T>(q[i], q[i]);
+  }
+
+#ifdef __CUDACC__
+  // CUDA version
+  // In the future, we can potentially use thrust
+  // for sorting here to improve speed (though not guaranteed)
+  for (int i = 1; i < num_in - 1; i++) {
+    for (int j = i + 1; j < num_in; j++) {
+      T crossProduct = cross_2d<T>(q[i], q[j]);
+      if ((crossProduct < -1e-6) ||
+          (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) {
+        auto q_tmp = q[i];
+        q[i] = q[j];
+        q[j] = q_tmp;
+        auto dist_tmp = dist[i];
+        dist[i] = dist[j];
+        dist[j] = dist_tmp;
+      }
+    }
+  }
+#else
+  // CPU version
+  std::sort(
+      q + 1, q + num_in, [](const Point<T>& A, const Point<T>& B) -> bool {
+        T temp = cross_2d<T>(A, B);
+        if (fabs(temp) < 1e-6) {
+          return dot_2d<T>(A, A) < dot_2d<T>(B, B);
+        } else {
+          return temp > 0;
+        }
+      });
+#endif
+
+  // Step 4:
+  // Make sure there are at least 2 points (that don't overlap with each other)
+  // in the stack
+  int k; // index of the non-overlapped second point
+  for (k = 1; k < num_in; k++) {
+    if (dist[k] > 1e-8) {
+      break;
+    }
+  }
+  if (k == num_in) {
+    // We reach the end, which means the convex hull is just one point
+    q[0] = p[t];
+    return 1;
+  }
+  q[1] = q[k];
+  int m = 2; // 2 points in the stack
+  // Step 5:
+  // Finally we can start the scanning process.
+  // When a non-convex relationship between the 3 points is found
+  // (either concave shape or duplicated points),
+  // we pop the previous point from the stack
+  // until the 3-point relationship is convex again, or
+  // until the stack only contains two points
+  for (int i = k + 1; i < num_in; i++) {
+    while (m > 1 && cross_2d<T>(q[i] - q[m - 2], q[m - 1] - q[m - 2]) >= 0) {
+      m--;
+    }
+    q[m++] = q[i];
+  }
+
+  // Step 6 (Optional):
+  // In general sense we need the original coordinates, so we
+  // need to shift the points back (reverting Step 2)
+  // But if we're only interested in getting the area/perimeter of the shape
+  // We can simply return.
+  if (!shift_to_zero) {
+    for (int i = 0; i < m; i++) {
+      q[i] += start;
+    }
+  }
+
+  return m;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE T polygon_area(const Point<T> (&q)[24], const int& m) {
+  if (m <= 2) {
+    return 0;
+  }
+
+  T area = 0;
+  for (int i = 1; i < m - 1; i++) {
+    area += fabs(cross_2d<T>(q[i] - q[0], q[i + 1] - q[0]));
+  }
+
+  return area / 2.0;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE T rboxes_intersection(
+    const RotatedBox<T>& box1,
+    const RotatedBox<T>& box2) {
+  // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned
+  // from rotated_rect_intersection_pts
+  Point<T> intersectPts[24], orderedPts[24];
+
+  Point<T> pts1[4];
+  Point<T> pts2[4];
+  get_rotated_vertices<T>(box1, pts1);
+  get_rotated_vertices<T>(box2, pts2);
+
+  int num = get_intersection_points<T>(pts1, pts2, intersectPts);
+
+  if (num <= 2) {
+    return 0.0;
+  }
+
+  // Convex Hull to order the intersection points in clockwise order and find
+  // the contour area.
+  int num_convex = convex_hull_graham<T>(intersectPts, num, orderedPts, true);
+  return polygon_area<T>(orderedPts, num_convex);
+}
+
+} // namespace
+
+template <typename T>
+HOST_DEVICE_INLINE T
+rbox_iou_single(T const* const box1_raw, T const* const box2_raw) {
+  // shift center to the middle point to achieve higher precision in result
+  RotatedBox<T> box1, box2;
+  auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0;
+  auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0;
+  box1.x_ctr = box1_raw[0] - center_shift_x;
+  box1.y_ctr = box1_raw[1] - center_shift_y;
+  box1.w = box1_raw[2];
+  box1.h = box1_raw[3];
+  box1.a = box1_raw[4];
+  box2.x_ctr = box2_raw[0] - center_shift_x;
+  box2.y_ctr = box2_raw[1] - center_shift_y;
+  box2.w = box2_raw[2];
+  box2.h = box2_raw[3];
+  box2.a = box2_raw[4];
+
+  const T area1 = box1.w * box1.h;
+  const T area2 = box2.w * box2.h;
+  if (area1 < 1e-14 || area2 < 1e-14) {
+    return 0.f;
+  }
+
+  const T intersection = rboxes_intersection<T>(box1, box2);
+  const T iou = intersection / (area1 + area2 - intersection);
+  return iou;
+}
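
Note the MODIFIED block in get_rotated_vertices above: the degree-to-radian conversion is commented out, so box angles are expected in radians. A minimal NumPy mirror of the vertex computation, handy for eyeballing the corner layout (an illustrative sketch, not part of the extension):

import numpy as np

def rotated_vertices(x_ctr, y_ctr, w, h, a):
    # mirrors get_rotated_vertices; 'a' is in radians
    cos2, sin2 = np.cos(a) * 0.5, np.sin(a) * 0.5
    p0 = (x_ctr - sin2 * h - cos2 * w, y_ctr + cos2 * h - sin2 * w)
    p1 = (x_ctr + sin2 * h - cos2 * w, y_ctr - cos2 * h - sin2 * w)
    p2 = (2 * x_ctr - p0[0], 2 * y_ctr - p0[1])  # reflections of p0/p1 through the center
    p3 = (2 * x_ctr - p1[0], 2 * y_ctr - p1[1])
    return [p0, p1, p2, p3]

# axis-aligned case (a = 0): corners at (+/- w/2, +/- h/2) around the center
print(rotated_vertices(0, 0, 2, 1, 0.0))
# [(-1.0, 0.5), (-1.0, -0.5), (1.0, -0.5), (1.0, 0.5)]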

+ 14 - 0
paddlex/ppdet/ext_op/setup.py

@@ -0,0 +1,14 @@
+import paddle
+from paddle.utils.cpp_extension import CppExtension, CUDAExtension, setup
+
+if __name__ == "__main__":
+    if paddle.device.is_compiled_with_cuda():
+        setup(
+            name='rbox_iou_ops',
+            ext_modules=CUDAExtension(
+                sources=['rbox_iou_op.cc', 'rbox_iou_op.cu'],
+                extra_compile_args={'cxx': ['-DPADDLE_WITH_CUDA']}))
+    else:
+        setup(
+            name='rbox_iou_ops',
+            ext_modules=CppExtension(sources=['rbox_iou_op.cc']))
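
With the files above in place, the extension is built the usual way for paddle.utils.cpp_extension (typically `python setup.py install` from this directory; see the ext_op README added in this commit). A minimal usage sketch, assuming the build succeeded and the module name 'rbox_iou_ops' from setup.py:

import paddle
from rbox_iou_ops import rbox_iou  # available after building the extension

rbox1 = paddle.rand([100, 5])  # [x_ctr, y_ctr, w, h, angle(rad)], values illustrative
rbox2 = paddle.rand([20, 5])
iou = rbox_iou(rbox1, rbox2)   # shape [100, 20] per InferShape; dtype follows rbox1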

+ 156 - 0
paddlex/ppdet/ext_op/test.py

@@ -0,0 +1,156 @@
+import numpy as np
+import sys
+import time
+from shapely.geometry import Polygon
+import paddle
+import unittest
+
+try:
+    from rbox_iou_ops import rbox_iou
+except Exception as e:
+    print('import rbox_iou_ops error', e)
+    sys.exit(-1)
+
+
+def rbox2poly_single(rrect, get_best_begin_point=False):
+    """
+    rrect:[x_ctr,y_ctr,w,h,angle]
+    to
+    poly:[x0,y0,x1,y1,x2,y2,x3,y3]
+    """
+    x_ctr, y_ctr, width, height, angle = rrect[:5]
+    tl_x, tl_y, br_x, br_y = -width / 2, -height / 2, width / 2, height / 2
+    # rect 2x4
+    rect = np.array([[tl_x, br_x, br_x, tl_x], [tl_y, tl_y, br_y, br_y]])
+    R = np.array([[np.cos(angle), -np.sin(angle)],
+                  [np.sin(angle), np.cos(angle)]])
+    # poly
+    poly = R.dot(rect)
+    x0, x1, x2, x3 = poly[0, :4] + x_ctr
+    y0, y1, y2, y3 = poly[1, :4] + y_ctr
+    poly = np.array([x0, y0, x1, y1, x2, y2, x3, y3], dtype=np.float64)
+    return poly
+
+
+def intersection(g, p):
+    """
+    Intersection.
+    """
+
+    g = g[:8].reshape((4, 2))
+    p = p[:8].reshape((4, 2))
+
+    a = g
+    b = p
+
+    use_filter = True
+    if use_filter:
+        # step1:
+        inter_x1 = np.maximum(np.min(a[:, 0]), np.min(b[:, 0]))
+        inter_x2 = np.minimum(np.max(a[:, 0]), np.max(b[:, 0]))
+        inter_y1 = np.maximum(np.min(a[:, 1]), np.min(b[:, 1]))
+        inter_y2 = np.minimum(np.max(a[:, 1]), np.max(b[:, 1]))
+        if inter_x1 >= inter_x2 or inter_y1 >= inter_y2:
+            return 0.
+        x1 = np.minimum(np.min(a[:, 0]), np.min(b[:, 0]))
+        x2 = np.maximum(np.max(a[:, 0]), np.max(b[:, 0]))
+        y1 = np.minimum(np.min(a[:, 1]), np.min(b[:, 1]))
+        y2 = np.maximum(np.max(a[:, 1]), np.max(b[:, 1]))
+        if x1 >= x2 or y1 >= y2 or (x2 - x1) < 2 or (y2 - y1) < 2:
+            return 0.
+
+    g = Polygon(g)
+    p = Polygon(p)
+    if not g.is_valid or not p.is_valid:
+        return 0
+
+    inter = Polygon(g).intersection(Polygon(p)).area
+    union = g.area + p.area - inter
+    if union == 0:
+        return 0
+    else:
+        return inter / union
+
+
+def rbox_overlaps(anchors, gt_bboxes, use_cv2=False):
+    """
+
+    Args:
+        anchors: [NA, 5]  x_ctr, y_ctr, w, h, angle
+        gt_bboxes: [M, 5]  x_ctr, y_ctr, w, h, angle
+
+    Returns:
+        iou: [NA, M] pairwise IoU between anchors and gt boxes
+    """
+    assert anchors.shape[1] == 5
+    assert gt_bboxes.shape[1] == 5
+
+    gt_bboxes_poly = [rbox2poly_single(e) for e in gt_bboxes]
+    anchors_poly = [rbox2poly_single(e) for e in anchors]
+
+    num_gt, num_anchors = len(gt_bboxes_poly), len(anchors_poly)
+    iou = np.zeros((num_gt, num_anchors), dtype=np.float64)
+
+    start_time = time.time()
+    for i in range(num_gt):
+        for j in range(num_anchors):
+            try:
+                iou[i, j] = intersection(gt_bboxes_poly[i], anchors_poly[j])
+            except Exception as e:
+                print('cur gt_bboxes_poly[i]', gt_bboxes_poly[i],
+                      'anchors_poly[j]', anchors_poly[j], e)
+    iou = iou.T
+    return iou
+
+
+def gen_sample(n):
+    rbox = np.random.rand(n, 5)
+    rbox[:, 0:4] = rbox[:, 0:4] * 0.45 + 0.001
+    rbox[:, 4] = rbox[:, 4] - 0.5
+    return rbox
+
+
+class RBoxIoUTest(unittest.TestCase):
+    def setUp(self):
+        self.initTestCase()
+        self.rbox1 = gen_sample(self.n)
+        self.rbox2 = gen_sample(self.m)
+
+    def initTestCase(self):
+        self.n = 13000
+        self.m = 7
+
+    def assertAllClose(self, x, y, msg, atol=5e-1, rtol=1e-2):
+        self.assertTrue(np.allclose(x, y, atol=atol, rtol=rtol), msg=msg)
+
+    def get_places(self):
+        places = [paddle.CPUPlace()]
+        if paddle.device.is_compiled_with_cuda():
+            places.append(paddle.CUDAPlace(0))
+
+        return places
+
+    def check_output(self, place):
+        paddle.disable_static()
+        pd_rbox1 = paddle.to_tensor(self.rbox1, place=place)
+        pd_rbox2 = paddle.to_tensor(self.rbox2, place=place)
+        actual_t = rbox_iou(pd_rbox1, pd_rbox2).numpy()
+        poly_rbox1 = self.rbox1
+        poly_rbox2 = self.rbox2
+        poly_rbox1[:, 0:4] = self.rbox1[:, 0:4] * 1024
+        poly_rbox2[:, 0:4] = self.rbox2[:, 0:4] * 1024
+        expect_t = rbox_overlaps(poly_rbox1, poly_rbox2, use_cv2=False)
+        self.assertAllClose(
+            actual_t,
+            expect_t,
+            msg="rbox_iou has diff at {} \nExpect {}\nBut got {}".format(
+                str(place), str(expect_t), str(actual_t)))
+
+    def test_output(self):
+        places = self.get_places()
+        for place in places:
+            self.check_output(place)
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 1 - 1
paddlex/ppdet/metrics/coco_utils.py

@@ -150,7 +150,7 @@ def cocoapi_eval(jsonfile,
         results_flatten = list(itertools.chain(*results_per_category))
         headers = ['category', 'AP'] * (num_columns // 2)
         results_2d = itertools.zip_longest(
-            *[results_flatten[i::num_columns] for i in range(num_columns)])
+            * [results_flatten[i::num_columns] for i in range(num_columns)])
         table_data = [headers]
         table_data += [result for result in results_2d]
         table = AsciiTable(table_data)

+ 11 - 1
paddlex/ppdet/metrics/json_results.py

@@ -65,6 +65,14 @@ def get_det_poly_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
     return det_res
 
 
+def strip_mask(mask):
+    row = mask[0, 0, :]
+    col = mask[0, :, 0]
+    im_h = len(col) - np.count_nonzero(col == -1)
+    im_w = len(row) - np.count_nonzero(row == -1)
+    return mask[:, :im_h, :im_w]
+
+
 def get_seg_res(masks, bboxes, mask_nums, image_id, label_to_cat_id_map):
     import pycocotools.mask as mask_util
     seg_res = []
@@ -72,8 +80,10 @@ def get_seg_res(masks, bboxes, mask_nums, image_id, label_to_cat_id_map):
     for i in range(len(mask_nums)):
         cur_image_id = int(image_id[i][0])
         det_nums = mask_nums[i]
+        mask_i = masks[k:k + det_nums]
+        mask_i = strip_mask(mask_i)
         for j in range(det_nums):
-            mask = masks[k].astype(np.uint8)
+            mask = mask_i[j].astype(np.uint8)
             score = float(bboxes[k][1])
             label = int(bboxes[k][0])
             k = k + 1
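
A self-contained check of strip_mask above (assumption: the batch collation pads each mask on the right and bottom with -1, which is what the first-row/first-column scan relies on):

import numpy as np

def strip_mask(mask):  # copied from the hunk above
    row = mask[0, 0, :]
    col = mask[0, :, 0]
    im_h = len(col) - np.count_nonzero(col == -1)
    im_w = len(row) - np.count_nonzero(row == -1)
    return mask[:, :im_h, :im_w]

m = -np.ones((1, 4, 4), dtype=np.int64)
m[:, :2, :3] = 1            # the real mask occupies a 2 x 3 corner
print(strip_mask(m).shape)  # (1, 2, 3)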

+ 1 - 1
paddlex/ppdet/metrics/map_utils.py

@@ -331,7 +331,7 @@ class DetectionMAP(object):
             num_columns = min(6, len(results_per_category) * 2)
             results_flatten = list(itertools.chain(*results_per_category))
             headers = ['category', 'AP'] * (num_columns // 2)
-            results_2d = itertools.zip_longest(*[
+            results_2d = itertools.zip_longest(* [
                 results_flatten[i::num_columns] for i in range(num_columns)
             ])
             table_data = [headers]

+ 5 - 4
paddlex/ppdet/metrics/mcmot_metrics.py

@@ -26,8 +26,6 @@ from motmetrics.math_util import quiet_divide
 import numpy as np
 import pandas as pd
 
-import paddle
-import paddle.nn.functional as F
 from .metrics import Metric
 import motmetrics as mm
 import openpyxl
@@ -309,9 +307,12 @@ class MCMOTEvaluator(object):
 
     def load_annotations(self):
         assert self.data_type == 'mcmot'
-        self.gt_filename = os.path.join(self.data_root, '../', '../',
-                                        'sequences',
+        self.gt_filename = os.path.join(self.data_root, '../', 'sequences',
                                         '{}.txt'.format(self.seq_name))
+        if not os.path.exists(self.gt_filename):
+            logger.warning(
+                "gt_filename '{}' of MCMOTEvaluator does not exist, so the MOTA will be -INF."
+                .format(self.gt_filename))
 
     def reset_accumulator(self):
         import motmetrics as mm

+ 9 - 2
paddlex/ppdet/metrics/metrics.py

@@ -22,6 +22,7 @@ import json
 import paddle
 import numpy as np
 import typing
+from pathlib import Path
 
 from .map_utils import prune_zero_padding, DetectionMAP
 from .coco_utils import get_infer_results, cocoapi_eval
@@ -69,8 +70,6 @@ class Metric(paddle.metric.Metric):
 
 class COCOMetric(Metric):
     def __init__(self, anno_file, **kwargs):
-        assert os.path.isfile(anno_file), \
-                "anno_file {} not a file".format(anno_file)
         self.anno_file = anno_file
         self.clsid2catid = kwargs.get('clsid2catid', None)
         if self.clsid2catid is None:
@@ -81,6 +80,14 @@ class COCOMetric(Metric):
         self.bias = kwargs.get('bias', 0)
         self.save_prediction_only = kwargs.get('save_prediction_only', False)
         self.iou_type = kwargs.get('IouType', 'bbox')
+
+        if not self.save_prediction_only:
+            assert os.path.isfile(anno_file), \
+                    "anno_file {} not a file".format(anno_file)
+
+        if self.output_eval is not None:
+            Path(self.output_eval).mkdir(exist_ok=True)
+
         self.reset()
 
     def reset(self):

+ 14 - 8
paddlex/ppdet/metrics/mot_metrics.py

@@ -22,8 +22,7 @@ import sys
 import math
 from collections import defaultdict
 import numpy as np
-import paddle
-import paddle.nn.functional as F
+
 from paddlex.ppdet.modeling.bbox_utils import bbox_iou_np_expand
 from .map_utils import ap_per_class
 from .metrics import Metric
@@ -36,8 +35,13 @@ __all__ = ['MOTEvaluator', 'MOTMetric', 'JDEDetMetric', 'KITTIMOTMetric']
 
 
 def read_mot_results(filename, is_gt=False, is_ignore=False):
-    valid_labels = {1}
-    ignore_labels = {2, 7, 8, 12}  # only in motchallenge datasets like 'MOT16'
+    valid_label = [1]
+    ignore_labels = [2, 7, 8, 12]  # only in motchallenge datasets like 'MOT16'
+    if is_gt:
+        logger.info(
+            "In MOT16/17 dataset the valid_label of ground truth is '{}', "
+            "in other dataset it should be '0' for single classs MOT.".format(
+                valid_label[0]))
     results_dict = dict()
     if os.path.isfile(filename):
         with open(filename, 'r') as f:
@@ -50,12 +54,10 @@ def read_mot_results(filename, is_gt=False, is_ignore=False):
                     continue
                 results_dict.setdefault(fid, list())
 
-                box_size = float(linelist[4]) * float(linelist[5])
-
                 if is_gt:
                     label = int(float(linelist[7]))
                     mark = int(float(linelist[6]))
-                    if mark == 0 or label not in valid_labels:
+                    if mark == 0 or label not in valid_label:
                         continue
                     score = 1
                 elif is_ignore:
@@ -118,6 +120,10 @@ class MOTEvaluator(object):
         assert self.data_type == 'mot'
         gt_filename = os.path.join(self.data_root, self.seq_name, 'gt',
                                    'gt.txt')
+        if not os.path.exists(gt_filename):
+            logger.warning(
+                "gt_filename '{}' of MOTEvaluator does not exist, so the MOTA will be -INF."
+                .format(gt_filename))
         self.gt_frame_dict = read_mot_results(gt_filename, is_gt=True)
         self.gt_ignore_frame_dict = read_mot_results(
             gt_filename, is_ignore=True)
@@ -553,7 +559,7 @@ class KITTIEvaluation(object):
                             "track ids are not unique for sequence %d: frame %d"
                             % (seq, t_data.frame))
                         logger.info(
-                            "track id %d occured at least twice for this frame"
+                            "track id %d occurred at least twice for this frame"
                             % t_data.track_id)
                         logger.info("Exiting...")
                         #continue # this allows to evaluate non-unique result files
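
For reference, an illustrative MOTChallenge gt.txt row and the fields read_mot_results above pulls from it (field order assumed from the MOT16 ground-truth convention: frame, id, bb_left, bb_top, bb_width, bb_height, conf, class, visibility):

line = "1,1,912,484,97,109,0,7,1"  # hypothetical gt.txt row
linelist = line.split(',')
fid = int(float(linelist[0]))      # frame id -> 1
mark = int(float(linelist[6]))     # 0 -> row skipped when is_gt
label = int(float(linelist[7]))    # 7 is in ignore_labels for MOT16-style data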

+ 1 - 0
paddlex/ppdet/model_zoo/.gitignore

@@ -0,0 +1 @@
+MODEL_ZOO

+ 13 - 0
paddlex/ppdet/model_zoo/tests/__init__.py

@@ -0,0 +1,13 @@
+#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+ 48 - 0
paddlex/ppdet/model_zoo/tests/test_get_model.py

@@ -0,0 +1,48 @@
+#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import paddle
+import paddlex.ppdet as ppdet
+import unittest
+
+# NOTE: weights downloading costs time, we choose
+#       a small model for unittesting
+MODEL_NAME = 'ppyolo/ppyolo_tiny_650e_coco'
+
+
+class TestGetConfigFile(unittest.TestCase):
+    def test_main(self):
+        try:
+            cfg_file = ppdet.model_zoo.get_config_file(MODEL_NAME)
+            assert os.path.isfile(cfg_file)
+        except:
+            self.assertTrue(False)
+
+
+class TestGetModel(unittest.TestCase):
+    def test_main(self):
+        try:
+            model = ppdet.model_zoo.get_model(MODEL_NAME)
+            assert isinstance(model, paddle.nn.Layer)
+        except:
+            self.assertTrue(False)
+
+
+if __name__ == '__main__':
+    unittest.main()

+ 68 - 0
paddlex/ppdet/model_zoo/tests/test_list_model.py

@@ -0,0 +1,68 @@
+#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import unittest
+import paddlex.ppdet as ppdet
+
+
+class TestListModel(unittest.TestCase):
+    def setUp(self):
+        self._filter = []
+
+    def test_main(self):
+        try:
+            ppdet.model_zoo.list_model(self._filter)
+            self.assertTrue(True)
+        except:
+            self.assertTrue(False)
+
+
+class TestListModelYOLO(TestListModel):
+    def setUp(self):
+        self._filter = ['yolo']
+
+
+class TestListModelRCNN(TestListModel):
+    def setUp(self):
+        self._filter = ['rcnn']
+
+
+class TestListModelSSD(TestListModel):
+    def setUp(self):
+        self._filter = ['ssd']
+
+
+class TestListModelMultiFilter(TestListModel):
+    def setUp(self):
+        self._filter = ['yolo', 'darknet']
+
+
+class TestListModelError(unittest.TestCase):
+    def setUp(self):
+        self._filter = ['xxx']
+
+    def test_main(self):
+        try:
+            ppdet.model_zoo.list_model(self._filter)
+            self.assertTrue(False)
+        except ValueError:
+            self.assertTrue(True)
+
+
+if __name__ == '__main__':
+    unittest.main()

+ 13 - 0
paddlex/ppdet/modeling/architectures/__init__.py

@@ -5,6 +5,13 @@
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from . import meta_arch
 from . import faster_rcnn
 from . import mask_rcnn
@@ -26,6 +33,9 @@ from . import picodet
 from . import detr
 from . import sparse_rcnn
 from . import tood
+from . import retinanet
+from . import bytetrack
+from . import yolox
 
 from .meta_arch import *
 from .faster_rcnn import *
@@ -49,3 +59,6 @@ from .picodet import *
 from .detr import *
 from .sparse_rcnn import *
 from .tood import *
+from .retinanet import *
+from .bytetrack import *
+from .yolox import *

+ 79 - 0
paddlex/ppdet/modeling/architectures/bytetrack.py

@@ -0,0 +1,79 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from paddlex.ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+__all__ = ['ByteTrack']
+
+
+@register
+class ByteTrack(BaseArch):
+    """
+    ByteTrack network, see https://arxiv.org/abs/2110.06864
+
+    Args:
+        detector (object): detector model instance
+        reid (object): reid model instance, default None
+        tracker (object): tracker instance
+    """
+    __category__ = 'architecture'
+
+    def __init__(self, detector='YOLOX', reid=None, tracker='JDETracker'):
+        super(ByteTrack, self).__init__()
+        self.detector = detector
+        self.reid = reid
+        self.tracker = tracker
+
+    @classmethod
+    def from_config(cls, cfg, *args, **kwargs):
+        detector = create(cfg['detector'])
+
+        if cfg['reid'] != 'None':
+            reid = create(cfg['reid'])
+        else:
+            reid = None
+
+        tracker = create(cfg['tracker'])
+
+        return {
+            "detector": detector,
+            "reid": reid,
+            "tracker": tracker,
+        }
+
+    def _forward(self):
+        det_outs = self.detector(self.inputs)
+
+        if self.training:
+            return det_outs
+        else:
+            if self.reid is not None:
+                assert 'crops' in self.inputs
+                crops = self.inputs['crops']
+                pred_embs = self.reid(crops)
+            else:
+                pred_embs = None
+            det_outs['embeddings'] = pred_embs
+            return det_outs
+
+    def get_loss(self):
+        return self._forward()
+
+    def get_pred(self):
+        return self._forward()

+ 4 - 4
paddlex/ppdet/modeling/architectures/cascade_rcnn.py

@@ -112,14 +112,14 @@ class CascadeRCNN(BaseArch):
             bbox, bbox_num = self.bbox_post_process(
                 preds, (refined_rois, rois_num), im_shape, scale_factor)
             # rescale the prediction back to origin image
-            bbox_pred = self.bbox_post_process.get_pred(bbox, bbox_num,
-                                                        im_shape, scale_factor)
+            bbox, bbox_pred, bbox_num = self.bbox_post_process.get_pred(
+                bbox, bbox_num, im_shape, scale_factor)
             if not self.with_mask:
                 return bbox_pred, bbox_num, None
             mask_out = self.mask_head(body_feats, bbox, bbox_num, self.inputs)
             origin_shape = self.bbox_post_process.get_origin_shape()
-            mask_pred = self.mask_post_process(mask_out[:, 0, :, :], bbox_pred,
-                                               bbox_num, origin_shape)
+            mask_pred = self.mask_post_process(mask_out, bbox_pred, bbox_num,
+                                               origin_shape)
             return bbox_pred, bbox_num, mask_pred
 
     def get_loss(self, ):

+ 3 - 2
paddlex/ppdet/modeling/architectures/deepsort.py

@@ -62,8 +62,9 @@ class DeepSORT(BaseArch):
 
     def _forward(self):
         crops = self.inputs['crops']
-        features = self.reid(crops)
-        return features
+        outs = {}
+        outs['embeddings'] = self.reid(crops)
+        return outs
 
     def get_pred(self):
         return self._forward()

+ 2 - 2
paddlex/ppdet/modeling/architectures/faster_rcnn.py

@@ -87,8 +87,8 @@ class FasterRCNN(BaseArch):
                                                     im_shape, scale_factor)
 
             # rescale the prediction back to origin image
-            bbox_pred = self.bbox_post_process.get_pred(bbox, bbox_num,
-                                                        im_shape, scale_factor)
+            bboxes, bbox_pred, bbox_num = self.bbox_post_process.get_pred(
+                bbox, bbox_num, im_shape, scale_factor)
             return bbox_pred, bbox_num
 
     def get_loss(self, ):

+ 1 - 1
paddlex/ppdet/modeling/architectures/keypoint_hrhrnet.py

@@ -153,7 +153,7 @@ class HrHRNetPostProcess(object):
         heat_thresh (float): topk values below this threshold will be ignored
         tag_thresh (float): tagmap values sampled at a coordinate below this threshold are grouped as the same person at init
 
-        inputs(list[heatmap]): the output list of modle, [heatmap, heatmap_maxpool, tagmap], heatmap_maxpool used to get topk
+        inputs(list[heatmap]): the output list of model, [heatmap, heatmap_maxpool, tagmap], heatmap_maxpool used to get topk
         original_height, original_width (float): the original image size
     '''
 

+ 4 - 4
paddlex/ppdet/modeling/architectures/mask_rcnn.py

@@ -112,11 +112,11 @@ class MaskRCNN(BaseArch):
                 body_feats, bbox, bbox_num, self.inputs, feat_func=feat_func)
 
             # rescale the prediction back to origin image
-            bbox_pred = self.bbox_post_process.get_pred(bbox, bbox_num,
-                                                        im_shape, scale_factor)
+            bbox, bbox_pred, bbox_num = self.bbox_post_process.get_pred(
+                bbox, bbox_num, im_shape, scale_factor)
             origin_shape = self.bbox_post_process.get_origin_shape()
-            mask_pred = self.mask_post_process(mask_out[:, 0, :, :], bbox_pred,
-                                               bbox_num, origin_shape)
+            mask_pred = self.mask_post_process(mask_out, bbox_pred, bbox_num,
+                                               origin_shape)
             return bbox_pred, bbox_num, mask_pred
 
     def get_loss(self, ):

+ 20 - 28
paddlex/ppdet/modeling/architectures/meta_arch.py

@@ -22,23 +22,23 @@ class BaseArch(nn.Layer):
         self.fuse_norm = False
 
     def load_meanstd(self, cfg_transform):
-        self.scale = 1.
-        self.mean = paddle.to_tensor([0.485, 0.456, 0.406]).reshape(
-            (1, 3, 1, 1))
-        self.std = paddle.to_tensor([0.229, 0.224, 0.225]).reshape(
-            (1, 3, 1, 1))
+        scale = 1.
+        mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
+        std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
         for item in cfg_transform:
             if 'NormalizeImage' in item:
-                self.mean = paddle.to_tensor(item['NormalizeImage'][
-                    'mean']).reshape((1, 3, 1, 1))
-                self.std = paddle.to_tensor(item['NormalizeImage'][
-                    'std']).reshape((1, 3, 1, 1))
+                mean = np.array(
+                    item['NormalizeImage']['mean'], dtype=np.float32)
+                std = np.array(item['NormalizeImage']['std'], dtype=np.float32)
                 if item['NormalizeImage'].get('is_scale', True):
-                    self.scale = 1. / 255.
+                    scale = 1. / 255.
                 break
         if self.data_format == 'NHWC':
-            self.mean = self.mean.reshape(1, 1, 1, 3)
-            self.std = self.std.reshape(1, 1, 1, 3)
+            self.scale = paddle.to_tensor(scale / std).reshape((1, 1, 1, 3))
+            self.bias = paddle.to_tensor(-mean / std).reshape((1, 1, 1, 3))
+        else:
+            self.scale = paddle.to_tensor(scale / std).reshape((1, 3, 1, 1))
+            self.bias = paddle.to_tensor(-mean / std).reshape((1, 3, 1, 1))
 
     def forward(self, inputs):
         if self.data_format == 'NHWC':
@@ -47,7 +47,7 @@ class BaseArch(nn.Layer):
 
         if self.fuse_norm:
             image = inputs['image']
-            self.inputs['image'] = (image * self.scale - self.mean) / self.std
+            self.inputs['image'] = image * self.scale + self.bias
             self.inputs['im_shape'] = inputs['im_shape']
             self.inputs['scale_factor'] = inputs['scale_factor']
         else:
@@ -64,10 +64,15 @@ class BaseArch(nn.Layer):
                 inputs_list.append(inputs)
             else:
                 inputs_list.extend(inputs)
-
             outs = []
             for inp in inputs_list:
-                self.inputs = inp
+                if self.fuse_norm:
+                    self.inputs['image'] = inp[
+                        'image'] * self.scale + self.bias
+                    self.inputs['im_shape'] = inp['im_shape']
+                    self.inputs['scale_factor'] = inp['scale_factor']
+                else:
+                    self.inputs = inp
                 outs.append(self.get_pred())
 
             # multi-scale test
@@ -126,16 +131,3 @@ class BaseArch(nn.Layer):
 
     def get_pred(self, ):
         raise NotImplementedError("Should implement get_pred method!")
-
-    @classmethod
-    def convert_sync_batchnorm(cls, layer):
-        layer_output = layer
-        if getattr(layer, 'norm_type', None) == 'sync_bn':
-            layer_output = nn.SyncBatchNorm.convert_sync_batchnorm(layer)
-        else:
-            for name, sublayer in layer.named_children():
-                layer_output.add_sublayer(name,
-                                          cls.convert_sync_batchnorm(sublayer))
-
-        del layer
-        return layer_output
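
The load_meanstd rewrite above folds the mean/std normalization into a single scale and bias, so the forward pass becomes one fused multiply-add instead of a subtract-then-divide. A quick NumPy check of the algebra (a sketch; the values are the ImageNet defaults used above):

import numpy as np

img = np.random.rand(1, 3, 4, 4).astype(np.float32)
s = 1.0 / 255.0
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 3, 1, 1)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 3, 1, 1)

lhs = (img * s - mean) / std           # old form
rhs = img * (s / std) + (-mean / std)  # new fused form: scale and bias
assert np.allclose(lhs, rhs, atol=1e-6)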

+ 12 - 8
paddlex/ppdet/modeling/architectures/picodet.py

@@ -41,7 +41,8 @@ class PicoDet(BaseArch):
         self.backbone = backbone
         self.neck = neck
         self.head = head
-        self.deploy = False
+        self.export_post_process = True
+        self.export_nms = True
 
     @classmethod
     def from_config(cls, cfg, *args, **kwargs):
@@ -62,14 +63,13 @@ class PicoDet(BaseArch):
     def _forward(self):
         body_feats = self.backbone(self.inputs)
         fpn_feats = self.neck(body_feats)
-        head_outs = self.head(fpn_feats, self.deploy)
-        if self.training or self.deploy:
+        head_outs = self.head(fpn_feats, self.export_post_process)
+        if self.training or not self.export_post_process:
             return head_outs, None
         else:
-            im_shape = self.inputs['im_shape']
             scale_factor = self.inputs['scale_factor']
-            bboxes, bbox_num = self.head.post_process(head_outs, im_shape,
-                                                      scale_factor)
+            bboxes, bbox_num = self.head.post_process(
+                head_outs, scale_factor, export_nms=self.export_nms)
             return bboxes, bbox_num
 
     def get_loss(self, ):
@@ -83,9 +83,13 @@ class PicoDet(BaseArch):
         return loss
 
     def get_pred(self):
-        if self.deploy:
+        if not self.export_post_process:
             return {'picodet': self._forward()[0]}
-        else:
+        elif self.export_nms:
             bbox_pred, bbox_num = self._forward()
             output = {'bbox': bbox_pred, 'bbox_num': bbox_num}
             return output
+        else:
+            bboxes, mlvl_scores = self._forward()
+            output = {'bbox': bboxes, 'scores': mlvl_scores}
+            return output

+ 69 - 0
paddlex/ppdet/modeling/architectures/retinanet.py

@@ -0,0 +1,69 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from paddlex.ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+import paddle
+
+__all__ = ['RetinaNet']
+
+
+@register
+class RetinaNet(BaseArch):
+    __category__ = 'architecture'
+
+    def __init__(self, backbone, neck, head):
+        super(RetinaNet, self).__init__()
+        self.backbone = backbone
+        self.neck = neck
+        self.head = head
+
+    @classmethod
+    def from_config(cls, cfg, *args, **kwargs):
+        backbone = create(cfg['backbone'])
+
+        kwargs = {'input_shape': backbone.out_shape}
+        neck = create(cfg['neck'], **kwargs)
+
+        kwargs = {'input_shape': neck.out_shape}
+        head = create(cfg['head'], **kwargs)
+
+        return {
+            'backbone': backbone,
+            'neck': neck,
+            'head': head,
+        }
+
+    def _forward(self):
+        body_feats = self.backbone(self.inputs)
+        neck_feats = self.neck(body_feats)
+
+        if self.training:
+            return self.head(neck_feats, self.inputs)
+        else:
+            head_outs = self.head(neck_feats)
+            bbox, bbox_num = self.head.post_process(
+                head_outs, self.inputs['im_shape'],
+                self.inputs['scale_factor'])
+            return {'bbox': bbox, 'bbox_num': bbox_num}
+
+    def get_loss(self):
+        return self._forward()
+
+    def get_pred(self):
+        return self._forward()

+ 5 - 1
paddlex/ppdet/modeling/architectures/yolo.py

@@ -109,10 +109,14 @@ class YOLOv3(BaseArch):
                 if self.return_idx:
                     _, bbox, bbox_num, _ = self.post_process(
                         yolo_head_outs, self.yolo_head.mask_anchors)
-                else:
+                elif self.post_process is not None:
                     bbox, bbox_num = self.post_process(
                         yolo_head_outs, self.yolo_head.mask_anchors,
                         self.inputs['im_shape'], self.inputs['scale_factor'])
+                else:
+                    bbox, bbox_num = self.yolo_head.post_process(
+                        yolo_head_outs, self.inputs['im_shape'],
+                        self.inputs['scale_factor'])
                 output = {'bbox': bbox, 'bbox_num': bbox_num}
 
             return output

+ 139 - 0
paddlex/ppdet/modeling/architectures/yolox.py

@@ -0,0 +1,139 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from paddlex.ppdet.core.workspace import register, create
+from .meta_arch import BaseArch
+
+import random
+import paddle
+import paddle.nn.functional as F
+import paddle.distributed as dist
+
+__all__ = ['YOLOX']
+
+
+@register
+class YOLOX(BaseArch):
+    """
+    YOLOX network, see https://arxiv.org/abs/2107.08430
+
+    Args:
+        backbone (nn.Layer): backbone instance
+        neck (nn.Layer): neck instance
+        head (nn.Layer): head instance
+        for_mot (bool): whether used for MOT or not
+        input_size (list[int]): initial scale, will be reset by self._preprocess()
+        size_stride (int): stride of the size range
+        size_range (list[int]): multi-scale range for training
+        random_interval (int): interval of iter to change self._input_size
+    """
+    __category__ = 'architecture'
+
+    def __init__(self,
+                 backbone='CSPDarkNet',
+                 neck='YOLOCSPPAN',
+                 head='YOLOXHead',
+                 for_mot=False,
+                 input_size=[640, 640],
+                 size_stride=32,
+                 size_range=[15, 25],
+                 random_interval=10):
+        super(YOLOX, self).__init__()
+        self.backbone = backbone
+        self.neck = neck
+        self.head = head
+        self.for_mot = for_mot
+
+        self.input_size = input_size
+        self._input_size = paddle.to_tensor(input_size)
+        self.size_stride = size_stride
+        self.size_range = size_range
+        self.random_interval = random_interval
+        self._step = 0
+
+    @classmethod
+    def from_config(cls, cfg, *args, **kwargs):
+        # backbone
+        backbone = create(cfg['backbone'])
+
+        # fpn
+        kwargs = {'input_shape': backbone.out_shape}
+        neck = create(cfg['neck'], **kwargs)
+
+        # head
+        kwargs = {'input_shape': neck.out_shape}
+        head = create(cfg['head'], **kwargs)
+
+        return {
+            'backbone': backbone,
+            'neck': neck,
+            "head": head,
+        }
+
+    def _forward(self):
+        if self.training:
+            self._preprocess()
+        body_feats = self.backbone(self.inputs)
+        neck_feats = self.neck(body_feats, self.for_mot)
+
+        if self.training:
+            yolox_losses = self.head(neck_feats, self.inputs)
+            yolox_losses.update({'size': self._input_size[0]})
+            return yolox_losses
+        else:
+            head_outs = self.head(neck_feats)
+            bbox, bbox_num = self.head.post_process(
+                head_outs, self.inputs['im_shape'],
+                self.inputs['scale_factor'])
+            return {'bbox': bbox, 'bbox_num': bbox_num}
+
+    def get_loss(self):
+        return self._forward()
+
+    def get_pred(self):
+        return self._forward()
+
+    def _preprocess(self):
+        # YOLOX multi-scale training, interpolate resize before inputs of the network.
+        self._get_size()
+        scale_y = self._input_size[0] / self.input_size[0]
+        scale_x = self._input_size[1] / self.input_size[1]
+        if scale_x != 1 or scale_y != 1:
+            self.inputs['image'] = F.interpolate(
+                self.inputs['image'],
+                size=self._input_size,
+                mode='bilinear',
+                align_corners=False)
+            gt_bboxes = self.inputs['gt_bbox']
+            for i in range(len(gt_bboxes)):
+                if len(gt_bboxes[i]) > 0:
+                    gt_bboxes[i][:, 0::2] = gt_bboxes[i][:, 0::2] * scale_x
+                    gt_bboxes[i][:, 1::2] = gt_bboxes[i][:, 1::2] * scale_y
+            self.inputs['gt_bbox'] = gt_bboxes
+
+    def _get_size(self):
+        # random_interval = 10 as default, every 10 iters to change self._input_size
+        image_ratio = self.input_size[1] * 1.0 / self.input_size[0]
+        if self._step % self.random_interval == 0:
+            size_factor = random.randint(*self.size_range)
+            size = [
+                self.size_stride * size_factor,
+                self.size_stride * int(size_factor * image_ratio)
+            ]
+            self._input_size = paddle.to_tensor(size)
+        self._step += 1
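
A worked example of the size draw in _get_size above (illustrative; with the defaults size_stride=32 and size_range=[15, 25], candidate sides run from 480 to 800 in steps of 32):

import random

size_stride, size_range = 32, [15, 25]
input_size = [640, 640]
image_ratio = input_size[1] * 1.0 / input_size[0]  # 1.0 for square inputs
size_factor = random.randint(*size_range)          # e.g. 18
size = [size_stride * size_factor,
        size_stride * int(size_factor * image_ratio)]
print(size)                                        # e.g. [576, 576]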

+ 2 - 0
paddlex/ppdet/modeling/assigners/__init__.py

@@ -16,8 +16,10 @@ from . import utils
 from . import task_aligned_assigner
 from . import atss_assigner
 from . import simota_assigner
+from . import max_iou_assigner
 
 from .utils import *
 from .task_aligned_assigner import *
 from .atss_assigner import *
 from .simota_assigner import *
+from .max_iou_assigner import *

+ 33 - 29
paddlex/ppdet/modeling/assigners/atss_assigner.py

@@ -22,11 +22,13 @@ import paddle.nn as nn
 import paddle.nn.functional as F
 
 from paddlex.ppdet.core.workspace import register
-from ..ops import iou_similarity
+from ..bbox_utils import iou_similarity, batch_iou_similarity
 from ..bbox_utils import bbox_center
-from .utils import (pad_gt, check_points_inside_bboxes, compute_max_iou_anchor,
+from .utils import (check_points_inside_bboxes, compute_max_iou_anchor,
                     compute_max_iou_gt)
 
+__all__ = ['ATSSAssigner']
+
 
 @register
 class ATSSAssigner(nn.Layer):
@@ -48,7 +50,6 @@ class ATSSAssigner(nn.Layer):
 
     def _gather_topk_pyramid(self, gt2anchor_distances, num_anchors_list,
                              pad_gt_mask):
-        pad_gt_mask = pad_gt_mask.tile([1, 1, self.topk]).astype(paddle.bool)
         gt2anchor_distances_list = paddle.split(
             gt2anchor_distances, num_anchors_list, axis=-1)
         num_anchors_index = np.cumsum(num_anchors_list).tolist()
@@ -58,17 +59,12 @@ class ATSSAssigner(nn.Layer):
         for distances, anchors_index in zip(gt2anchor_distances_list,
                                             num_anchors_index):
             num_anchors = distances.shape[-1]
-            topk_metrics, topk_idxs = paddle.topk(
+            _, topk_idxs = paddle.topk(
                 distances, self.topk, axis=-1, largest=False)
             topk_idxs_list.append(topk_idxs + anchors_index)
-            topk_idxs = paddle.where(pad_gt_mask, topk_idxs,
-                                     paddle.zeros_like(topk_idxs))
-            is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2)
-            is_in_topk = paddle.where(is_in_topk > 1,
-                                      paddle.zeros_like(is_in_topk),
-                                      is_in_topk)
-            is_in_topk_list.append(
-                is_in_topk.astype(gt2anchor_distances.dtype))
+            is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(
+                axis=-2).astype(gt2anchor_distances.dtype)
+            is_in_topk_list.append(is_in_topk * pad_gt_mask)
         is_in_topk_list = paddle.concat(is_in_topk_list, axis=-1)
         topk_idxs_list = paddle.concat(topk_idxs_list, axis=-1)
         return is_in_topk_list, topk_idxs_list
@@ -79,8 +75,10 @@ class ATSSAssigner(nn.Layer):
                 num_anchors_list,
                 gt_labels,
                 gt_bboxes,
+                pad_gt_mask,
                 bg_index,
-                gt_scores=None):
+                gt_scores=None,
+                pred_bboxes=None):
         r"""This code is based on
             https://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/assigners/atss_assigner.py
 
@@ -101,18 +99,18 @@ class ATSSAssigner(nn.Layer):
             anchor_bboxes (Tensor, float32): pre-defined anchors, shape(L, 4),
                     "xmin, xmax, ymin, ymax" format
             num_anchors_list (List): num of anchors in each level
-            gt_labels (Tensor|List[Tensor], int64): Label of gt_bboxes, shape(B, n, 1)
-            gt_bboxes (Tensor|List[Tensor], float32): Ground truth bboxes, shape(B, n, 4)
+            gt_labels (Tensor, int64|int32): Label of gt_bboxes, shape(B, n, 1)
+            gt_bboxes (Tensor, float32): Ground truth bboxes, shape(B, n, 4)
+            pad_gt_mask (Tensor, float32): 1 means bbox, 0 means no bbox, shape(B, n, 1)
             bg_index (int): background index
-            gt_scores (Tensor|List[Tensor]|None, float32) Score of gt_bboxes,
+            gt_scores (Tensor|None, float32): Score of gt_bboxes,
                     shape(B, n, 1), if None, then it will initialize with one_hot label
+            pred_bboxes (Tensor, float32, optional): predicted bounding boxes, shape(B, L, 4)
         Returns:
             assigned_labels (Tensor): (B, L)
             assigned_bboxes (Tensor): (B, L, 4)
-            assigned_scores (Tensor): (B, L, C)
+            assigned_scores (Tensor): (B, L, C), weighted by the assigned IoUs when pred_bboxes is not None
         """
-        gt_labels, gt_bboxes, pad_gt_scores, pad_gt_mask = pad_gt(
-            gt_labels, gt_bboxes, gt_scores)
         assert gt_labels.ndim == gt_bboxes.ndim and \
                gt_bboxes.ndim == 3
 
@@ -121,7 +119,8 @@ class ATSSAssigner(nn.Layer):
 
         # negative batch
         if num_max_boxes == 0:
-            assigned_labels = paddle.full([batch_size, num_anchors], bg_index)
+            assigned_labels = paddle.full(
+                [batch_size, num_anchors], bg_index, dtype=gt_labels.dtype)
             assigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])
             assigned_scores = paddle.zeros(
                 [batch_size, num_anchors, self.num_classes])
@@ -151,9 +150,8 @@ class ATSSAssigner(nn.Layer):
         iou_threshold = iou_threshold.reshape([batch_size, num_max_boxes, -1])
         iou_threshold = iou_threshold.mean(axis=-1, keepdim=True) + \
                         iou_threshold.std(axis=-1, keepdim=True)
-        is_in_topk = paddle.where(
-            iou_candidates > iou_threshold.tile([1, 1, num_anchors]),
-            is_in_topk, paddle.zeros_like(is_in_topk))
+        is_in_topk = paddle.where(iou_candidates > iou_threshold, is_in_topk,
+                                  paddle.zeros_like(is_in_topk))
 
         # 6. check the positive sample's center in gt, [B, n, L]
         is_in_gts = check_points_inside_bboxes(anchor_centers, gt_bboxes)
@@ -180,9 +178,6 @@ class ATSSAssigner(nn.Layer):
                                          mask_positive)
             mask_positive_sum = mask_positive.sum(axis=-2)
         assigned_gt_index = mask_positive.argmax(axis=-2)
-        assert mask_positive_sum.max() == 1, \
-            ("one anchor just assign one gt, but received not equals 1. "
-             "Received: %f" % mask_positive_sum.max().item())
 
         # assigned target
         batch_ind = paddle.arange(
@@ -199,10 +194,19 @@ class ATSSAssigner(nn.Layer):
             gt_bboxes.reshape([-1, 4]), assigned_gt_index.flatten(), axis=0)
         assigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])
 
-        assigned_scores = F.one_hot(assigned_labels, self.num_classes)
-        if gt_scores is not None:
+        assigned_scores = F.one_hot(assigned_labels, self.num_classes + 1)
+        ind = list(range(self.num_classes + 1))
+        ind.remove(bg_index)
+        assigned_scores = paddle.index_select(
+            assigned_scores, paddle.to_tensor(ind), axis=-1)
+        if pred_bboxes is not None:
+            # assigned iou
+            ious = batch_iou_similarity(gt_bboxes, pred_bboxes) * mask_positive
+            ious = ious.max(axis=-2).unsqueeze(-1)
+            assigned_scores *= ious
+        elif gt_scores is not None:
             gather_scores = paddle.gather(
-                pad_gt_scores.flatten(), assigned_gt_index.flatten(), axis=0)
+                gt_scores.flatten(), assigned_gt_index.flatten(), axis=0)
             gather_scores = gather_scores.reshape([batch_size, num_anchors])
             gather_scores = paddle.where(mask_positive_sum > 0, gather_scores,
                                          paddle.zeros_like(gather_scores))
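
A minimal standalone sketch of the one-hot trick introduced above: encode labels over num_classes + 1 bins, then drop the background column with index_select, so background anchors end up as all-zero score rows (the values below are illustrative, not from this change):

    import paddle
    import paddle.nn.functional as F

    num_classes, bg_index = 3, 3
    labels = paddle.to_tensor([0, 2, 3, 1])  # 3 marks background anchors

    scores = F.one_hot(labels, num_classes + 1)  # shape [4, 4]
    ind = [i for i in range(num_classes + 1) if i != bg_index]
    scores = paddle.index_select(scores, paddle.to_tensor(ind), axis=-1)
    print(scores.numpy())
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 0. 0.]   <- the background row is all zeros
    #  [0. 1. 0.]]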

+ 54 - 0
paddlex/ppdet/modeling/assigners/max_iou_assigner.py

@@ -0,0 +1,54 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from paddlex.ppdet.core.workspace import register
+from paddlex.ppdet.modeling.proposal_generator.target import label_box
+
+__all__ = ['MaxIoUAssigner']
+
+
+@register
+class MaxIoUAssigner(object):
+    """a standard bbox assigner based on max IoU, use ppdet's label_box
+    as backend.
+    Args:
+        positive_overlap (float): threshold for defining positive samples
+        negative_overlap (float): threshold for denining negative samples
+        allow_low_quality (bool): whether to lower IoU thr if a GT poorly
+            overlaps with candidate bboxes
+    """
+
+    def __init__(self,
+                 positive_overlap,
+                 negative_overlap,
+                 allow_low_quality=True):
+        self.positive_overlap = positive_overlap
+        self.negative_overlap = negative_overlap
+        self.allow_low_quality = allow_low_quality
+
+    def __call__(self, bboxes, gt_bboxes):
+        matches, match_labels = label_box(
+            bboxes,
+            gt_bboxes,
+            positive_overlap=self.positive_overlap,
+            negative_overlap=self.negative_overlap,
+            allow_low_quality=self.allow_low_quality,
+            ignore_thresh=-1,
+            is_crowd=None,
+            assign_on_cpu=False)
+        return matches, match_labels
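
A hedged usage sketch of MaxIoUAssigner above, assuming valid x1y1x2y2 float32 boxes; the exact coding of match_labels (positive/negative/ignored) follows ppdet's label_box backend:

    import paddle
    from paddlex.ppdet.modeling.assigners.max_iou_assigner import MaxIoUAssigner

    assigner = MaxIoUAssigner(positive_overlap=0.7, negative_overlap=0.3)
    xy = paddle.rand([100, 2]) * 100
    wh = paddle.rand([100, 2]) * 50 + 1.
    anchors = paddle.concat([xy, xy + wh], axis=-1)  # candidate boxes
    gt = paddle.to_tensor([[10., 10., 60., 60.], [30., 40., 90., 80.]])
    matches, match_labels = assigner(anchors, gt)
    # matches: per-anchor index of the best-overlapping gt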

+ 19 - 17
paddlex/ppdet/modeling/assigners/task_aligned_assigner.py

@@ -21,10 +21,12 @@ import paddle.nn as nn
 import paddle.nn.functional as F
 
 from paddlex.ppdet.core.workspace import register
-from ..bbox_utils import iou_similarity
-from .utils import (pad_gt, gather_topk_anchors, check_points_inside_bboxes,
+from ..bbox_utils import batch_iou_similarity
+from .utils import (gather_topk_anchors, check_points_inside_bboxes,
                     compute_max_iou_anchor)
 
+__all__ = ['TaskAlignedAssigner']
+
 
 @register
 class TaskAlignedAssigner(nn.Layer):
@@ -43,8 +45,10 @@ class TaskAlignedAssigner(nn.Layer):
                 pred_scores,
                 pred_bboxes,
                 anchor_points,
+                num_anchors_list,
                 gt_labels,
                 gt_bboxes,
+                pad_gt_mask,
                 bg_index,
                 gt_scores=None):
         r"""This code is based on
@@ -61,20 +65,18 @@ class TaskAlignedAssigner(nn.Layer):
             pred_scores (Tensor, float32): predicted class probability, shape(B, L, C)
             pred_bboxes (Tensor, float32): predicted bounding boxes, shape(B, L, 4)
             anchor_points (Tensor, float32): pre-defined anchors, shape(L, 2), "cxcy" format
-            gt_labels (Tensor|List[Tensor], int64): Label of gt_bboxes, shape(B, n, 1)
-            gt_bboxes (Tensor|List[Tensor], float32): Ground truth bboxes, shape(B, n, 4)
+            num_anchors_list (List): num of anchors in each level, shape(L)
+            gt_labels (Tensor, int64|int32): Label of gt_bboxes, shape(B, n, 1)
+            gt_bboxes (Tensor, float32): Ground truth bboxes, shape(B, n, 4)
+            pad_gt_mask (Tensor, float32): 1 means bbox, 0 means no bbox, shape(B, n, 1)
             bg_index (int): background index
-            gt_scores (Tensor|List[Tensor]|None, float32) Score of gt_bboxes,
-                    shape(B, n, 1), if None, then it will initialize with one_hot label
+            gt_scores (Tensor|None, float32): Score of gt_bboxes, shape(B, n, 1)
         Returns:
             assigned_labels (Tensor): (B, L)
             assigned_bboxes (Tensor): (B, L, 4)
             assigned_scores (Tensor): (B, L, C)
         """
         assert pred_scores.ndim == pred_bboxes.ndim
-
-        gt_labels, gt_bboxes, pad_gt_scores, pad_gt_mask = pad_gt(
-            gt_labels, gt_bboxes, gt_scores)
         assert gt_labels.ndim == gt_bboxes.ndim and \
                gt_bboxes.ndim == 3
 
@@ -83,7 +85,8 @@ class TaskAlignedAssigner(nn.Layer):
 
         # negative batch
         if num_max_boxes == 0:
-            assigned_labels = paddle.full([batch_size, num_anchors], bg_index)
+            assigned_labels = paddle.full(
+                [batch_size, num_anchors], bg_index, dtype=gt_labels.dtype)
             assigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])
             assigned_scores = paddle.zeros(
                 [batch_size, num_anchors, num_classes])
@@ -109,9 +112,7 @@ class TaskAlignedAssigner(nn.Layer):
         # select topk largest alignment metrics pred bbox as candidates
         # for each gt, [B, n, L]
         is_in_topk = gather_topk_anchors(
-            alignment_metrics * is_in_gts,
-            self.topk,
-            topk_mask=pad_gt_mask.tile([1, 1, self.topk]).astype(paddle.bool))
+            alignment_metrics * is_in_gts, self.topk, topk_mask=pad_gt_mask)
 
         # select positive sample, [B, n, L]
         mask_positive = is_in_topk * is_in_gts * pad_gt_mask
@@ -127,9 +128,6 @@ class TaskAlignedAssigner(nn.Layer):
                                          mask_positive)
             mask_positive_sum = mask_positive.sum(axis=-2)
         assigned_gt_index = mask_positive.argmax(axis=-2)
-        assert mask_positive_sum.max() == 1, \
-            ("one anchor just assign one gt, but received not equals 1. "
-             "Received: %f" % mask_positive_sum.max().item())
 
         # assigned target
         assigned_gt_index = assigned_gt_index + batch_ind * num_max_boxes
@@ -144,7 +142,11 @@ class TaskAlignedAssigner(nn.Layer):
             gt_bboxes.reshape([-1, 4]), assigned_gt_index.flatten(), axis=0)
         assigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])
 
-        assigned_scores = F.one_hot(assigned_labels, num_classes)
+        assigned_scores = F.one_hot(assigned_labels, num_classes + 1)
+        ind = list(range(num_classes + 1))
+        ind.remove(bg_index)
+        assigned_scores = paddle.index_select(
+            assigned_scores, paddle.to_tensor(ind), axis=-1)
         # rescale alignment metrics
         alignment_metrics *= mask_positive
         max_metrics_per_instance = alignment_metrics.max(axis=-1, keepdim=True)
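
For reference, the alignment metric rescaled above is, in TOOD, t = s**alpha * u**beta, where s is the predicted classification score and u the IoU with the predicted box; alpha and beta below are the paper's defaults, assumed rather than read from this change:

    import paddle

    alpha, beta = 1.0, 6.0                       # TOOD defaults (assumption)
    s = paddle.to_tensor([0.9, 0.6, 0.8])        # class score per anchor
    u = paddle.to_tensor([0.5, 0.9, 0.1])        # IoU with one gt per anchor
    metric = s.pow(alpha) * u.pow(beta)
    # the top-k anchors by this metric become that gt's positive candidates
    print(metric.numpy())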

+ 49 - 19
paddlex/ppdet/modeling/assigners/utils.py

@@ -88,7 +88,7 @@ def gather_topk_anchors(metrics, topk, largest=True, topk_mask=None, eps=1e-9):
         largest (bool) : largest is a flag, if set to true,
             algorithm will sort by descending order, otherwise sort by
             ascending order. Default: True
-        topk_mask (Tensor, bool|None): shape[B, n, topk], ignore bbox mask,
+        topk_mask (Tensor, float32): shape[B, n, 1], mask of valid gt bboxes,
             Default: None
         eps (float): Default: 1e-9
     Returns:
@@ -98,21 +98,22 @@ def gather_topk_anchors(metrics, topk, largest=True, topk_mask=None, eps=1e-9):
     topk_metrics, topk_idxs = paddle.topk(
         metrics, topk, axis=-1, largest=largest)
     if topk_mask is None:
-        topk_mask = (topk_metrics.max(axis=-1, keepdim=True) > eps).tile(
-            [1, 1, topk])
-    topk_idxs = paddle.where(topk_mask, topk_idxs,
-                             paddle.zeros_like(topk_idxs))
-    is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2)
-    is_in_topk = paddle.where(is_in_topk > 1,
-                              paddle.zeros_like(is_in_topk), is_in_topk)
-    return is_in_topk.astype(metrics.dtype)
+        topk_mask = (topk_metrics.max(axis=-1, keepdim=True) > eps
+                     ).astype(metrics.dtype)
+    is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(
+        axis=-2).astype(metrics.dtype)
+    return is_in_topk * topk_mask
 
 
-def check_points_inside_bboxes(points, bboxes, eps=1e-9):
+def check_points_inside_bboxes(points,
+                               bboxes,
+                               center_radius_tensor=None,
+                               eps=1e-9):
     r"""
     Args:
         points (Tensor, float32): shape[L, 2], "xy" format, L: num_anchors
         bboxes (Tensor, float32): shape[B, n, 4], "xmin, ymin, xmax, ymax" format
+        center_radius_tensor (Tensor, float32): shape [L, 1]. Default: None.
         eps (float): Default: 1e-9
     Returns:
         is_in_bboxes (Tensor, float32): shape[B, n, L], value=1. means selected
@@ -120,12 +121,28 @@ def check_points_inside_bboxes(points, bboxes, eps=1e-9):
     points = points.unsqueeze([0, 1])
     x, y = points.chunk(2, axis=-1)
     xmin, ymin, xmax, ymax = bboxes.unsqueeze(2).chunk(4, axis=-1)
+    # check whether `points` is in `bboxes`
     l = x - xmin
     t = y - ymin
     r = xmax - x
     b = ymax - y
-    bbox_ltrb = paddle.concat([l, t, r, b], axis=-1)
-    return (bbox_ltrb.min(axis=-1) > eps).astype(bboxes.dtype)
+    delta_ltrb = paddle.concat([l, t, r, b], axis=-1)
+    is_in_bboxes = (delta_ltrb.min(axis=-1) > eps)
+    if center_radius_tensor is not None:
+        # check whether `points` is in `center_radius`
+        center_radius_tensor = center_radius_tensor.unsqueeze([0, 1])
+        cx = (xmin + xmax) * 0.5
+        cy = (ymin + ymax) * 0.5
+        l = x - (cx - center_radius_tensor)
+        t = y - (cy - center_radius_tensor)
+        r = (cx + center_radius_tensor) - x
+        b = (cy + center_radius_tensor) - y
+        delta_ltrb_c = paddle.concat([l, t, r, b], axis=-1)
+        is_in_center = (delta_ltrb_c.min(axis=-1) > eps)
+        return (paddle.logical_and(is_in_bboxes, is_in_center),
+                paddle.logical_or(is_in_bboxes, is_in_center))
+
+    return is_in_bboxes.astype(bboxes.dtype)
 
 
 def compute_max_iou_anchor(ious):
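
A tiny numeric check of the plain path of check_points_inside_bboxes above: a point lies inside a box iff all four signed distances l, t, r, b are positive:

    import paddle

    points = paddle.to_tensor([[5., 5.], [20., 20.]])    # [L=2, 2]
    bboxes = paddle.to_tensor([[[0., 0., 10., 10.]]])    # [B=1, n=1, 4]
    x, y = points.unsqueeze([0, 1]).chunk(2, axis=-1)
    xmin, ymin, xmax, ymax = bboxes.unsqueeze(2).chunk(4, axis=-1)
    ltrb = paddle.concat([x - xmin, y - ymin, xmax - x, ymax - y], axis=-1)
    print((ltrb.min(axis=-1) > 1e-9).astype('float32').numpy())  # [[[1. 0.]]]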
@@ -168,14 +185,16 @@ def generate_anchors_for_grid_cell(feats,
         grid_cell_size (float): anchor size
         grid_cell_offset (float): The range is between 0 and 1.
     Returns:
-        anchors (List[Tensor]): shape[s, (l, 4)]
-        num_anchors_list (List[int]): shape[s]
-        stride_tensor_list (List[Tensor]): shape[s, (l, 1)]
+        anchors (Tensor): shape[l, 4], "xmin, ymin, xmax, ymax" format.
+        anchor_points (Tensor): shape[l, 2], "x, y" format.
+        num_anchors_list (List[int]): shape[s], contains [s_1, s_2, ...].
+        stride_tensor (Tensor): shape[l, 1], contains the stride for each scale.
     """
     assert len(feats) == len(fpn_strides)
     anchors = []
+    anchor_points = []
     num_anchors_list = []
-    stride_tensor_list = []
+    stride_tensor = []
     for feat, stride in zip(feats, fpn_strides):
         _, _, h, w = feat.shape
         cell_half_size = grid_cell_size * stride * 0.5
@@ -188,8 +207,19 @@ def generate_anchors_for_grid_cell(feats,
                 shift_x + cell_half_size, shift_y + cell_half_size
             ],
             axis=-1).astype(feat.dtype)
+        anchor_point = paddle.stack(
+            [shift_x, shift_y], axis=-1).astype(feat.dtype)
+
         anchors.append(anchor.reshape([-1, 4]))
+        anchor_points.append(anchor_point.reshape([-1, 2]))
         num_anchors_list.append(len(anchors[-1]))
-        stride_tensor_list.append(
-            paddle.full([num_anchors_list[-1], 1], stride))
-    return anchors, num_anchors_list, stride_tensor_list
+        stride_tensor.append(
+            paddle.full(
+                [num_anchors_list[-1], 1], stride, dtype=feat.dtype))
+    anchors = paddle.concat(anchors)
+    anchors.stop_gradient = True
+    anchor_points = paddle.concat(anchor_points)
+    anchor_points.stop_gradient = True
+    stride_tensor = paddle.concat(stride_tensor)
+    stride_tensor.stop_gradient = True
+    return anchors, anchor_points, num_anchors_list, stride_tensor
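
A shape sketch of the new return contract of generate_anchors_for_grid_cell: anchors, anchor points, and strides now come back concatenated across FPN levels (the toy feature maps and the grid_cell_size/grid_cell_offset values below are assumptions, not taken from this change):

    import paddle
    from paddlex.ppdet.modeling.assigners.utils import \
        generate_anchors_for_grid_cell

    feats = [paddle.zeros([1, 8, 4, 4]), paddle.zeros([1, 8, 2, 2])]
    anchors, anchor_points, num_anchors_list, stride_tensor = \
        generate_anchors_for_grid_cell(
            feats, fpn_strides=[8, 16], grid_cell_size=5.0,
            grid_cell_offset=0.5)
    print(anchors.shape, anchor_points.shape)      # [20, 4] [20, 2]
    print(num_anchors_list, stride_tensor.shape)   # [16, 4] [20, 1]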

+ 6 - 2
paddlex/ppdet/modeling/backbones/__init__.py

@@ -1,10 +1,10 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -29,6 +29,8 @@ from . import swin_transformer
 from . import lcnet
 from . import hardnet
 from . import esnet
+from . import cspresnet
+from . import csp_darknet
 
 from .vgg import *
 from .resnet import *
@@ -47,3 +49,5 @@ from .swin_transformer import *
 from .lcnet import *
 from .hardnet import *
 from .esnet import *
+from .cspresnet import *
+from .csp_darknet import *

+ 439 - 0
paddlex/ppdet/modeling/backbones/csp_darknet.py

@@ -0,0 +1,439 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.regularizer import L2Decay
+from paddlex.ppdet.core.workspace import register, serializable
+from paddlex.ppdet.modeling.initializer import conv_init_
+from ..shape_spec import ShapeSpec
+
+__all__ = [
+    'CSPDarkNet', 'BaseConv', 'DWConv', 'BottleNeck', 'SPPLayer', 'SPPFLayer'
+]
+
+
+class BaseConv(nn.Layer):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 ksize,
+                 stride,
+                 groups=1,
+                 bias=False,
+                 act="silu"):
+        super(BaseConv, self).__init__()
+        self.conv = nn.Conv2D(
+            in_channels,
+            out_channels,
+            kernel_size=ksize,
+            stride=stride,
+            padding=(ksize - 1) // 2,
+            groups=groups,
+            bias_attr=bias)
+        self.bn = nn.BatchNorm2D(
+            out_channels,
+            weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
+            bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
+
+        self._init_weights()
+
+    def _init_weights(self):
+        conv_init_(self.conv)
+
+    def forward(self, x):
+        # use 'x * F.sigmoid(x)' to implement 'silu'
+        x = self.bn(self.conv(x))
+        y = x * F.sigmoid(x)
+        return y
+
+
+class DWConv(nn.Layer):
+    """Depthwise Conv"""
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 ksize,
+                 stride=1,
+                 bias=False,
+                 act="silu"):
+        super(DWConv, self).__init__()
+        self.dw_conv = BaseConv(
+            in_channels,
+            in_channels,
+            ksize=ksize,
+            stride=stride,
+            groups=in_channels,
+            bias=bias,
+            act=act)
+        self.pw_conv = BaseConv(
+            in_channels,
+            out_channels,
+            ksize=1,
+            stride=1,
+            groups=1,
+            bias=bias,
+            act=act)
+
+    def forward(self, x):
+        return self.pw_conv(self.dw_conv(x))
+
+
+class Focus(nn.Layer):
+    """Focus width and height information into channel space, used in YOLOX."""
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 ksize=3,
+                 stride=1,
+                 bias=False,
+                 act="silu"):
+        super(Focus, self).__init__()
+        self.conv = BaseConv(
+            in_channels * 4,
+            out_channels,
+            ksize=ksize,
+            stride=stride,
+            bias=bias,
+            act=act)
+
+    def forward(self, inputs):
+        # inputs [bs, C, H, W] -> outputs [bs, 4C, H/2, W/2]
+        top_left = inputs[:, :, 0::2, 0::2]
+        top_right = inputs[:, :, 0::2, 1::2]
+        bottom_left = inputs[:, :, 1::2, 0::2]
+        bottom_right = inputs[:, :, 1::2, 1::2]
+        outputs = paddle.concat(
+            [top_left, bottom_left, top_right, bottom_right], 1)
+        return self.conv(outputs)
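
The Focus stem above is a space-to-depth rearrangement; a minimal check of the slicing (before the conv) showing [bs, C, H, W] -> [bs, 4C, H/2, W/2]:

    import paddle

    x = paddle.arange(16, dtype='float32').reshape([1, 1, 4, 4])
    patches = paddle.concat([
        x[:, :, 0::2, 0::2],   # top-left pixel of each 2x2 block
        x[:, :, 1::2, 0::2],   # bottom-left
        x[:, :, 0::2, 1::2],   # top-right
        x[:, :, 1::2, 1::2],   # bottom-right
    ], axis=1)
    print(patches.shape)       # [1, 4, 2, 2]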
+
+
+class BottleNeck(nn.Layer):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 shortcut=True,
+                 expansion=0.5,
+                 depthwise=False,
+                 bias=False,
+                 act="silu"):
+        super(BottleNeck, self).__init__()
+        hidden_channels = int(out_channels * expansion)
+        Conv = DWConv if depthwise else BaseConv
+        self.conv1 = BaseConv(
+            in_channels,
+            hidden_channels,
+            ksize=1,
+            stride=1,
+            bias=bias,
+            act=act)
+        self.conv2 = Conv(
+            hidden_channels,
+            out_channels,
+            ksize=3,
+            stride=1,
+            bias=bias,
+            act=act)
+        self.add_shortcut = shortcut and in_channels == out_channels
+
+    def forward(self, x):
+        y = self.conv2(self.conv1(x))
+        if self.add_shortcut:
+            y = y + x
+        return y
+
+
+class SPPLayer(nn.Layer):
+    """Spatial Pyramid Pooling (SPP) layer used in YOLOv3-SPP and YOLOX"""
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_sizes=(5, 9, 13),
+                 bias=False,
+                 act="silu"):
+        super(SPPLayer, self).__init__()
+        hidden_channels = in_channels // 2
+        self.conv1 = BaseConv(
+            in_channels,
+            hidden_channels,
+            ksize=1,
+            stride=1,
+            bias=bias,
+            act=act)
+        self.maxpoolings = nn.LayerList([
+            nn.MaxPool2D(
+                kernel_size=ks, stride=1, padding=ks // 2)
+            for ks in kernel_sizes
+        ])
+        conv2_channels = hidden_channels * (len(kernel_sizes) + 1)
+        self.conv2 = BaseConv(
+            conv2_channels,
+            out_channels,
+            ksize=1,
+            stride=1,
+            bias=bias,
+            act=act)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = paddle.concat([x] + [mp(x) for mp in self.maxpoolings], axis=1)
+        x = self.conv2(x)
+        return x
+
+
+class SPPFLayer(nn.Layer):
+    """ Spatial Pyramid Pooling - Fast (SPPF) layer used in YOLOv5 by Glenn Jocher,
+        equivalent to SPP(k=(5, 9, 13))
+    """
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 ksize=5,
+                 bias=False,
+                 act='silu'):
+        super(SPPFLayer, self).__init__()
+        hidden_channels = in_channels // 2
+        self.conv1 = BaseConv(
+            in_channels,
+            hidden_channels,
+            ksize=1,
+            stride=1,
+            bias=bias,
+            act=act)
+        self.maxpooling = nn.MaxPool2D(
+            kernel_size=ksize, stride=1, padding=ksize // 2)
+        conv2_channels = hidden_channels * 4
+        self.conv2 = BaseConv(
+            conv2_channels,
+            out_channels,
+            ksize=1,
+            stride=1,
+            bias=bias,
+            act=act)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        y1 = self.maxpooling(x)
+        y2 = self.maxpooling(y1)
+        y3 = self.maxpooling(y2)
+        concats = paddle.concat([x, y1, y2, y3], axis=1)
+        out = self.conv2(concats)
+        return out
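
A quick numerical check of the docstring claim above: chaining a stride-1 5x5 max pool reproduces the 9x9 and 13x13 receptive fields of SPP, so SPPF(ksize=5) matches SPP(k=(5, 9, 13)):

    import paddle
    import paddle.nn as nn

    mp = lambda k: nn.MaxPool2D(kernel_size=k, stride=1, padding=k // 2)
    x = paddle.rand([1, 3, 32, 32])
    y1 = mp(5)(x)
    y2 = mp(5)(y1)
    y3 = mp(5)(y2)
    assert paddle.allclose(y2, mp(9)(x)).item()
    assert paddle.allclose(y3, mp(13)(x)).item()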
+
+
+class CSPLayer(nn.Layer):
+    """CSP (Cross Stage Partial) layer with 3 convs, named C3 in YOLOv5"""
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 num_blocks=1,
+                 shortcut=True,
+                 expansion=0.5,
+                 depthwise=False,
+                 bias=False,
+                 act="silu"):
+        super(CSPLayer, self).__init__()
+        hidden_channels = int(out_channels * expansion)
+        self.conv1 = BaseConv(
+            in_channels,
+            hidden_channels,
+            ksize=1,
+            stride=1,
+            bias=bias,
+            act=act)
+        self.conv2 = BaseConv(
+            in_channels,
+            hidden_channels,
+            ksize=1,
+            stride=1,
+            bias=bias,
+            act=act)
+        self.bottlenecks = nn.Sequential(* [
+            BottleNeck(
+                hidden_channels,
+                hidden_channels,
+                shortcut=shortcut,
+                expansion=1.0,
+                depthwise=depthwise,
+                bias=bias,
+                act=act) for _ in range(num_blocks)
+        ])
+        self.conv3 = BaseConv(
+            hidden_channels * 2,
+            out_channels,
+            ksize=1,
+            stride=1,
+            bias=bias,
+            act=act)
+
+    def forward(self, x):
+        x_1 = self.conv1(x)
+        x_1 = self.bottlenecks(x_1)
+        x_2 = self.conv2(x)
+        x = paddle.concat([x_1, x_2], axis=1)
+        x = self.conv3(x)
+        return x
+
+
+@register
+@serializable
+class CSPDarkNet(nn.Layer):
+    """
+    CSPDarkNet backbone.
+    Args:
+        arch (str): Architecture of CSPDarkNet, from {P5, P6, X}, default as X,
+            and 'X' means used in YOLOX, 'P5/P6' means used in YOLOv5.
+        depth_mult (float): Depth multiplier, multiplies the number of blocks
+            in each CSPLayer, default as 1.0.
+        width_mult (float): Width multiplier, multiplies the number of channels
+            in each layer, default as 1.0.
+        depthwise (bool): Whether to use depth-wise conv layer.
+        act (str): Activation function type, default as 'silu'.
+        return_idx (list): Index of stages whose feature maps are returned.
+    """
+
+    __shared__ = ['depth_mult', 'width_mult', 'act', 'trt']
+
+    # in_channels, out_channels, num_blocks, add_shortcut, use_spp(use_sppf)
+    # 'X' means setting used in YOLOX, 'P5/P6' means setting used in YOLOv5.
+    arch_settings = {
+        'X': [[64, 128, 3, True, False], [128, 256, 9, True, False],
+              [256, 512, 9, True, False], [512, 1024, 3, False, True]],
+        'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],
+               [256, 512, 9, True, False], [512, 1024, 3, True, True]],
+        'P6': [[64, 128, 3, True, False], [128, 256, 6, True, False],
+               [256, 512, 9, True, False], [512, 768, 3, True, False],
+               [768, 1024, 3, True, True]],
+    }
+
+    def __init__(self,
+                 arch='X',
+                 depth_mult=1.0,
+                 width_mult=1.0,
+                 depthwise=False,
+                 act='silu',
+                 trt=False,
+                 return_idx=[2, 3, 4]):
+        super(CSPDarkNet, self).__init__()
+        self.arch = arch
+        self.return_idx = return_idx
+        Conv = DWConv if depthwise else BaseConv
+        arch_setting = self.arch_settings[arch]
+        base_channels = int(arch_setting[0][0] * width_mult)
+
+        # Note: differences between the latest YOLOv5 and the original YOLOX
+        # 1. self.stem: Conv stem (in YOLOv5) or Focus stem (in YOLOX)
+        # 2. use SPPF (in YOLOv5) or SPP (in YOLOX)
+        # 3. put SPPF before (YOLOv5) or SPP after (YOLOX) the last dark block's CSPLayer
+        # 4. whether the SPP(F) stage's CSPLayer adds a shortcut: True in YOLOv5, False in YOLOX
+        if arch in ['P5', 'P6']:
+            # in the latest YOLOv5, use a Conv stem and SPPF (fast, only a single spp kernel size)
+            self.stem = Conv(
+                3, base_channels, ksize=6, stride=2, bias=False, act=act)
+            spp_kernal_sizes = 5
+        elif arch in ['X']:
+            # in the original YOLOX, use a Focus stem and SPP (three spp kernel sizes)
+            self.stem = Focus(
+                3, base_channels, ksize=3, stride=1, bias=False, act=act)
+            spp_kernal_sizes = (5, 9, 13)
+        else:
+            raise AttributeError("Unsupported arch type: {}".format(arch))
+
+        _out_channels = [base_channels]
+        layers_num = 1
+        self.csp_dark_blocks = []
+
+        for i, (in_channels, out_channels, num_blocks, shortcut,
+                use_spp) in enumerate(arch_setting):
+            in_channels = int(in_channels * width_mult)
+            out_channels = int(out_channels * width_mult)
+            _out_channels.append(out_channels)
+            num_blocks = max(round(num_blocks * depth_mult), 1)
+            stage = []
+
+            conv_layer = self.add_sublayer(
+                'layers{}.stage{}.conv_layer'.format(layers_num, i + 1),
+                Conv(
+                    in_channels, out_channels, 3, 2, bias=False, act=act))
+            stage.append(conv_layer)
+            layers_num += 1
+
+            if use_spp and arch in ['X']:
+                # in YOLOX use SPPLayer
+                spp_layer = self.add_sublayer(
+                    'layers{}.stage{}.spp_layer'.format(layers_num, i + 1),
+                    SPPLayer(
+                        out_channels,
+                        out_channels,
+                        kernel_sizes=spp_kernal_sizes,
+                        bias=False,
+                        act=act))
+                stage.append(spp_layer)
+                layers_num += 1
+
+            csp_layer = self.add_sublayer(
+                'layers{}.stage{}.csp_layer'.format(layers_num, i + 1),
+                CSPLayer(
+                    out_channels,
+                    out_channels,
+                    num_blocks=num_blocks,
+                    shortcut=shortcut,
+                    depthwise=depthwise,
+                    bias=False,
+                    act=act))
+            stage.append(csp_layer)
+            layers_num += 1
+
+            if use_spp and arch in ['P5', 'P6']:
+                # the latest YOLOv5 uses SPPFLayer instead of SPPLayer
+                sppf_layer = self.add_sublayer(
+                    'layers{}.stage{}.sppf_layer'.format(layers_num, i + 1),
+                    SPPFLayer(
+                        out_channels,
+                        out_channels,
+                        ksize=5,
+                        bias=False,
+                        act=act))
+                stage.append(sppf_layer)
+                layers_num += 1
+
+            self.csp_dark_blocks.append(nn.Sequential(*stage))
+
+        self._out_channels = [_out_channels[i] for i in self.return_idx]
+        self.strides = [[2, 4, 8, 16, 32, 64][i] for i in self.return_idx]
+
+    def forward(self, inputs):
+        x = inputs['image']
+        outputs = []
+        x = self.stem(x)
+        for i, layer in enumerate(self.csp_dark_blocks):
+            x = layer(x)
+            if i + 1 in self.return_idx:
+                outputs.append(x)
+        return outputs
+
+    @property
+    def out_shape(self):
+        return [
+            ShapeSpec(
+                channels=c, stride=s)
+            for c, s in zip(self._out_channels, self.strides)
+        ]
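
A hedged smoke-test sketch for the backbone above; the input is a dict with an 'image' key, and out_shape reports the channel/stride pairs a neck would consume (the 0.33/0.50 multipliers are the usual YOLOX-s scale, assumed here):

    import paddle
    from paddlex.ppdet.modeling.backbones.csp_darknet import CSPDarkNet

    model = CSPDarkNet(arch='X', depth_mult=0.33, width_mult=0.50)
    feats = model({'image': paddle.rand([1, 3, 640, 640])})
    print([f.shape for f in feats])
    # [1, 128, 80, 80], [1, 256, 40, 40], [1, 512, 20, 20]
    print([(s.channels, s.stride) for s in model.out_shape])
    # [(128, 8), (256, 16), (512, 32)]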

+ 283 - 0
paddlex/ppdet/modeling/backbones/cspresnet.py

@@ -0,0 +1,283 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.regularizer import L2Decay
+
+from paddlex.ppdet.modeling.ops import get_act_fn
+from paddlex.ppdet.core.workspace import register, serializable
+from ..shape_spec import ShapeSpec
+
+__all__ = ['CSPResNet', 'BasicBlock', 'EffectiveSELayer', 'ConvBNLayer']
+
+
+class ConvBNLayer(nn.Layer):
+    def __init__(self,
+                 ch_in,
+                 ch_out,
+                 filter_size=3,
+                 stride=1,
+                 groups=1,
+                 padding=0,
+                 act=None):
+        super(ConvBNLayer, self).__init__()
+
+        self.conv = nn.Conv2D(
+            in_channels=ch_in,
+            out_channels=ch_out,
+            kernel_size=filter_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            bias_attr=False)
+
+        self.bn = nn.BatchNorm2D(
+            ch_out,
+            weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
+            bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
+        self.act = get_act_fn(act) if act is None or isinstance(act, (
+            str, dict)) else act
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn(x)
+        x = self.act(x)
+
+        return x
+
+
+class RepVggBlock(nn.Layer):
+    def __init__(self, ch_in, ch_out, act='relu'):
+        super(RepVggBlock, self).__init__()
+        self.ch_in = ch_in
+        self.ch_out = ch_out
+        self.conv1 = ConvBNLayer(
+            ch_in, ch_out, 3, stride=1, padding=1, act=None)
+        self.conv2 = ConvBNLayer(
+            ch_in, ch_out, 1, stride=1, padding=0, act=None)
+        self.act = get_act_fn(act) if act is None or isinstance(act, (
+            str, dict)) else act
+
+    def forward(self, x):
+        if hasattr(self, 'conv'):
+            y = self.conv(x)
+        else:
+            y = self.conv1(x) + self.conv2(x)
+        y = self.act(y)
+        return y
+
+    def convert_to_deploy(self):
+        if not hasattr(self, 'conv'):
+            self.conv = nn.Conv2D(
+                in_channels=self.ch_in,
+                out_channels=self.ch_out,
+                kernel_size=3,
+                stride=1,
+                padding=1,
+                groups=1)
+        kernel, bias = self.get_equivalent_kernel_bias()
+        self.conv.weight.set_value(kernel)
+        self.conv.bias.set_value(bias)
+        self.__delattr__('conv1')
+        self.__delattr__('conv2')
+
+    def get_equivalent_kernel_bias(self):
+        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.conv1)
+        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.conv2)
+        return kernel3x3 + self._pad_1x1_to_3x3_tensor(
+            kernel1x1), bias3x3 + bias1x1
+
+    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
+        if kernel1x1 is None:
+            return 0
+        else:
+            return nn.functional.pad(kernel1x1, [1, 1, 1, 1])
+
+    def _fuse_bn_tensor(self, branch):
+        if branch is None:
+            return 0, 0
+        kernel = branch.conv.weight
+        running_mean = branch.bn._mean
+        running_var = branch.bn._variance
+        gamma = branch.bn.weight
+        beta = branch.bn.bias
+        eps = branch.bn._epsilon
+        std = (running_var + eps).sqrt()
+        t = (gamma / std).reshape((-1, 1, 1, 1))
+        return kernel * t, beta - running_mean * gamma / std
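
_fuse_bn_tensor above folds BatchNorm into the preceding conv via W' = W * gamma / sigma and b' = beta - mu * gamma / sigma. A standalone numpy check of that identity at a single output position (shapes and values below are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    W = rng.normal(size=(4, 3, 3, 3))       # OIHW kernel, conv has no bias
    x = rng.normal(size=(3, 3, 3))          # one 3x3 input patch
    gamma, beta = rng.normal(size=4), rng.normal(size=4)
    mu, var, eps = rng.normal(size=4), rng.random(4) + 0.1, 1e-5

    std = np.sqrt(var + eps)
    W_fused = W * (gamma / std)[:, None, None, None]
    b_fused = beta - mu * gamma / std

    y = np.array([(W[o] * x).sum() for o in range(4)])     # conv output
    y_bn = gamma * (y - mu) / std + beta                   # then BN
    y_fused = np.array([(W_fused[o] * x).sum() for o in range(4)]) + b_fused
    assert np.allclose(y_bn, y_fused)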
+
+
+class BasicBlock(nn.Layer):
+    def __init__(self, ch_in, ch_out, act='relu', shortcut=True):
+        super(BasicBlock, self).__init__()
+        assert ch_in == ch_out
+        self.conv1 = ConvBNLayer(
+            ch_in, ch_out, 3, stride=1, padding=1, act=act)
+        self.conv2 = RepVggBlock(ch_out, ch_out, act=act)
+        self.shortcut = shortcut
+
+    def forward(self, x):
+        y = self.conv1(x)
+        y = self.conv2(y)
+        if self.shortcut:
+            return paddle.add(x, y)
+        else:
+            return y
+
+
+class EffectiveSELayer(nn.Layer):
+    """ Effective Squeeze-Excitation
+    From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
+    """
+
+    def __init__(self, channels, act='hardsigmoid'):
+        super(EffectiveSELayer, self).__init__()
+        self.fc = nn.Conv2D(channels, channels, kernel_size=1, padding=0)
+        self.act = get_act_fn(act) if act is None or isinstance(act, (
+            str, dict)) else act
+
+    def forward(self, x):
+        x_se = x.mean((2, 3), keepdim=True)
+        x_se = self.fc(x_se)
+        return x * self.act(x_se)
+
+
+class CSPResStage(nn.Layer):
+    def __init__(self,
+                 block_fn,
+                 ch_in,
+                 ch_out,
+                 n,
+                 stride,
+                 act='relu',
+                 attn='eca'):
+        super(CSPResStage, self).__init__()
+
+        ch_mid = (ch_in + ch_out) // 2
+        if stride == 2:
+            self.conv_down = ConvBNLayer(
+                ch_in, ch_mid, 3, stride=2, padding=1, act=act)
+        else:
+            self.conv_down = None
+        self.conv1 = ConvBNLayer(ch_mid, ch_mid // 2, 1, act=act)
+        self.conv2 = ConvBNLayer(ch_mid, ch_mid // 2, 1, act=act)
+        self.blocks = nn.Sequential(* [
+            block_fn(
+                ch_mid // 2, ch_mid // 2, act=act, shortcut=True)
+            for i in range(n)
+        ])
+        if attn:
+            self.attn = EffectiveSELayer(ch_mid, act='hardsigmoid')
+        else:
+            self.attn = None
+
+        self.conv3 = ConvBNLayer(ch_mid, ch_out, 1, act=act)
+
+    def forward(self, x):
+        if self.conv_down is not None:
+            x = self.conv_down(x)
+        y1 = self.conv1(x)
+        y2 = self.blocks(self.conv2(x))
+        y = paddle.concat([y1, y2], axis=1)
+        if self.attn is not None:
+            y = self.attn(y)
+        y = self.conv3(y)
+        return y
+
+
+@register
+@serializable
+class CSPResNet(nn.Layer):
+    __shared__ = ['width_mult', 'depth_mult', 'trt']
+
+    def __init__(self,
+                 layers=[3, 6, 6, 3],
+                 channels=[64, 128, 256, 512, 1024],
+                 act='swish',
+                 return_idx=[0, 1, 2, 3, 4],
+                 depth_wise=False,
+                 use_large_stem=False,
+                 width_mult=1.0,
+                 depth_mult=1.0,
+                 trt=False):
+        super(CSPResNet, self).__init__()
+        channels = [max(round(c * width_mult), 1) for c in channels]
+        layers = [max(round(l * depth_mult), 1) for l in layers]
+        act = get_act_fn(
+            act, trt=trt) if act is None or isinstance(act,
+                                                       (str, dict)) else act
+
+        if use_large_stem:
+            self.stem = nn.Sequential(
+                ('conv1', ConvBNLayer(
+                    3, channels[0] // 2, 3, stride=2, padding=1, act=act)),
+                ('conv2', ConvBNLayer(
+                    channels[0] // 2,
+                    channels[0] // 2,
+                    3,
+                    stride=1,
+                    padding=1,
+                    act=act)), ('conv3', ConvBNLayer(
+                        channels[0] // 2,
+                        channels[0],
+                        3,
+                        stride=1,
+                        padding=1,
+                        act=act)))
+        else:
+            self.stem = nn.Sequential(
+                ('conv1', ConvBNLayer(
+                    3, channels[0] // 2, 3, stride=2, padding=1, act=act)),
+                ('conv2', ConvBNLayer(
+                    channels[0] // 2,
+                    channels[0],
+                    3,
+                    stride=1,
+                    padding=1,
+                    act=act)))
+
+        n = len(channels) - 1
+        self.stages = nn.Sequential(* [(str(i), CSPResStage(
+            BasicBlock, channels[i], channels[i + 1], layers[i], 2, act=act))
+                                       for i in range(n)])
+
+        self._out_channels = channels[1:]
+        self._out_strides = [4, 8, 16, 32]
+        self.return_idx = return_idx
+
+    def forward(self, inputs):
+        x = inputs['image']
+        x = self.stem(x)
+        outs = []
+        for idx, stage in enumerate(self.stages):
+            x = stage(x)
+            if idx in self.return_idx:
+                outs.append(x)
+
+        return outs
+
+    @property
+    def out_shape(self):
+        return [
+            ShapeSpec(
+                channels=self._out_channels[i], stride=self._out_strides[i])
+            for i in self.return_idx
+        ]

+ 15 - 10
paddlex/ppdet/modeling/backbones/darknet.py

@@ -77,8 +77,8 @@ class ConvBNLayer(nn.Layer):
         out = self.batch_norm(out)
         if self.act == 'leaky':
             out = F.leaky_relu(out, 0.1)
-        elif self.act == 'mish':
-            out = mish(out)
+        else:
+            out = getattr(F, self.act)(out)
         return out
 
 
@@ -149,9 +149,14 @@ class BasicBlock(nn.Layer):
 
         super(BasicBlock, self).__init__()
 
+        assert ch_in == ch_out and (ch_in % 2) == 0, \
+            f"ch_in and ch_out should be the same even int, but got ch_in={ch_in}, ch_out={ch_out}"
+        # example channel route: ch_in --(conv1)--> ch_in / 2 --(conv2)--> ch_out
+        #                          10  --(conv1)-->     5     --(conv2)-->   10
         self.conv1 = ConvBNLayer(
             ch_in=ch_in,
-            ch_out=ch_out,
+            ch_out=int(ch_out / 2),
             filter_size=1,
             stride=1,
             padding=0,
@@ -160,8 +165,8 @@ class BasicBlock(nn.Layer):
             freeze_norm=freeze_norm,
             data_format=data_format)
         self.conv2 = ConvBNLayer(
-            ch_in=ch_out,
-            ch_out=ch_out * 2,
+            ch_in=int(ch_out / 2),
+            ch_out=ch_out,
             filter_size=3,
             stride=1,
             padding=1,
@@ -215,7 +220,7 @@ class Blocks(nn.Layer):
             res_out = self.add_sublayer(
                 block_name,
                 BasicBlock(
-                    ch_out * 2,
+                    ch_out,
                     ch_out,
                     norm_type=norm_type,
                     norm_decay=norm_decay,
@@ -296,7 +301,7 @@ class DarkNet(nn.Layer):
                 name,
                 Blocks(
                     int(ch_in[i]),
-                    32 * (2**i),
+                    int(ch_in[i]),
                     stage,
                     norm_type=norm_type,
                     norm_decay=norm_decay,
@@ -305,14 +310,14 @@ class DarkNet(nn.Layer):
                     name=name))
             self.darknet_conv_block_list.append(conv_block)
             if i in return_idx:
-                self._out_channels.append(64 * (2**i))
+                self._out_channels.append(int(ch_in[i]))
         for i in range(num_stages - 1):
             down_name = 'stage.{}.downsample'.format(i)
             downsample = self.add_sublayer(
                 down_name,
                 DownSample(
-                    ch_in=32 * (2**(i + 1)),
-                    ch_out=32 * (2**(i + 2)),
+                    ch_in=int(ch_in[i]),
+                    ch_out=int(ch_in[i + 1]),
                     norm_type=norm_type,
                     norm_decay=norm_decay,
                     freeze_norm=freeze_norm,

+ 1 - 1
paddlex/ppdet/modeling/backbones/esnet.py

@@ -20,7 +20,7 @@ import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
 from paddle import ParamAttr
-from paddle.nn import Conv2D, MaxPool2D, AdaptiveAvgPool2D
+from paddle.nn import Conv2D, MaxPool2D, AdaptiveAvgPool2D, BatchNorm
 from paddle.nn.initializer import KaimingNormal
 from paddle.regularizer import L2Decay
 

+ 5 - 5
paddlex/ppdet/modeling/backbones/lcnet.py

@@ -171,7 +171,7 @@ class LCNet(nn.Layer):
             num_filters=make_divisible(16 * scale),
             stride=2)
 
-        self.blocks2 = nn.Sequential(*[
+        self.blocks2 = nn.Sequential(* [
             DepthwiseSeparable(
                 num_channels=make_divisible(in_c * scale),
                 num_filters=make_divisible(out_c * scale),
@@ -181,7 +181,7 @@ class LCNet(nn.Layer):
             for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks2"])
         ])
 
-        self.blocks3 = nn.Sequential(*[
+        self.blocks3 = nn.Sequential(* [
             DepthwiseSeparable(
                 num_channels=make_divisible(in_c * scale),
                 num_filters=make_divisible(out_c * scale),
@@ -194,7 +194,7 @@ class LCNet(nn.Layer):
         out_channels.append(
             make_divisible(NET_CONFIG["blocks3"][-1][2] * scale))
 
-        self.blocks4 = nn.Sequential(*[
+        self.blocks4 = nn.Sequential(* [
             DepthwiseSeparable(
                 num_channels=make_divisible(in_c * scale),
                 num_filters=make_divisible(out_c * scale),
@@ -207,7 +207,7 @@ class LCNet(nn.Layer):
         out_channels.append(
             make_divisible(NET_CONFIG["blocks4"][-1][2] * scale))
 
-        self.blocks5 = nn.Sequential(*[
+        self.blocks5 = nn.Sequential(* [
             DepthwiseSeparable(
                 num_channels=make_divisible(in_c * scale),
                 num_filters=make_divisible(out_c * scale),
@@ -220,7 +220,7 @@ class LCNet(nn.Layer):
         out_channels.append(
             make_divisible(NET_CONFIG["blocks5"][-1][2] * scale))
 
-        self.blocks6 = nn.Sequential(*[
+        self.blocks6 = nn.Sequential(* [
             DepthwiseSeparable(
                 num_channels=make_divisible(in_c * scale),
                 num_filters=make_divisible(out_c * scale),

+ 2 - 2
paddlex/ppdet/modeling/backbones/mobilenet_v3.py

@@ -321,8 +321,8 @@ class MobileNetV3(nn.Layer):
                 [3, 184, 80, False, "hard_swish", 1],
                 [3, 480, 112, True, "hard_swish", 1],
                 [3, 672, 112, True, "hard_swish", 1],  # YOLOv3 output
-                [5, 672, 160, True, "hard_swish",
-                 2],  # SSD/SSDLite/RCNN output
+                [5, 672, 160, True, "hard_swish", 2
+                 ],  # SSD/SSDLite/RCNN output
                 [5, 960, 160, True, "hard_swish", 1],
                 [5, 960, 160, True, "hard_swish", 1],  # YOLOv3 output
             ]

+ 4 - 5
paddlex/ppdet/modeling/backbones/swin_transformer.py

@@ -482,8 +482,7 @@ class BasicLayer(nn.Layer):
         # calculate attention mask for SW-MSA
         Hp = int(np.ceil(H / self.window_size)) * self.window_size
         Wp = int(np.ceil(W / self.window_size)) * self.window_size
-        img_mask = paddle.fluid.layers.zeros(
-            [1, Hp, Wp, 1], dtype='float32')  # 1 Hp Wp 1
+        img_mask = paddle.zeros([1, Hp, Wp, 1], dtype='float32')  # 1 Hp Wp 1
         h_slices = (slice(0, -self.window_size),
                     slice(-self.window_size, -self.shift_size),
                     slice(-self.shift_size, None))
@@ -691,10 +690,10 @@ class SwinTransformer(nn.Layer):
         if self.frozen_stages >= 0:
             self.patch_embed.eval()
             for param in self.patch_embed.parameters():
-                param.requires_grad = False
+                param.stop_gradient = True
 
         if self.frozen_stages >= 1 and self.ape:
-            self.absolute_pos_embed.requires_grad = False
+            self.absolute_pos_embed.stop_gradient = True
 
         if self.frozen_stages >= 2:
             self.pos_drop.eval()
@@ -702,7 +701,7 @@ class SwinTransformer(nn.Layer):
                 m = self.layers[i]
                 m.eval()
                 for param in m.parameters():
-                    param.requires_grad = False
+                    param.stop_gradient = True
 
     def _init_weights(self, m):
         if isinstance(m, nn.Linear):

+ 134 - 18
paddlex/ppdet/modeling/bbox_utils.py

@@ -279,8 +279,8 @@ def decode_yolo(box, anchor, downsample_ratio):
     return [x1, y1, w1, h1]
 
 
-def iou_similarity(box1, box2, eps=1e-9):
-    """Calculate iou of box1 and box2
+def batch_iou_similarity(box1, box2, eps=1e-9):
+    """Calculate iou of box1 and box2 in batch
 
     Args:
         box1 (Tensor): box with the shape [N, M1, 4]
@@ -747,9 +747,9 @@ def distance2bbox(points, distance, max_shape=None):
 def bbox_center(boxes):
     """Get bbox centers from boxes.
     Args:
-        boxes (Tensor): boxes with shape (N, 4), "xmin, ymin, xmax, ymax" format.
+        boxes (Tensor): boxes with shape (..., 4), "xmin, ymin, xmax, ymax" format.
     Returns:
-        Tensor: boxes centers with shape (N, 2), "cx, cy" format.
+        Tensor: boxes centers with shape (..., 2), "cx, cy" format.
     """
     boxes_cx = (boxes[..., 0] + boxes[..., 2]) / 2
     boxes_cy = (boxes[..., 1] + boxes[..., 3]) / 2
@@ -759,20 +759,136 @@ def bbox_center(boxes):
 def batch_distance2bbox(points, distance, max_shapes=None):
     """Decode distance prediction to bounding box for batch.
     Args:
-        points (Tensor): [B, ..., 2]
-        distance (Tensor): [B, ..., 4]
-        max_shapes (tuple): [B, 2], "h,w" format, Shape of the image.
+        points (Tensor): [B, ..., 2], "xy" format
+        distance (Tensor): [B, ..., 4], "ltrb" format
+        max_shapes (Tensor): [B, 2], "h,w" format, Shape of the image.
     Returns:
-        Tensor: Decoded bboxes.
+        Tensor: Decoded bboxes, "x1y1x2y2" format.
     """
-    x1 = points[..., 0] - distance[..., 0]
-    y1 = points[..., 1] - distance[..., 1]
-    x2 = points[..., 0] + distance[..., 2]
-    y2 = points[..., 1] + distance[..., 3]
+    lt, rb = paddle.split(distance, 2, -1)
+    # when adding a tensor and a parameter, the parameter is better placed as the second operand
+    x1y1 = -lt + points
+    x2y2 = rb + points
+    out_bbox = paddle.concat([x1y1, x2y2], -1)
     if max_shapes is not None:
-        for i, max_shape in enumerate(max_shapes):
-            x1[i] = x1[i].clip(min=0, max=max_shape[1])
-            y1[i] = y1[i].clip(min=0, max=max_shape[0])
-            x2[i] = x2[i].clip(min=0, max=max_shape[1])
-            y2[i] = y2[i].clip(min=0, max=max_shape[0])
-    return paddle.stack([x1, y1, x2, y2], -1)
+        max_shapes = max_shapes.flip(-1).tile([1, 2])
+        delta_dim = out_bbox.ndim - max_shapes.ndim
+        for _ in range(delta_dim):
+            max_shapes.unsqueeze_(1)
+        out_bbox = paddle.where(out_bbox < max_shapes, out_bbox, max_shapes)
+        out_bbox = paddle.where(out_bbox > 0, out_bbox,
+                                paddle.zeros_like(out_bbox))
+    return out_bbox
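
A tiny numeric example of the ltrb decode above: each point (x, y) with distances (l, t, r, b) becomes (x - l, y - t, x + r, y + b):

    import paddle

    points = paddle.to_tensor([[[10., 10.]]])          # [B=1, 1, 2], "xy"
    distance = paddle.to_tensor([[[2., 3., 4., 5.]]])  # [B=1, 1, 4], "ltrb"
    lt, rb = paddle.split(distance, 2, -1)
    print(paddle.concat([points - lt, points + rb], -1).numpy())
    # [[[ 8.  7. 14. 15.]]]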
+
+
+def delta2bbox_v2(rois,
+                  deltas,
+                  means=(0.0, 0.0, 0.0, 0.0),
+                  stds=(1.0, 1.0, 1.0, 1.0),
+                  max_shape=None,
+                  wh_ratio_clip=16.0 / 1000.0,
+                  ctr_clip=None):
+    """Transform network output(delta) to bboxes.
+    Based on https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/
+             bbox/coder/delta_xywh_bbox_coder.py
+    Args:
+        rois (Tensor): shape [..., 4], base bboxes, typical examples include
+            anchor and rois
+        deltas (Tensor): shape [..., 4], offset relative to base bboxes
+        means (list[float]): the mean that was used to normalize deltas,
+            must be of size 4
+        stds (list[float]): the std that was used to normalize deltas,
+            must be of size 4
+        max_shape (list[float] or None): height and width of image, will be
+            used to clip bboxes if not None
+        wh_ratio_clip (float): threshold used to clip delta wh of decoded bboxes
+        ctr_clip (float or None): threshold used to clip delta xy of decoded bboxes; None means no clipping
+    """
+    if rois.size == 0:
+        return paddle.empty_like(rois)
+    means = paddle.to_tensor(means)
+    stds = paddle.to_tensor(stds)
+    deltas = deltas * stds + means
+
+    dxy = deltas[..., :2]
+    dwh = deltas[..., 2:]
+
+    pxy = (rois[..., :2] + rois[..., 2:]) * 0.5
+    pwh = rois[..., 2:] - rois[..., :2]
+    dxy_wh = pwh * dxy
+
+    max_ratio = np.abs(np.log(wh_ratio_clip))
+    if ctr_clip is not None:
+        dxy_wh = paddle.clip(dxy_wh, max=ctr_clip, min=-ctr_clip)
+        dwh = paddle.clip(dwh, max=max_ratio)
+    else:
+        dwh = dwh.clip(min=-max_ratio, max=max_ratio)
+
+    gxy = pxy + dxy_wh
+    gwh = pwh * dwh.exp()
+    x1y1 = gxy - (gwh * 0.5)
+    x2y2 = gxy + (gwh * 0.5)
+    bboxes = paddle.concat([x1y1, x2y2], axis=-1)
+    if max_shape is not None:
+        bboxes[..., 0::2] = bboxes[..., 0::2].clip(min=0, max=max_shape[1])
+        bboxes[..., 1::2] = bboxes[..., 1::2].clip(min=0, max=max_shape[0])
+    return bboxes
+
+
+def bbox2delta_v2(src_boxes,
+                  tgt_boxes,
+                  means=(0.0, 0.0, 0.0, 0.0),
+                  stds=(1.0, 1.0, 1.0, 1.0)):
+    """Encode bboxes to deltas.
+    Modified from paddlex.ppdet.modeling.bbox_utils.bbox2delta.
+    Args:
+        src_boxes (Tensor[..., 4]): base bboxes
+        tgt_boxes (Tensor[..., 4]): target bboxes
+        means (list[float]): the mean that will be used to normalize delta
+        stds (list[float]): the std that will be used to normalize delta
+    """
+    if src_boxes.size == 0:
+        return paddle.empty_like(src_boxes)
+    src_w = src_boxes[..., 2] - src_boxes[..., 0]
+    src_h = src_boxes[..., 3] - src_boxes[..., 1]
+    src_ctr_x = src_boxes[..., 0] + 0.5 * src_w
+    src_ctr_y = src_boxes[..., 1] + 0.5 * src_h
+
+    tgt_w = tgt_boxes[..., 2] - tgt_boxes[..., 0]
+    tgt_h = tgt_boxes[..., 3] - tgt_boxes[..., 1]
+    tgt_ctr_x = tgt_boxes[..., 0] + 0.5 * tgt_w
+    tgt_ctr_y = tgt_boxes[..., 1] + 0.5 * tgt_h
+
+    dx = (tgt_ctr_x - src_ctr_x) / src_w
+    dy = (tgt_ctr_y - src_ctr_y) / src_h
+    dw = paddle.log(tgt_w / src_w)
+    dh = paddle.log(tgt_h / src_h)
+
+    deltas = paddle.stack((dx, dy, dw, dh), axis=1)  # [n, 4]
+    means = paddle.to_tensor(means, place=src_boxes.place)
+    stds = paddle.to_tensor(stds, place=src_boxes.place)
+    deltas = (deltas - means) / stds
+    return deltas
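
bbox2delta_v2 and delta2bbox_v2 above are inverse transforms; with unit means/stds and no clipping in play, a roundtrip recovers the target boxes up to float error:

    import paddle
    from paddlex.ppdet.modeling.bbox_utils import (bbox2delta_v2,
                                                   delta2bbox_v2)

    rois = paddle.to_tensor([[0., 0., 10., 10.], [5., 5., 25., 15.]])
    tgts = paddle.to_tensor([[1., 2., 11., 13.], [4., 6., 26., 18.]])
    deltas = bbox2delta_v2(rois, tgts)
    recon = delta2bbox_v2(rois, deltas)
    print(paddle.allclose(recon, tgts, atol=1e-4).item())  # True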
+
+
+def iou_similarity(box1, box2, eps=1e-10):
+    """Calculate iou of box1 and box2
+
+    Args:
+        box1 (Tensor): box with the shape [M1, 4]
+        box2 (Tensor): box with the shape [M2, 4]
+
+    Return:
+        iou (Tensor): iou between box1 and box2 with the shape [M1, M2]
+    """
+    box1 = box1.unsqueeze(1)  # [M1, 4] -> [M1, 1, 4]
+    box2 = box2.unsqueeze(0)  # [M2, 4] -> [1, M2, 4]
+    px1y1, px2y2 = box1[:, :, 0:2], box1[:, :, 2:4]
+    gx1y1, gx2y2 = box2[:, :, 0:2], box2[:, :, 2:4]
+    x1y1 = paddle.maximum(px1y1, gx1y1)
+    x2y2 = paddle.minimum(px2y2, gx2y2)
+    overlap = (x2y2 - x1y1).clip(0).prod(-1)
+    area1 = (px2y2 - px1y1).clip(0).prod(-1)
+    area2 = (gx2y2 - gx1y1).clip(0).prod(-1)
+    union = area1 + area2 - overlap + eps
+    return overlap / union
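
A worked example of the pairwise IoU above: two 2x2 squares offset by one unit overlap in a 1x1 region, so IoU = 1 / (4 + 4 - 1):

    import paddle
    from paddlex.ppdet.modeling.bbox_utils import iou_similarity

    box1 = paddle.to_tensor([[0., 0., 2., 2.]])   # area 4
    box2 = paddle.to_tensor([[1., 1., 3., 3.]])   # area 4, overlap 1
    print(iou_similarity(box1, box2).numpy())     # [[0.1428...]]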

+ 40 - 0
paddlex/ppdet/modeling/cls_utils.py

@@ -0,0 +1,40 @@
+#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def _get_class_default_kwargs(cls, *args, **kwargs):
+    """
+    Get the default arguments of a class in dict format; if args or
+    kwargs are specified, they replace the corresponding defaults
+    """
+    varnames = cls.__init__.__code__.co_varnames
+    argcount = cls.__init__.__code__.co_argcount
+    keys = varnames[:argcount]
+    assert keys[0] == 'self'
+    keys = keys[1:]
+
+    values = list(cls.__init__.__defaults__)
+    assert len(values) == len(keys)
+
+    if len(args) > 0:
+        for i, arg in enumerate(args):
+            values[i] = arg
+
+    default_kwargs = dict(zip(keys, values))
+
+    if len(kwargs) > 0:
+        for k, v in kwargs.items():
+            default_kwargs[k] = v
+
+    return default_kwargs
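
A toy usage of the helper above, which motivates the RoIAlign().__dict__ -> _get_class_default_kwargs(RoIAlign) replacements later in this change: defaults are read from the signature without instantiating the class:

    from paddlex.ppdet.modeling.cls_utils import _get_class_default_kwargs

    class Toy:
        def __init__(self, a=1, b='x', c=None):
            pass

    print(_get_class_default_kwargs(Toy))          # {'a': 1, 'b': 'x', 'c': None}
    print(_get_class_default_kwargs(Toy, 7, c=3))  # {'a': 7, 'b': 'x', 'c': 3}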

+ 4 - 0
paddlex/ppdet/modeling/heads/__init__.py

@@ -31,6 +31,8 @@ from . import pico_head
 from . import detr_head
 from . import sparsercnn_head
 from . import tood_head
+from . import retina_head
+from . import ppyoloe_head
 
 from .bbox_head import *
 from .mask_head import *
@@ -51,3 +53,5 @@ from .pico_head import *
 from .detr_head import *
 from .sparsercnn_head import *
 from .tood_head import *
+from .retina_head import *
+from .ppyoloe_head import *

+ 2 - 1
paddlex/ppdet/modeling/heads/bbox_head.py

@@ -24,6 +24,7 @@ from paddlex.ppdet.core.workspace import register, create
 from .roi_extractor import RoIAlign
 from ..shape_spec import ShapeSpec
 from ..bbox_utils import bbox2delta
+from ..cls_utils import _get_class_default_kwargs
 from paddlex.ppdet.modeling.layers import ConvNormLayer
 
 __all__ = ['TwoFCHead', 'XConvNormHead', 'BBoxHead']
@@ -178,7 +179,7 @@ class BBoxHead(nn.Layer):
     def __init__(self,
                  head,
                  in_channel,
-                 roi_extractor=RoIAlign().__dict__,
+                 roi_extractor=_get_class_default_kwargs(RoIAlign),
                  bbox_assigner='BboxAssigner',
                  with_pool=False,
                  num_classes=80,

+ 2 - 1
paddlex/ppdet/modeling/heads/cascade_head.py

@@ -22,6 +22,7 @@ from .bbox_head import BBoxHead, TwoFCHead, XConvNormHead
 from .roi_extractor import RoIAlign
 from ..shape_spec import ShapeSpec
 from ..bbox_utils import delta2bbox, clip_bbox, nonempty_bbox
+from ..cls_utils import _get_class_default_kwargs
 
 __all__ = ['CascadeTwoFCHead', 'CascadeXConvNormHead', 'CascadeHead']
 
@@ -153,7 +154,7 @@ class CascadeHead(BBoxHead):
     def __init__(self,
                  head,
                  in_channel,
-                 roi_extractor=RoIAlign().__dict__,
+                 roi_extractor=_get_class_default_kwargs(RoIAlign),
                  bbox_assigner='BboxAssigner',
                  num_classes=80,
                  bbox_weight=[[10., 10., 5., 5.], [20.0, 20.0, 10.0, 10.0],

+ 10 - 8
paddlex/ppdet/modeling/heads/face_head.py

@@ -17,6 +17,7 @@ import paddle.nn as nn
 
 from paddlex.ppdet.core.workspace import register
 from ..layers import AnchorGeneratorSSD
+from ..cls_utils import _get_class_default_kwargs
 
 
 @register
@@ -36,14 +37,15 @@ class FaceHead(nn.Layer):
     __shared__ = ['num_classes']
     __inject__ = ['anchor_generator', 'loss']
 
-    def __init__(self,
-                 num_classes=80,
-                 in_channels=[96, 96],
-                 anchor_generator=AnchorGeneratorSSD().__dict__,
-                 kernel_size=3,
-                 padding=1,
-                 conv_decay=0.,
-                 loss='SSDLoss'):
+    def __init__(
+            self,
+            num_classes=80,
+            in_channels=[96, 96],
+            anchor_generator=_get_class_default_kwargs(AnchorGeneratorSSD),
+            kernel_size=3,
+            padding=1,
+            conv_decay=0.,
+            loss='SSDLoss'):
         super(FaceHead, self).__init__()
         # add background class
         self.num_classes = num_classes + 1

+ 2 - 0
paddlex/ppdet/modeling/heads/fcos_head.py

@@ -64,6 +64,8 @@ class FCOSFeat(nn.Layer):
                  norm_type='bn',
                  use_dcn=False):
         super(FCOSFeat, self).__init__()
+        self.feat_in = feat_in
+        self.feat_out = feat_out
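+        # exposed so downstream heads can size their conv layers from the
+        # feature extractor (e.g. the new RetinaHead reads conv_feat.feat_out)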
         self.num_convs = num_convs
         self.norm_type = norm_type
         self.cls_subnet_convs = []

+ 36 - 76
paddlex/ppdet/modeling/heads/gfl_head.py

@@ -29,7 +29,7 @@ from paddle.nn.initializer import Normal, Constant
 
 from paddlex.ppdet.core.workspace import register
 from paddlex.ppdet.modeling.layers import ConvNormLayer
-from paddlex.ppdet.modeling.bbox_utils import distance2bbox, bbox2distance
+from paddlex.ppdet.modeling.bbox_utils import distance2bbox, bbox2distance, batch_distance2bbox
 from paddlex.ppdet.data.transform.atss_assigner import bbox_overlaps
 
 
@@ -79,7 +79,9 @@ class Integral(nn.Layer):
                 offsets from the box center in four directions, shape (N, 4).
         """
         x = F.softmax(x.reshape([-1, self.reg_max + 1]), axis=1)
-        x = F.linear(x, self.project).reshape([-1, 4])
+        x = F.linear(x, self.project)
+        if self.training:
+            x = x.reshape([-1, 4])
         return x
 
 
@@ -242,18 +244,34 @@ class GFLHead(nn.Layer):
         ), "The size of fpn_feats is not equal to size of fpn_stride"
         cls_logits_list = []
         bboxes_reg_list = []
-        for scale_reg, fpn_feat in zip(self.scales_regs, fpn_feats):
+        for stride, scale_reg, fpn_feat in zip(self.fpn_stride,
+                                               self.scales_regs, fpn_feats):
             conv_cls_feat, conv_reg_feat = self.conv_feat(fpn_feat)
-            cls_logits = self.gfl_head_cls(conv_cls_feat)
-            bbox_reg = scale_reg(self.gfl_head_reg(conv_reg_feat))
+            cls_score = self.gfl_head_cls(conv_cls_feat)
+            bbox_pred = scale_reg(self.gfl_head_reg(conv_reg_feat))
             if self.dgqp_module:
-                quality_score = self.dgqp_module(bbox_reg)
-                cls_logits = F.sigmoid(cls_logits) * quality_score
+                quality_score = self.dgqp_module(bbox_pred)
+                cls_score = F.sigmoid(cls_score) * quality_score
             if not self.training:
-                cls_logits = F.sigmoid(cls_logits.transpose([0, 2, 3, 1]))
-                bbox_reg = bbox_reg.transpose([0, 2, 3, 1])
-            cls_logits_list.append(cls_logits)
-            bboxes_reg_list.append(bbox_reg)
+                cls_score = F.sigmoid(cls_score.transpose([0, 2, 3, 1]))
+                bbox_pred = bbox_pred.transpose([0, 2, 3, 1])
+                b, cell_h, cell_w, _ = paddle.shape(cls_score)
+                y, x = self.get_single_level_center_point(
+                    [cell_h, cell_w], stride, cell_offset=self.cell_offset)
+                center_points = paddle.stack([x, y], axis=-1)
+                cls_score = cls_score.reshape([b, -1, self.cls_out_channels])
+                bbox_pred = self.distribution_project(bbox_pred) * stride
+                bbox_pred = bbox_pred.reshape([b, cell_h * cell_w, 4])
+
+                # NOTE: If keep_ratio=False and the image shape is a
+                # multiple of 32, max_shapes is not passed to distance2bbox,
+                # which speeds up model prediction. If max_shapes is needed,
+                # use inputs['im_shape'].
+                bbox_pred = batch_distance2bbox(
+                    center_points, bbox_pred, max_shapes=None)
+
+            cls_logits_list.append(cls_score)
+            bboxes_reg_list.append(bbox_pred)
 
         return (cls_logits_list, bboxes_reg_list)
 
@@ -417,71 +435,13 @@ class GFLHead(nn.Layer):
         x = x.flatten()
         return y, x
 
-    def get_bboxes_single(self,
-                          cls_scores,
-                          bbox_preds,
-                          img_shape,
-                          scale_factor,
-                          rescale=True,
-                          cell_offset=0):
-        assert len(cls_scores) == len(bbox_preds)
-        mlvl_bboxes = []
-        mlvl_scores = []
-        for stride, cls_score, bbox_pred in zip(self.fpn_stride, cls_scores,
-                                                bbox_preds):
-            featmap_size = [
-                paddle.shape(cls_score)[0], paddle.shape(cls_score)[1]
-            ]
-            y, x = self.get_single_level_center_point(
-                featmap_size, stride, cell_offset=cell_offset)
-            center_points = paddle.stack([x, y], axis=-1)
-            scores = cls_score.reshape([-1, self.cls_out_channels])
-            bbox_pred = self.distribution_project(bbox_pred) * stride
-
-            if scores.shape[0] > self.nms_pre:
-                max_scores = scores.max(axis=1)
-                _, topk_inds = max_scores.topk(self.nms_pre)
-                center_points = center_points.gather(topk_inds)
-                bbox_pred = bbox_pred.gather(topk_inds)
-                scores = scores.gather(topk_inds)
-
-            bboxes = distance2bbox(
-                center_points, bbox_pred, max_shape=img_shape)
-            mlvl_bboxes.append(bboxes)
-            mlvl_scores.append(scores)
-        mlvl_bboxes = paddle.concat(mlvl_bboxes)
-        if rescale:
-            # [h_scale, w_scale] to [w_scale, h_scale, w_scale, h_scale]
-            im_scale = paddle.concat([scale_factor[::-1], scale_factor[::-1]])
-            mlvl_bboxes /= im_scale
-        mlvl_scores = paddle.concat(mlvl_scores)
-        mlvl_scores = mlvl_scores.transpose([1, 0])
-        return mlvl_bboxes, mlvl_scores
-
-    def decode(self, cls_scores, bbox_preds, im_shape, scale_factor,
-               cell_offset):
-        batch_bboxes = []
-        batch_scores = []
-        for img_id in range(cls_scores[0].shape[0]):
-            num_levels = len(cls_scores)
-            cls_score_list = [cls_scores[i][img_id] for i in range(num_levels)]
-            bbox_pred_list = [bbox_preds[i][img_id] for i in range(num_levels)]
-            bboxes, scores = self.get_bboxes_single(
-                cls_score_list,
-                bbox_pred_list,
-                im_shape[img_id],
-                scale_factor[img_id],
-                cell_offset=cell_offset)
-            batch_bboxes.append(bboxes)
-            batch_scores.append(scores)
-        batch_bboxes = paddle.stack(batch_bboxes, axis=0)
-        batch_scores = paddle.stack(batch_scores, axis=0)
-
-        return batch_bboxes, batch_scores
-
     def post_process(self, gfl_head_outs, im_shape, scale_factor):
         cls_scores, bboxes_reg = gfl_head_outs
-        bboxes, score = self.decode(cls_scores, bboxes_reg, im_shape,
-                                    scale_factor, self.cell_offset)
-        bbox_pred, bbox_num, _ = self.nms(bboxes, score)
+        bboxes = paddle.concat(bboxes_reg, axis=1)
+        # rescale: [h_scale, w_scale] -> [w_scale, h_scale, w_scale, h_scale]
+        im_scale = scale_factor.flip([1]).tile([1, 2]).unsqueeze(1)
+        bboxes /= im_scale
+        mlvl_scores = paddle.concat(cls_scores, axis=1)
+        mlvl_scores = mlvl_scores.transpose([0, 2, 1])
+        bbox_pred, bbox_num, _ = self.nms(bboxes, mlvl_scores)
         return bbox_pred, bbox_num
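
A quick sanity check on the batched rescale above (a standalone sketch with dummy values; only the `flip`/`tile`/`unsqueeze` pattern from the diff is exercised). The per-image `[h_scale, w_scale]` becomes a `[N, 1, 4]` factor `[w_scale, h_scale, w_scale, h_scale]` that broadcasts over the concatenated boxes of all levels, replacing the deleted per-image `decode`/`get_bboxes_single` loop:

import paddle

scale_factor = paddle.to_tensor([[2.0, 0.5]])        # [h_scale, w_scale], N=1
im_scale = scale_factor.flip([1]).tile([1, 2]).unsqueeze(1)
print(im_scale.numpy())                              # [[[0.5 2.  0.5 2. ]]]

bboxes = paddle.to_tensor([[[10., 20., 30., 40.]]])  # [N, num_boxes, 4], xyxy
print((bboxes / im_scale).numpy())                   # [[[20. 10. 60. 20.]]]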

+ 17 - 16
paddlex/ppdet/modeling/heads/mask_head.py

@@ -20,6 +20,7 @@ from paddle.nn.initializer import KaimingNormal
 from paddlex.ppdet.core.workspace import register, create
 from paddlex.ppdet.modeling.layers import ConvNormLayer
 from .roi_extractor import RoIAlign
+from ..cls_utils import _get_class_default_kwargs
 
 
 @register
@@ -103,7 +104,7 @@ class MaskFeat(nn.Layer):
 
 @register
 class MaskHead(nn.Layer):
-    __shared__ = ['num_classes']
+    __shared__ = ['num_classes', 'export_onnx']
     __inject__ = ['mask_assigner']
     """
     RCNN mask head
@@ -120,12 +121,14 @@ class MaskHead(nn.Layer):
 
     def __init__(self,
                  head,
-                 roi_extractor=RoIAlign().__dict__,
+                 roi_extractor=_get_class_default_kwargs(RoIAlign),
                  mask_assigner='MaskAssigner',
                  num_classes=80,
-                 share_bbox_feat=False):
+                 share_bbox_feat=False,
+                 export_onnx=False):
         super(MaskHead, self).__init__()
         self.num_classes = num_classes
+        self.export_onnx = export_onnx
 
         self.roi_extractor = roi_extractor
         if isinstance(roi_extractor, dict):
@@ -206,8 +209,8 @@ class MaskHead(nn.Layer):
         rois_num (Tensor): The number of prediction for each batch
         scale_factor (Tensor): The scale factor from origin size to input size
         """
-        if rois.shape[0] == 0:
-            mask_out = paddle.full([1, 1, 1, 1], -1)
+        if not self.export_onnx and rois.shape[0] == 0:
+            mask_out = paddle.full([1, 1, 1], -1)
         else:
             bbox = [rois[:, 2:]]
             labels = rois[:, 0].cast('int32')
@@ -218,19 +221,17 @@ class MaskHead(nn.Layer):
 
             mask_feat = self.head(rois_feat)
             mask_logit = self.mask_fcn_logits(mask_feat)
-            mask_num_class = mask_logit.shape[1]
-            if mask_num_class == 1:
+            if self.num_classes == 1:
                 mask_out = F.sigmoid(mask_logit)
             else:
-                num_masks = mask_logit.shape[0]
-                mask_out = []
-                # TODO: need to optimize gather
-                for i in range(mask_logit.shape[0]):
-                    pred_masks = paddle.unsqueeze(
-                        mask_logit[i, :, :, :], axis=0)
-                    mask = paddle.gather(pred_masks, labels[i], axis=1)
-                    mask_out.append(mask)
-                mask_out = F.sigmoid(paddle.concat(mask_out))
+                num_masks = paddle.shape(mask_logit)[0]
+                index = paddle.arange(num_masks).cast('int32')
+                mask_out = mask_logit[index, labels]
+                mask_out_shape = paddle.shape(mask_out)
+                mask_out = paddle.reshape(mask_out, [
+                    paddle.shape(index), mask_out_shape[-2], mask_out_shape[-1]
+                ])
+                mask_out = F.sigmoid(mask_out)
         return mask_out
 
     def forward(self,
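
The gather rewrite above is the core of this diff: instead of a Python loop with one `paddle.gather` per RoI, the mask logit of each RoI's predicted class is selected with a single fancy-indexing op. A minimal sketch of the pattern with dummy shapes (not the actual head):

import paddle

num_rois, num_classes, m = 3, 80, 28
mask_logit = paddle.rand([num_rois, num_classes, m, m])
labels = paddle.to_tensor([5, 0, 42], dtype='int32')

index = paddle.arange(num_rois).cast('int32')
mask_out = mask_logit[index, labels]   # mask_logit[i, labels[i]] per RoI
print(mask_out.shape)                  # [3, 28, 28]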

+ 517 - 11
paddlex/ppdet/modeling/heads/pico_head.py

@@ -24,9 +24,36 @@ import paddle.nn.functional as F
 from paddle import ParamAttr
 from paddle.nn.initializer import Normal, Constant
 
+from paddlex.ppdet.modeling.ops import get_static_shape
+from ..initializer import normal_
+from ..assigners.utils import generate_anchors_for_grid_cell
+from ..bbox_utils import bbox_center, batch_distance2bbox, bbox2distance
 from paddlex.ppdet.core.workspace import register
 from paddlex.ppdet.modeling.layers import ConvNormLayer
 from .simota_head import OTAVFLHead
+from .gfl_head import Integral, GFLHead
+from paddlex.ppdet.modeling.necks.csp_pan import DPModule
+
+eps = 1e-9
+
+__all__ = ['PicoHead', 'PicoHeadV2', 'PicoFeat']
+
+
+class PicoSE(nn.Layer):
+    def __init__(self, feat_channels):
+        super(PicoSE, self).__init__()
+        self.fc = nn.Conv2D(feat_channels, feat_channels, 1)
+        self.conv = ConvNormLayer(feat_channels, feat_channels, 1, 1)
+
+        self._init_weights()
+
+    def _init_weights(self):
+        normal_(self.fc.weight, std=0.001)
+
+    def forward(self, feat, avg_feat):
+        weight = F.sigmoid(self.fc(avg_feat))
+        out = self.conv(feat * weight)
+        return out
 
 
 @register
@@ -39,6 +66,9 @@ class PicoFeat(nn.Layer):
         feat_out (int): The channel number of output Tensor.
         num_convs (int): The convolution number of the LiteGFLFeat.
         norm_type (str): Normalization type, 'bn'/'sync_bn'/'gn'.
+        share_cls_reg (bool): Whether to share the cls and reg output.
+        act (str): The activation function used in each layer.
+        use_se (bool): Whether to use the SE module.
     """
 
     def __init__(self,
@@ -48,14 +78,20 @@ class PicoFeat(nn.Layer):
                  num_convs=2,
                  norm_type='bn',
                  share_cls_reg=False,
-                 act='hard_swish'):
+                 act='hard_swish',
+                 use_se=False):
         super(PicoFeat, self).__init__()
         self.num_convs = num_convs
         self.norm_type = norm_type
         self.share_cls_reg = share_cls_reg
         self.act = act
+        self.use_se = use_se
         self.cls_convs = []
         self.reg_convs = []
+        if use_se:
+            assert share_cls_reg, \
+                'share_cls_reg must be set to True when use_se is enabled'
+            self.se = nn.LayerList()
         for stage_idx in range(num_fpn_stride):
             cls_subnet_convs = []
             reg_subnet_convs = []
@@ -111,6 +147,8 @@ class PicoFeat(nn.Layer):
                     reg_subnet_convs.append(reg_conv_pw)
             self.cls_convs.append(cls_subnet_convs)
             self.reg_convs.append(reg_subnet_convs)
+            if use_se:
+                self.se.append(PicoSE(feat_out))
 
     def act_func(self, x):
         if self.act == "leaky_relu":
@@ -125,9 +163,14 @@ class PicoFeat(nn.Layer):
         reg_feat = fpn_feat
         for i in range(len(self.cls_convs[stage_idx])):
             cls_feat = self.act_func(self.cls_convs[stage_idx][i](cls_feat))
+            reg_feat = cls_feat
             if not self.share_cls_reg:
                 reg_feat = self.act_func(self.reg_convs[stage_idx][i](
                     reg_feat))
+        if self.use_se:
+            avg_feat = F.adaptive_avg_pool2d(cls_feat, (1, 1))
+            se_feat = self.act_func(self.se[stage_idx](cls_feat, avg_feat))
+            return cls_feat, se_feat
         return cls_feat, reg_feat
 
 
@@ -151,7 +194,7 @@ class PicoHead(OTAVFLHead):
         'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',
         'assigner', 'nms'
     ]
-    __shared__ = ['num_classes']
+    __shared__ = ['num_classes', 'eval_size']
 
     def __init__(self,
                  conv_feat='PicoFeat',
@@ -167,7 +210,8 @@ class PicoHead(OTAVFLHead):
                  feat_in_chan=96,
                  nms=None,
                  nms_pre=1000,
-                 cell_offset=0):
+                 cell_offset=0,
+                 eval_size=None):
         super(PicoHead, self).__init__(
             conv_feat=conv_feat,
             dgqp_module=dgqp_module,
@@ -196,6 +240,7 @@ class PicoHead(OTAVFLHead):
         self.nms = nms
         self.nms_pre = nms_pre
         self.cell_offset = cell_offset
+        self.eval_size = eval_size
 
         self.use_sigmoid = self.loss_vfl.use_sigmoid
         if self.use_sigmoid:
@@ -239,12 +284,50 @@ class PicoHead(OTAVFLHead):
                         bias_attr=ParamAttr(initializer=Constant(value=0))))
                 self.head_reg_list.append(head_reg)
 
-    def forward(self, fpn_feats, deploy=False):
+        # initialize the anchor points
+        if self.eval_size:
+            self.anchor_points, self.stride_tensor = self._generate_anchors()
+
+    def forward(self, fpn_feats, export_post_process=True):
         assert len(fpn_feats) == len(
             self.fpn_stride
         ), "The size of fpn_feats is not equal to size of fpn_stride"
-        cls_logits_list = []
-        bboxes_reg_list = []
+
+        if self.training:
+            return self.forward_train(fpn_feats)
+        else:
+            return self.forward_eval(
+                fpn_feats, export_post_process=export_post_process)
+
+    def forward_train(self, fpn_feats):
+        cls_logits_list, bboxes_reg_list = [], []
+        for i, fpn_feat in enumerate(fpn_feats):
+            conv_cls_feat, conv_reg_feat = self.conv_feat(fpn_feat, i)
+            if self.conv_feat.share_cls_reg:
+                cls_logits = self.head_cls_list[i](conv_cls_feat)
+                cls_score, bbox_pred = paddle.split(
+                    cls_logits,
+                    [self.cls_out_channels, 4 * (self.reg_max + 1)],
+                    axis=1)
+            else:
+                cls_score = self.head_cls_list[i](conv_cls_feat)
+                bbox_pred = self.head_reg_list[i](conv_reg_feat)
+
+            if self.dgqp_module:
+                quality_score = self.dgqp_module(bbox_pred)
+                cls_score = F.sigmoid(cls_score) * quality_score
+
+            cls_logits_list.append(cls_score)
+            bboxes_reg_list.append(bbox_pred)
+
+        return (cls_logits_list, bboxes_reg_list)
+
+    def forward_eval(self, fpn_feats, export_post_process=True):
+        if self.eval_size:
+            anchor_points, stride_tensor = self.anchor_points, self.stride_tensor
+        else:
+            anchor_points, stride_tensor = self._generate_anchors(fpn_feats)
+        cls_logits_list, bboxes_reg_list = [], []
         for i, fpn_feat in enumerate(fpn_feats):
             conv_cls_feat, conv_reg_feat = self.conv_feat(fpn_feat, i)
             if self.conv_feat.share_cls_reg:
@@ -261,18 +344,441 @@ class PicoHead(OTAVFLHead):
                 quality_score = self.dgqp_module(bbox_pred)
                 cls_score = F.sigmoid(cls_score) * quality_score
 
-            if deploy:
+            if not export_post_process:
                 # Now only supports batch size = 1 in deploy
                 # TODO(ygh): support batch size > 1
-                cls_score = F.sigmoid(cls_score).reshape(
+                cls_score_out = F.sigmoid(cls_score).reshape(
                     [1, self.cls_out_channels, -1]).transpose([0, 2, 1])
                 bbox_pred = bbox_pred.reshape([1, (self.reg_max + 1) * 4,
                                                -1]).transpose([0, 2, 1])
-            elif not self.training:
-                cls_score = F.sigmoid(cls_score.transpose([0, 2, 3, 1]))
+            else:
+                b, _, h, w = fpn_feat.shape
+                l = h * w
+                cls_score_out = F.sigmoid(
+                    cls_score.reshape([b, self.cls_out_channels, l]))
                 bbox_pred = bbox_pred.transpose([0, 2, 3, 1])
+                bbox_pred = self.distribution_project(bbox_pred)
+                bbox_pred = bbox_pred.reshape([b, l, 4])
 
-            cls_logits_list.append(cls_score)
+            cls_logits_list.append(cls_score_out)
             bboxes_reg_list.append(bbox_pred)
 
+        if export_post_process:
+            cls_logits_list = paddle.concat(cls_logits_list, axis=-1)
+            bboxes_reg_list = paddle.concat(bboxes_reg_list, axis=1)
+            bboxes_reg_list = batch_distance2bbox(anchor_points,
+                                                  bboxes_reg_list)
+            bboxes_reg_list *= stride_tensor
+
         return (cls_logits_list, bboxes_reg_list)
+
+    def _generate_anchors(self, feats=None):
+        # only used at eval time
+        anchor_points = []
+        stride_tensor = []
+        for i, stride in enumerate(self.fpn_stride):
+            if feats is not None:
+                _, _, h, w = feats[i].shape
+            else:
+                h = math.ceil(self.eval_size[0] / stride)
+                w = math.ceil(self.eval_size[1] / stride)
+            shift_x = paddle.arange(end=w) + self.cell_offset
+            shift_y = paddle.arange(end=h) + self.cell_offset
+            shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)
+            anchor_point = paddle.cast(
+                paddle.stack(
+                    [shift_x, shift_y], axis=-1), dtype='float32')
+            anchor_points.append(anchor_point.reshape([-1, 2]))
+            stride_tensor.append(
+                paddle.full(
+                    [h * w, 1], stride, dtype='float32'))
+        anchor_points = paddle.concat(anchor_points)
+        stride_tensor = paddle.concat(stride_tensor)
+        return anchor_points, stride_tensor
+
+    def post_process(self, head_outs, scale_factor, export_nms=True):
+        pred_scores, pred_bboxes = head_outs
+        if not export_nms:
+            return pred_bboxes, pred_scores
+        else:
+            # rescale: [h_scale, w_scale] -> [w_scale, h_scale, w_scale, h_scale]
+            scale_y, scale_x = paddle.split(scale_factor, 2, axis=-1)
+            scale_factor = paddle.concat(
+                [scale_x, scale_y, scale_x, scale_y],
+                axis=-1).reshape([-1, 1, 4])
+            # scale bbox back to the original image size.
+            pred_bboxes /= scale_factor
+            bbox_pred, bbox_num, _ = self.nms(pred_bboxes, pred_scores)
+            return bbox_pred, bbox_num
+
+
+@register
+class PicoHeadV2(GFLHead):
+    """
+    PicoHeadV2
+    Args:
+        conv_feat (object): Instance of 'PicoFeat'
+        num_classes (int): Number of classes
+        fpn_stride (list): The stride of each FPN Layer
+        prior_prob (float): Used to set the bias init for the class prediction layer
+        loss_class (object): Instance of VariFocalLoss.
+        loss_dfl (object): Instance of DistributionFocalLoss.
+        loss_bbox (object): Instance of bbox loss.
+        assigner (object): Instance of label assigner.
+        reg_max: Max value of the integral set :math:`{0, ..., reg_max}`
+                in QFL setting. Default: 16.
+    """
+    __inject__ = [
+        'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',
+        'static_assigner', 'assigner', 'nms'
+    ]
+    __shared__ = ['num_classes', 'eval_size']
+
+    def __init__(self,
+                 conv_feat='PicoFeatV2',
+                 dgqp_module=None,
+                 num_classes=80,
+                 fpn_stride=[8, 16, 32],
+                 prior_prob=0.01,
+                 use_align_head=True,
+                 loss_class='VariFocalLoss',
+                 loss_dfl='DistributionFocalLoss',
+                 loss_bbox='GIoULoss',
+                 static_assigner_epoch=60,
+                 static_assigner='ATSSAssigner',
+                 assigner='TaskAlignedAssigner',
+                 reg_max=16,
+                 feat_in_chan=96,
+                 nms=None,
+                 nms_pre=1000,
+                 cell_offset=0,
+                 act='hard_swish',
+                 grid_cell_scale=5.0,
+                 eval_size=None):
+        super(PicoHeadV2, self).__init__(
+            conv_feat=conv_feat,
+            dgqp_module=dgqp_module,
+            num_classes=num_classes,
+            fpn_stride=fpn_stride,
+            prior_prob=prior_prob,
+            loss_class=loss_class,
+            loss_dfl=loss_dfl,
+            loss_bbox=loss_bbox,
+            reg_max=reg_max,
+            feat_in_chan=feat_in_chan,
+            nms=nms,
+            nms_pre=nms_pre,
+            cell_offset=cell_offset, )
+        self.conv_feat = conv_feat
+        self.num_classes = num_classes
+        self.fpn_stride = fpn_stride
+        self.prior_prob = prior_prob
+        self.loss_vfl = loss_class
+        self.loss_dfl = loss_dfl
+        self.loss_bbox = loss_bbox
+
+        self.static_assigner_epoch = static_assigner_epoch
+        self.static_assigner = static_assigner
+        self.assigner = assigner
+
+        self.reg_max = reg_max
+        self.feat_in_chan = feat_in_chan
+        self.nms = nms
+        self.nms_pre = nms_pre
+        self.cell_offset = cell_offset
+        self.act = act
+        self.grid_cell_scale = grid_cell_scale
+        self.use_align_head = use_align_head
+        self.cls_out_channels = self.num_classes
+        self.eval_size = eval_size
+
+        bias_init_value = -math.log((1 - self.prior_prob) / self.prior_prob)
+        # Clear the super class initialization
+        self.gfl_head_cls = None
+        self.gfl_head_reg = None
+        self.scales_regs = None
+
+        self.head_cls_list = []
+        self.head_reg_list = []
+        self.cls_align = nn.LayerList()
+
+        for i in range(len(fpn_stride)):
+            head_cls = self.add_sublayer(
+                "head_cls" + str(i),
+                nn.Conv2D(
+                    in_channels=self.feat_in_chan,
+                    out_channels=self.cls_out_channels,
+                    kernel_size=1,
+                    stride=1,
+                    padding=0,
+                    weight_attr=ParamAttr(initializer=Normal(
+                        mean=0., std=0.01)),
+                    bias_attr=ParamAttr(
+                        initializer=Constant(value=bias_init_value))))
+            self.head_cls_list.append(head_cls)
+            head_reg = self.add_sublayer(
+                "head_reg" + str(i),
+                nn.Conv2D(
+                    in_channels=self.feat_in_chan,
+                    out_channels=4 * (self.reg_max + 1),
+                    kernel_size=1,
+                    stride=1,
+                    padding=0,
+                    weight_attr=ParamAttr(initializer=Normal(
+                        mean=0., std=0.01)),
+                    bias_attr=ParamAttr(initializer=Constant(value=0))))
+            self.head_reg_list.append(head_reg)
+            if self.use_align_head:
+                self.cls_align.append(
+                    DPModule(
+                        self.feat_in_chan,
+                        1,
+                        5,
+                        act=self.act,
+                        use_act_in_out=False))
+
+        # initialize the anchor points
+        if self.eval_size:
+            self.anchor_points, self.stride_tensor = self._generate_anchors()
+
+    def forward(self, fpn_feats, export_post_process=True):
+        assert len(fpn_feats) == len(
+            self.fpn_stride
+        ), "The size of fpn_feats is not equal to size of fpn_stride"
+
+        if self.training:
+            return self.forward_train(fpn_feats)
+        else:
+            return self.forward_eval(
+                fpn_feats, export_post_process=export_post_process)
+
+    def forward_train(self, fpn_feats):
+        cls_score_list, reg_list, box_list = [], [], []
+        for i, (fpn_feat,
+                stride) in enumerate(zip(fpn_feats, self.fpn_stride)):
+            b, _, h, w = get_static_shape(fpn_feat)
+            # task decomposition
+            conv_cls_feat, se_feat = self.conv_feat(fpn_feat, i)
+            cls_logit = self.head_cls_list[i](se_feat)
+            reg_pred = self.head_reg_list[i](se_feat)
+
+            # cls prediction and alignment
+            if self.use_align_head:
+                cls_prob = F.sigmoid(self.cls_align[i](conv_cls_feat))
+                cls_score = (F.sigmoid(cls_logit) * cls_prob + eps).sqrt()
+            else:
+                cls_score = F.sigmoid(cls_logit)
+
+            cls_score_out = cls_score.transpose([0, 2, 3, 1])
+            bbox_pred = reg_pred.transpose([0, 2, 3, 1])
+            b, cell_h, cell_w, _ = paddle.shape(cls_score_out)
+            y, x = self.get_single_level_center_point(
+                [cell_h, cell_w], stride, cell_offset=self.cell_offset)
+            center_points = paddle.stack([x, y], axis=-1)
+            cls_score_out = cls_score_out.reshape(
+                [b, -1, self.cls_out_channels])
+            bbox_pred = self.distribution_project(bbox_pred) * stride
+            bbox_pred = bbox_pred.reshape([b, cell_h * cell_w, 4])
+            bbox_pred = batch_distance2bbox(
+                center_points, bbox_pred, max_shapes=None)
+            cls_score_list.append(cls_score.flatten(2).transpose([0, 2, 1]))
+            reg_list.append(reg_pred.flatten(2).transpose([0, 2, 1]))
+            box_list.append(bbox_pred / stride)
+
+        cls_score_list = paddle.concat(cls_score_list, axis=1)
+        box_list = paddle.concat(box_list, axis=1)
+        reg_list = paddle.concat(reg_list, axis=1)
+        return cls_score_list, reg_list, box_list, fpn_feats
+
+    def forward_eval(self, fpn_feats, export_post_process=True):
+        if self.eval_size:
+            anchor_points, stride_tensor = self.anchor_points, self.stride_tensor
+        else:
+            anchor_points, stride_tensor = self._generate_anchors(fpn_feats)
+        cls_score_list, box_list = [], []
+        for i, (fpn_feat,
+                stride) in enumerate(zip(fpn_feats, self.fpn_stride)):
+            b, _, h, w = fpn_feat.shape
+            # task decomposition
+            conv_cls_feat, se_feat = self.conv_feat(fpn_feat, i)
+            cls_logit = self.head_cls_list[i](se_feat)
+            reg_pred = self.head_reg_list[i](se_feat)
+
+            # cls prediction and alignment
+            if self.use_align_head:
+                cls_prob = F.sigmoid(self.cls_align[i](conv_cls_feat))
+                cls_score = (F.sigmoid(cls_logit) * cls_prob + eps).sqrt()
+            else:
+                cls_score = F.sigmoid(cls_logit)
+
+            if not export_post_process:
+                # Now only supports batch size = 1 in deploy
+                cls_score_list.append(
+                    cls_score.reshape([1, self.cls_out_channels, -1])
+                    .transpose([0, 2, 1]))
+                box_list.append(
+                    reg_pred.reshape([1, (self.reg_max + 1) * 4, -1])
+                    .transpose([0, 2, 1]))
+            else:
+                l = h * w
+                cls_score_out = cls_score.reshape(
+                    [b, self.cls_out_channels, l])
+                bbox_pred = reg_pred.transpose([0, 2, 3, 1])
+                bbox_pred = self.distribution_project(bbox_pred)
+                bbox_pred = bbox_pred.reshape([b, l, 4])
+                cls_score_list.append(cls_score_out)
+                box_list.append(bbox_pred)
+
+        if export_post_process:
+            cls_score_list = paddle.concat(cls_score_list, axis=-1)
+            box_list = paddle.concat(box_list, axis=1)
+            box_list = batch_distance2bbox(anchor_points, box_list)
+            box_list *= stride_tensor
+
+        return cls_score_list, box_list
+
+    def get_loss(self, head_outs, gt_meta):
+        pred_scores, pred_regs, pred_bboxes, fpn_feats = head_outs
+        gt_labels = gt_meta['gt_class']
+        gt_bboxes = gt_meta['gt_bbox']
+        gt_scores = gt_meta['gt_score'] if 'gt_score' in gt_meta else None
+        num_imgs = gt_meta['im_id'].shape[0]
+        pad_gt_mask = gt_meta['pad_gt_mask']
+
+        anchors, _, num_anchors_list, stride_tensor_list = generate_anchors_for_grid_cell(
+            fpn_feats, self.fpn_stride, self.grid_cell_scale, self.cell_offset)
+
+        centers = bbox_center(anchors)
+
+        # label assignment
+        if gt_meta['epoch_id'] < self.static_assigner_epoch:
+            assigned_labels, assigned_bboxes, assigned_scores = self.static_assigner(
+                anchors,
+                num_anchors_list,
+                gt_labels,
+                gt_bboxes,
+                pad_gt_mask,
+                bg_index=self.num_classes,
+                gt_scores=gt_scores,
+                pred_bboxes=pred_bboxes.detach() * stride_tensor_list)
+
+        else:
+            assigned_labels, assigned_bboxes, assigned_scores = self.assigner(
+                pred_scores.detach(),
+                pred_bboxes.detach() * stride_tensor_list,
+                centers,
+                num_anchors_list,
+                gt_labels,
+                gt_bboxes,
+                pad_gt_mask,
+                bg_index=self.num_classes,
+                gt_scores=gt_scores)
+
+        assigned_bboxes /= stride_tensor_list
+
+        centers_shape = centers.shape
+        flatten_centers = centers.expand(
+            [num_imgs, centers_shape[0], centers_shape[1]]).reshape([-1, 2])
+        flatten_strides = stride_tensor_list.expand(
+            [num_imgs, centers_shape[0], 1]).reshape([-1, 1])
+        flatten_cls_preds = pred_scores.reshape([-1, self.num_classes])
+        flatten_regs = pred_regs.reshape([-1, 4 * (self.reg_max + 1)])
+        flatten_bboxes = pred_bboxes.reshape([-1, 4])
+        flatten_bbox_targets = assigned_bboxes.reshape([-1, 4])
+        flatten_labels = assigned_labels.reshape([-1])
+        flatten_assigned_scores = assigned_scores.reshape(
+            [-1, self.num_classes])
+
+        pos_inds = paddle.nonzero(
+            paddle.logical_and((flatten_labels >= 0),
+                               (flatten_labels < self.num_classes)),
+            as_tuple=False).squeeze(1)
+
+        num_total_pos = len(pos_inds)
+
+        if num_total_pos > 0:
+            pos_bbox_targets = paddle.gather(
+                flatten_bbox_targets, pos_inds, axis=0)
+            pos_decode_bbox_pred = paddle.gather(
+                flatten_bboxes, pos_inds, axis=0)
+            pos_reg = paddle.gather(flatten_regs, pos_inds, axis=0)
+            pos_strides = paddle.gather(flatten_strides, pos_inds, axis=0)
+            pos_centers = paddle.gather(
+                flatten_centers, pos_inds, axis=0) / pos_strides
+
+            weight_targets = flatten_assigned_scores.detach()
+            weight_targets = paddle.gather(
+                weight_targets.max(axis=1, keepdim=True), pos_inds, axis=0)
+
+            pred_corners = pos_reg.reshape([-1, self.reg_max + 1])
+            target_corners = bbox2distance(pos_centers, pos_bbox_targets,
+                                           self.reg_max).reshape([-1])
+            # regression loss
+            loss_bbox = paddle.sum(
+                self.loss_bbox(pos_decode_bbox_pred,
+                               pos_bbox_targets) * weight_targets)
+
+            # dfl loss
+            loss_dfl = self.loss_dfl(
+                pred_corners,
+                target_corners,
+                weight=weight_targets.expand([-1, 4]).reshape([-1]),
+                avg_factor=4.0)
+        else:
+            loss_bbox = paddle.zeros([1])
+            loss_dfl = paddle.zeros([1])
+
+        avg_factor = flatten_assigned_scores.sum()
+        if paddle.distributed.get_world_size() > 1:
+            paddle.distributed.all_reduce(avg_factor)
+            avg_factor = paddle.clip(
+                avg_factor / paddle.distributed.get_world_size(), min=1)
+        loss_vfl = self.loss_vfl(
+            flatten_cls_preds, flatten_assigned_scores, avg_factor=avg_factor)
+
+        loss_bbox = loss_bbox / avg_factor
+        loss_dfl = loss_dfl / avg_factor
+
+        loss_states = dict(
+            loss_vfl=loss_vfl, loss_bbox=loss_bbox, loss_dfl=loss_dfl)
+
+        return loss_states
+
+    def _generate_anchors(self, feats=None):
+        # only used at eval time
+        anchor_points = []
+        stride_tensor = []
+        for i, stride in enumerate(self.fpn_stride):
+            if feats is not None:
+                _, _, h, w = feats[i].shape
+            else:
+                h = math.ceil(self.eval_size[0] / stride)
+                w = math.ceil(self.eval_size[1] / stride)
+            shift_x = paddle.arange(end=w) + self.cell_offset
+            shift_y = paddle.arange(end=h) + self.cell_offset
+            shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)
+            anchor_point = paddle.cast(
+                paddle.stack(
+                    [shift_x, shift_y], axis=-1), dtype='float32')
+            anchor_points.append(anchor_point.reshape([-1, 2]))
+            stride_tensor.append(
+                paddle.full(
+                    [h * w, 1], stride, dtype='float32'))
+        anchor_points = paddle.concat(anchor_points)
+        stride_tensor = paddle.concat(stride_tensor)
+        return anchor_points, stride_tensor
+
+    def post_process(self, head_outs, scale_factor, export_nms=True):
+        pred_scores, pred_bboxes = head_outs
+        if not export_nms:
+            return pred_bboxes, pred_scores
+        else:
+            # rescale: [h_scale, w_scale] -> [w_scale, h_scale, w_scale, h_scale]
+            scale_y, scale_x = paddle.split(scale_factor, 2, axis=-1)
+            scale_factor = paddle.concat(
+                [scale_x, scale_y, scale_x, scale_y],
+                axis=-1).reshape([-1, 1, 4])
+            # scale bbox back to the original image size.
+            pred_bboxes /= scale_factor
+            bbox_pred, bbox_num, _ = self.nms(pred_bboxes, pred_scores)
+            return bbox_pred, bbox_num
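
To make the eval-time decoding concrete, a standalone sketch of what `_generate_anchors` produces (assuming `eval_size=(64, 64)`, `fpn_stride=[8, 16, 32]`, and `cell_offset=0`; it mirrors the loop above rather than importing the head):

import math
import paddle

eval_size, fpn_stride, cell_offset = (64, 64), [8, 16, 32], 0
anchor_points, stride_tensor = [], []
for stride in fpn_stride:
    h = math.ceil(eval_size[0] / stride)   # 8, 4, 2 cells per side
    w = math.ceil(eval_size[1] / stride)
    shift_x = paddle.arange(end=w) + cell_offset
    shift_y = paddle.arange(end=h) + cell_offset
    shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)
    points = paddle.cast(paddle.stack([shift_x, shift_y], axis=-1), 'float32')
    anchor_points.append(points.reshape([-1, 2]))
    stride_tensor.append(paddle.full([h * w, 1], stride, dtype='float32'))

print(paddle.concat(anchor_points).shape)   # [84, 2]: 64 + 16 + 4 cell centers
print(paddle.concat(stride_tensor).shape)   # [84, 1]: matching strides

Note the points are in cell units; `forward_eval` multiplies the decoded boxes by `stride_tensor` afterwards.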

+ 385 - 0
paddlex/ppdet/modeling/heads/ppyoloe_head.py

@@ -0,0 +1,385 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddlex.ppdet.core.workspace import register
+
+from ..bbox_utils import batch_distance2bbox
+from ..losses import GIoULoss
+from ..initializer import bias_init_with_prob, constant_, normal_
+from ..assigners.utils import generate_anchors_for_grid_cell
+from paddlex.ppdet.modeling.backbones.cspresnet import ConvBNLayer
+from paddlex.ppdet.modeling.ops import get_static_shape, get_act_fn
+from paddlex.ppdet.modeling.layers import MultiClassNMS
+
+__all__ = ['PPYOLOEHead']
+
+
+class ESEAttn(nn.Layer):
+    def __init__(self, feat_channels, act='swish'):
+        super(ESEAttn, self).__init__()
+        self.fc = nn.Conv2D(feat_channels, feat_channels, 1)
+        self.conv = ConvBNLayer(feat_channels, feat_channels, 1, act=act)
+
+        self._init_weights()
+
+    def _init_weights(self):
+        normal_(self.fc.weight, std=0.001)
+
+    def forward(self, feat, avg_feat):
+        weight = F.sigmoid(self.fc(avg_feat))
+        return self.conv(feat * weight)
+
+
+@register
+class PPYOLOEHead(nn.Layer):
+    __shared__ = ['num_classes', 'eval_size', 'trt', 'exclude_nms']
+    __inject__ = ['static_assigner', 'assigner', 'nms']
+
+    def __init__(self,
+                 in_channels=[1024, 512, 256],
+                 num_classes=80,
+                 act='swish',
+                 fpn_strides=(32, 16, 8),
+                 grid_cell_scale=5.0,
+                 grid_cell_offset=0.5,
+                 reg_max=16,
+                 static_assigner_epoch=4,
+                 use_varifocal_loss=True,
+                 static_assigner='ATSSAssigner',
+                 assigner='TaskAlignedAssigner',
+                 nms='MultiClassNMS',
+                 eval_size=None,
+                 loss_weight={
+                     'class': 1.0,
+                     'iou': 2.5,
+                     'dfl': 0.5,
+                 },
+                 trt=False,
+                 exclude_nms=False):
+        super(PPYOLOEHead, self).__init__()
+        assert len(in_channels) > 0, "len(in_channels) should be > 0"
+        self.in_channels = in_channels
+        self.num_classes = num_classes
+        self.fpn_strides = fpn_strides
+        self.grid_cell_scale = grid_cell_scale
+        self.grid_cell_offset = grid_cell_offset
+        self.reg_max = reg_max
+        self.iou_loss = GIoULoss()
+        self.loss_weight = loss_weight
+        self.use_varifocal_loss = use_varifocal_loss
+        self.eval_size = eval_size
+
+        self.static_assigner_epoch = static_assigner_epoch
+        self.static_assigner = static_assigner
+        self.assigner = assigner
+        self.nms = nms
+        if isinstance(self.nms, MultiClassNMS) and trt:
+            self.nms.trt = trt
+        self.exclude_nms = exclude_nms
+        # stem
+        self.stem_cls = nn.LayerList()
+        self.stem_reg = nn.LayerList()
+        act = get_act_fn(
+            act, trt=trt) if act is None or isinstance(act,
+                                                       (str, dict)) else act
+        for in_c in self.in_channels:
+            self.stem_cls.append(ESEAttn(in_c, act=act))
+            self.stem_reg.append(ESEAttn(in_c, act=act))
+        # pred head
+        self.pred_cls = nn.LayerList()
+        self.pred_reg = nn.LayerList()
+        for in_c in self.in_channels:
+            self.pred_cls.append(
+                nn.Conv2D(
+                    in_c, self.num_classes, 3, padding=1))
+            self.pred_reg.append(
+                nn.Conv2D(
+                    in_c, 4 * (self.reg_max + 1), 3, padding=1))
+        # projection conv
+        self.proj_conv = nn.Conv2D(self.reg_max + 1, 1, 1, bias_attr=False)
+        self.proj_conv.skip_quant = True
+        self._init_weights()
+
+    @classmethod
+    def from_config(cls, cfg, input_shape):
+        return {'in_channels': [i.channels for i in input_shape], }
+
+    def _init_weights(self):
+        bias_cls = bias_init_with_prob(0.01)
+        for cls_, reg_ in zip(self.pred_cls, self.pred_reg):
+            constant_(cls_.weight)
+            constant_(cls_.bias, bias_cls)
+            constant_(reg_.weight)
+            constant_(reg_.bias, 1.0)
+
+        self.proj = paddle.linspace(0, self.reg_max, self.reg_max + 1)
+        self.proj_conv.weight.set_value(
+            self.proj.reshape([1, self.reg_max + 1, 1, 1]))
+        self.proj_conv.weight.stop_gradient = True
+
+        if self.eval_size:
+            anchor_points, stride_tensor = self._generate_anchors()
+            self.anchor_points = anchor_points
+            self.stride_tensor = stride_tensor
+
+    def forward_train(self, feats, targets):
+        anchors, anchor_points, num_anchors_list, stride_tensor = \
+            generate_anchors_for_grid_cell(
+                feats, self.fpn_strides, self.grid_cell_scale,
+                self.grid_cell_offset)
+
+        cls_score_list, reg_distri_list = [], []
+        for i, feat in enumerate(feats):
+            avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
+            cls_logit = self.pred_cls[i](self.stem_cls[i](feat, avg_feat) +
+                                         feat)
+            reg_distri = self.pred_reg[i](self.stem_reg[i](feat, avg_feat))
+            # cls and reg
+            cls_score = F.sigmoid(cls_logit)
+            cls_score_list.append(cls_score.flatten(2).transpose([0, 2, 1]))
+            reg_distri_list.append(reg_distri.flatten(2).transpose([0, 2, 1]))
+        cls_score_list = paddle.concat(cls_score_list, axis=1)
+        reg_distri_list = paddle.concat(reg_distri_list, axis=1)
+
+        return self.get_loss([
+            cls_score_list, reg_distri_list, anchors, anchor_points,
+            num_anchors_list, stride_tensor
+        ], targets)
+
+    def _generate_anchors(self, feats=None):
+        # only used at eval time
+        anchor_points = []
+        stride_tensor = []
+        for i, stride in enumerate(self.fpn_strides):
+            if feats is not None:
+                _, _, h, w = feats[i].shape
+            else:
+                h = int(self.eval_size[0] / stride)
+                w = int(self.eval_size[1] / stride)
+            shift_x = paddle.arange(end=w) + self.grid_cell_offset
+            shift_y = paddle.arange(end=h) + self.grid_cell_offset
+            shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)
+            anchor_point = paddle.cast(
+                paddle.stack(
+                    [shift_x, shift_y], axis=-1), dtype='float32')
+            anchor_points.append(anchor_point.reshape([-1, 2]))
+            stride_tensor.append(
+                paddle.full(
+                    [h * w, 1], stride, dtype='float32'))
+        anchor_points = paddle.concat(anchor_points)
+        stride_tensor = paddle.concat(stride_tensor)
+        return anchor_points, stride_tensor
+
+    def forward_eval(self, feats):
+        if self.eval_size:
+            anchor_points, stride_tensor = self.anchor_points, self.stride_tensor
+        else:
+            anchor_points, stride_tensor = self._generate_anchors(feats)
+        cls_score_list, reg_dist_list = [], []
+        for i, feat in enumerate(feats):
+            b, _, h, w = feat.shape
+            l = h * w
+            avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
+            cls_logit = self.pred_cls[i](self.stem_cls[i](feat, avg_feat) +
+                                         feat)
+            reg_dist = self.pred_reg[i](self.stem_reg[i](feat, avg_feat))
+            reg_dist = reg_dist.reshape(
+                [-1, 4, self.reg_max + 1, l]).transpose([0, 2, 1, 3])
+            reg_dist = self.proj_conv(F.softmax(reg_dist, axis=1))
+            # cls and reg
+            cls_score = F.sigmoid(cls_logit)
+            cls_score_list.append(cls_score.reshape([b, self.num_classes, l]))
+            reg_dist_list.append(reg_dist.reshape([b, 4, l]))
+
+        cls_score_list = paddle.concat(cls_score_list, axis=-1)
+        reg_dist_list = paddle.concat(reg_dist_list, axis=-1)
+
+        return cls_score_list, reg_dist_list, anchor_points, stride_tensor
+
+    def forward(self, feats, targets=None):
+        assert len(feats) == len(self.fpn_strides), \
+            "The size of feats is not equal to size of fpn_strides"
+
+        if self.training:
+            return self.forward_train(feats, targets)
+        else:
+            return self.forward_eval(feats)
+
+    @staticmethod
+    def _focal_loss(score, label, alpha=0.25, gamma=2.0):
+        weight = (score - label).pow(gamma)
+        if alpha > 0:
+            alpha_t = alpha * label + (1 - alpha) * (1 - label)
+            weight *= alpha_t
+        loss = F.binary_cross_entropy(
+            score, label, weight=weight, reduction='sum')
+        return loss
+
+    @staticmethod
+    def _varifocal_loss(pred_score, gt_score, label, alpha=0.75, gamma=2.0):
+        weight = alpha * pred_score.pow(gamma) * (1 - label) + gt_score * label
+        loss = F.binary_cross_entropy(
+            pred_score, gt_score, weight=weight, reduction='sum')
+        return loss
+
+    def _bbox_decode(self, anchor_points, pred_dist):
+        b, l, _ = get_static_shape(pred_dist)
+        pred_dist = F.softmax(pred_dist.reshape([b, l, 4, self.reg_max + 1
+                                                 ])).matmul(self.proj)
+        return batch_distance2bbox(anchor_points, pred_dist)
+
+    def _bbox2distance(self, points, bbox):
+        x1y1, x2y2 = paddle.split(bbox, 2, -1)
+        lt = points - x1y1
+        rb = x2y2 - points
+        return paddle.concat([lt, rb], -1).clip(0, self.reg_max - 0.01)
+
+    def _df_loss(self, pred_dist, target):
+        target_left = paddle.cast(target, 'int64')
+        target_right = target_left + 1
+        weight_left = target_right.astype('float32') - target
+        weight_right = 1 - weight_left
+        loss_left = F.cross_entropy(
+            pred_dist, target_left, reduction='none') * weight_left
+        loss_right = F.cross_entropy(
+            pred_dist, target_right, reduction='none') * weight_right
+        return (loss_left + loss_right).mean(-1, keepdim=True)
+
+    def _bbox_loss(self, pred_dist, pred_bboxes, anchor_points,
+                   assigned_labels, assigned_bboxes, assigned_scores,
+                   assigned_scores_sum):
+        # select positive samples mask
+        mask_positive = (assigned_labels != self.num_classes)
+        num_pos = mask_positive.sum()
+        # pos/neg loss
+        if num_pos > 0:
+            # l1 + iou
+            bbox_mask = mask_positive.unsqueeze(-1).tile([1, 1, 4])
+            pred_bboxes_pos = paddle.masked_select(pred_bboxes,
+                                                   bbox_mask).reshape([-1, 4])
+            assigned_bboxes_pos = paddle.masked_select(
+                assigned_bboxes, bbox_mask).reshape([-1, 4])
+            bbox_weight = paddle.masked_select(
+                assigned_scores.sum(-1), mask_positive).unsqueeze(-1)
+
+            loss_l1 = F.l1_loss(pred_bboxes_pos, assigned_bboxes_pos)
+
+            loss_iou = self.iou_loss(pred_bboxes_pos,
+                                     assigned_bboxes_pos) * bbox_weight
+            loss_iou = loss_iou.sum() / assigned_scores_sum
+
+            dist_mask = mask_positive.unsqueeze(-1).tile(
+                [1, 1, (self.reg_max + 1) * 4])
+            pred_dist_pos = paddle.masked_select(
+                pred_dist, dist_mask).reshape([-1, 4, self.reg_max + 1])
+            assigned_ltrb = self._bbox2distance(anchor_points, assigned_bboxes)
+            assigned_ltrb_pos = paddle.masked_select(
+                assigned_ltrb, bbox_mask).reshape([-1, 4])
+            loss_dfl = self._df_loss(pred_dist_pos,
+                                     assigned_ltrb_pos) * bbox_weight
+            loss_dfl = loss_dfl.sum() / assigned_scores_sum
+        else:
+            loss_l1 = paddle.zeros([1])
+            loss_iou = paddle.zeros([1])
+            loss_dfl = pred_dist.sum() * 0.
+        return loss_l1, loss_iou, loss_dfl
+
+    def get_loss(self, head_outs, gt_meta):
+        pred_scores, pred_distri, anchors,\
+        anchor_points, num_anchors_list, stride_tensor = head_outs
+
+        anchor_points_s = anchor_points / stride_tensor
+        pred_bboxes = self._bbox_decode(anchor_points_s, pred_distri)
+
+        gt_labels = gt_meta['gt_class']
+        gt_bboxes = gt_meta['gt_bbox']
+        pad_gt_mask = gt_meta['pad_gt_mask']
+        # label assignment
+        if gt_meta['epoch_id'] < self.static_assigner_epoch:
+            assigned_labels, assigned_bboxes, assigned_scores = \
+                self.static_assigner(
+                    anchors,
+                    num_anchors_list,
+                    gt_labels,
+                    gt_bboxes,
+                    pad_gt_mask,
+                    bg_index=self.num_classes,
+                    pred_bboxes=pred_bboxes.detach() * stride_tensor)
+            alpha_l = 0.25
+        else:
+            assigned_labels, assigned_bboxes, assigned_scores = \
+                self.assigner(
+                pred_scores.detach(),
+                pred_bboxes.detach() * stride_tensor,
+                anchor_points,
+                num_anchors_list,
+                gt_labels,
+                gt_bboxes,
+                pad_gt_mask,
+                bg_index=self.num_classes)
+            alpha_l = -1
+        # rescale bbox
+        assigned_bboxes /= stride_tensor
+        # cls loss
+        if self.use_varifocal_loss:
+            one_hot_label = F.one_hot(assigned_labels,
+                                      self.num_classes + 1)[..., :-1]
+            loss_cls = self._varifocal_loss(pred_scores, assigned_scores,
+                                            one_hot_label)
+        else:
+            loss_cls = self._focal_loss(pred_scores, assigned_scores, alpha_l)
+
+        assigned_scores_sum = assigned_scores.sum()
+        if paddle.distributed.get_world_size() > 1:
+            paddle.distributed.all_reduce(assigned_scores_sum)
+            assigned_scores_sum = paddle.clip(
+                assigned_scores_sum / paddle.distributed.get_world_size(),
+                min=1)
+        loss_cls /= assigned_scores_sum
+
+        loss_l1, loss_iou, loss_dfl = \
+            self._bbox_loss(pred_distri, pred_bboxes, anchor_points_s,
+                            assigned_labels, assigned_bboxes, assigned_scores,
+                            assigned_scores_sum)
+        loss = self.loss_weight['class'] * loss_cls + \
+               self.loss_weight['iou'] * loss_iou + \
+               self.loss_weight['dfl'] * loss_dfl
+        out_dict = {
+            'loss': loss,
+            'loss_cls': loss_cls,
+            'loss_iou': loss_iou,
+            'loss_dfl': loss_dfl,
+            'loss_l1': loss_l1,
+        }
+        return out_dict
+
+    def post_process(self, head_outs, img_shape, scale_factor):
+        pred_scores, pred_dist, anchor_points, stride_tensor = head_outs
+        pred_bboxes = batch_distance2bbox(anchor_points,
+                                          pred_dist.transpose([0, 2, 1]))
+        pred_bboxes *= stride_tensor
+        # scale bbox back to the original image size
+        scale_y, scale_x = paddle.split(scale_factor, 2, axis=-1)
+        scale_factor = paddle.concat(
+            [scale_x, scale_y, scale_x, scale_y], axis=-1).reshape([-1, 1, 4])
+        pred_bboxes /= scale_factor
+        if self.exclude_nms:
+            # `exclude_nms=True` is only used for benchmarking
+            return pred_bboxes.sum(), pred_scores.sum()
+        else:
+            bbox_pred, bbox_num, _ = self.nms(pred_bboxes, pred_scores)
+            return bbox_pred, bbox_num
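
The `_df_loss` above is Distribution Focal Loss: each continuous regression target `t` is split across its two neighbouring integer bins `floor(t)` and `floor(t) + 1` with linear weights `floor(t) + 1 - t` and `t - floor(t)`, and cross-entropy pushes probability mass onto both. A minimal worked sketch (dummy logits; shapes follow `_bbox_loss`):

import paddle
import paddle.nn.functional as F

reg_max = 16
target = paddle.to_tensor([[2.3, 0.0, 15.9, 7.5]])     # ltrb distances, [n, 4]
pred_dist = paddle.rand([1, 4, reg_max + 1])           # per-bin logits

target_left = paddle.cast(target, 'int64')             # 2, 0, 15, 7
target_right = target_left + 1                         # 3, 1, 16, 8
weight_left = target_right.astype('float32') - target  # 0.7, 1.0, 0.1, 0.5
weight_right = 1 - weight_left                         # 0.3, 0.0, 0.9, 0.5

loss = (F.cross_entropy(pred_dist, target_left, reduction='none') * weight_left +
        F.cross_entropy(pred_dist, target_right, reduction='none') * weight_right
        ).mean(-1, keepdim=True)                       # [1, 1], as in _df_loss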

+ 249 - 0
paddlex/ppdet/modeling/heads/retina_head.py

@@ -0,0 +1,249 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle import ParamAttr
+from paddle.nn.initializer import Normal, Constant
+from paddlex.ppdet.modeling.bbox_utils import bbox2delta, delta2bbox
+from paddlex.ppdet.modeling.heads.fcos_head import FCOSFeat
+
+from paddlex.ppdet.core.workspace import register
+
+__all__ = ['RetinaHead']
+
+
+@register
+class RetinaFeat(FCOSFeat):
+    """We use FCOSFeat to construct conv layers in RetinaNet.
+    We rename FCOSFeat to RetinaFeat to avoid confusion.
+    """
+    pass
+
+
+@register
+class RetinaHead(nn.Layer):
+    """Used in RetinaNet proposed in paper https://arxiv.org/pdf/1708.02002.pdf
+    """
+    __shared__ = ['num_classes']
+    __inject__ = [
+        'conv_feat', 'anchor_generator', 'bbox_assigner', 'loss_class',
+        'loss_bbox', 'nms'
+    ]
+
+    def __init__(self,
+                 num_classes=80,
+                 conv_feat='RetinaFeat',
+                 anchor_generator='RetinaAnchorGenerator',
+                 bbox_assigner='MaxIoUAssigner',
+                 loss_class='FocalLoss',
+                 loss_bbox='SmoothL1Loss',
+                 nms='MultiClassNMS',
+                 prior_prob=0.01,
+                 nms_pre=1000,
+                 weights=[1., 1., 1., 1.]):
+        super(RetinaHead, self).__init__()
+        self.num_classes = num_classes
+        self.conv_feat = conv_feat
+        self.anchor_generator = anchor_generator
+        self.bbox_assigner = bbox_assigner
+        self.loss_class = loss_class
+        self.loss_bbox = loss_bbox
+        self.nms = nms
+        self.nms_pre = nms_pre
+        self.weights = weights
+
+        bias_init_value = -math.log((1 - prior_prob) / prior_prob)
+        num_anchors = self.anchor_generator.num_anchors
+        self.retina_cls = nn.Conv2D(
+            in_channels=self.conv_feat.feat_out,
+            out_channels=self.num_classes * num_anchors,
+            kernel_size=3,
+            stride=1,
+            padding=1,
+            weight_attr=ParamAttr(initializer=Normal(
+                mean=0.0, std=0.01)),
+            bias_attr=ParamAttr(initializer=Constant(value=bias_init_value)))
+        self.retina_reg = nn.Conv2D(
+            in_channels=self.conv_feat.feat_out,
+            out_channels=4 * num_anchors,
+            kernel_size=3,
+            stride=1,
+            padding=1,
+            weight_attr=ParamAttr(initializer=Normal(
+                mean=0.0, std=0.01)),
+            bias_attr=ParamAttr(initializer=Constant(value=0)))
+
+    def forward(self, neck_feats, targets=None):
+        cls_logits_list = []
+        bboxes_reg_list = []
+        for neck_feat in neck_feats:
+            conv_cls_feat, conv_reg_feat = self.conv_feat(neck_feat)
+            cls_logits = self.retina_cls(conv_cls_feat)
+            bbox_reg = self.retina_reg(conv_reg_feat)
+            cls_logits_list.append(cls_logits)
+            bboxes_reg_list.append(bbox_reg)
+
+        if self.training:
+            return self.get_loss([cls_logits_list, bboxes_reg_list], targets)
+        else:
+            return [cls_logits_list, bboxes_reg_list]
+
+    def get_loss(self, head_outputs, targets):
+        """Here we calculate loss for a batch of images.
+        We assign anchors to gts in each image and gather all the assigned
+        positive and negative samples. Then the loss is calculated on the
+        gathered samples.
+        """
+        cls_logits_list, bboxes_reg_list = head_outputs
+        anchors = self.anchor_generator(cls_logits_list)
+        anchors = paddle.concat(anchors)
+
+        # matches: contain gt_inds
+        # match_labels: -1(ignore), 0(neg) or 1(pos)
+        matches_list, match_labels_list = [], []
+        # assign anchors to gts, no sampling is involved
+        for gt_bbox in targets['gt_bbox']:
+            matches, match_labels = self.bbox_assigner(anchors, gt_bbox)
+            matches_list.append(matches)
+            match_labels_list.append(match_labels)
+
+        # reshape network outputs
+        cls_logits = [
+            _.transpose([0, 2, 3, 1]).reshape([0, -1, self.num_classes])
+            for _ in cls_logits_list
+        ]
+        bboxes_reg = [
+            _.transpose([0, 2, 3, 1]).reshape([0, -1, 4])
+            for _ in bboxes_reg_list
+        ]
+        cls_logits = paddle.concat(cls_logits, axis=1)
+        bboxes_reg = paddle.concat(bboxes_reg, axis=1)
+
+        cls_pred_list, cls_tar_list = [], []
+        reg_pred_list, reg_tar_list = [], []
+        # find and gather preds and targets in each image
+        for matches, match_labels, cls_logit, bbox_reg, gt_bbox, gt_class in \
+            zip(matches_list, match_labels_list, cls_logits, bboxes_reg,
+                targets['gt_bbox'], targets['gt_class']):
+            pos_mask = (match_labels == 1)
+            neg_mask = (match_labels == 0)
+            chosen_mask = paddle.logical_or(pos_mask, neg_mask)
+
+            gt_class = gt_class.reshape([-1])
+            bg_class = paddle.to_tensor(
+                [self.num_classes], dtype=gt_class.dtype)
+            # a trick to assign num_classes to negative targets
+            gt_class = paddle.concat([gt_class, bg_class], axis=-1)
+            matches = paddle.where(
+                neg_mask,
+                paddle.full_like(matches, gt_class.size - 1), matches)
+
+            cls_pred = cls_logit[chosen_mask]
+            cls_tar = gt_class[matches[chosen_mask]]
+            reg_pred = bbox_reg[pos_mask].reshape([-1, 4])
+            reg_tar = gt_bbox[matches[pos_mask]].reshape([-1, 4])
+            reg_tar = bbox2delta(anchors[pos_mask], reg_tar, self.weights)
+            cls_pred_list.append(cls_pred)
+            cls_tar_list.append(cls_tar)
+            reg_pred_list.append(reg_pred)
+            reg_tar_list.append(reg_tar)
+        cls_pred = paddle.concat(cls_pred_list)
+        cls_tar = paddle.concat(cls_tar_list)
+        reg_pred = paddle.concat(reg_pred_list)
+        reg_tar = paddle.concat(reg_tar_list)
+
+        avg_factor = max(1.0, reg_pred.shape[0])
+        cls_loss = self.loss_class(
+            cls_pred, cls_tar, reduction='sum') / avg_factor
+
+        if reg_pred.shape[0] == 0:
+            reg_loss = paddle.zeros([1])
+            reg_loss.stop_gradient = False
+        else:
+            reg_loss = self.loss_bbox(
+                reg_pred, reg_tar, reduction='sum') / avg_factor
+
+        loss = cls_loss + reg_loss
+        out_dict = {
+            'loss_cls': cls_loss,
+            'loss_reg': reg_loss,
+            'loss': loss,
+        }
+        return out_dict
+
+    def get_bboxes_single(self,
+                          anchors,
+                          cls_scores_list,
+                          bbox_preds_list,
+                          im_shape,
+                          scale_factor,
+                          rescale=True):
+        assert len(cls_scores_list) == len(bbox_preds_list)
+        mlvl_bboxes = []
+        mlvl_scores = []
+        for anchor, cls_score, bbox_pred in zip(anchors, cls_scores_list,
+                                                bbox_preds_list):
+            cls_score = cls_score.reshape([-1, self.num_classes])
+            bbox_pred = bbox_pred.reshape([-1, 4])
+            if self.nms_pre is not None and cls_score.shape[0] > self.nms_pre:
+                max_score = cls_score.max(axis=1)
+                _, topk_inds = max_score.topk(self.nms_pre)
+                bbox_pred = bbox_pred.gather(topk_inds)
+                anchor = anchor.gather(topk_inds)
+                cls_score = cls_score.gather(topk_inds)
+            bbox_pred = delta2bbox(bbox_pred, anchor, self.weights).squeeze()
+            mlvl_bboxes.append(bbox_pred)
+            mlvl_scores.append(F.sigmoid(cls_score))
+        mlvl_bboxes = paddle.concat(mlvl_bboxes)
+        mlvl_bboxes = paddle.squeeze(mlvl_bboxes)
+        if rescale:
+            mlvl_bboxes = mlvl_bboxes / paddle.concat(
+                [scale_factor[::-1], scale_factor[::-1]])
+        mlvl_scores = paddle.concat(mlvl_scores)
+        mlvl_scores = mlvl_scores.transpose([1, 0])
+        return mlvl_bboxes, mlvl_scores
+
+    def decode(self, anchors, cls_logits, bboxes_reg, im_shape, scale_factor):
+        batch_bboxes = []
+        batch_scores = []
+        for img_id in range(cls_logits[0].shape[0]):
+            num_lvls = len(cls_logits)
+            cls_scores_list = [cls_logits[i][img_id] for i in range(num_lvls)]
+            bbox_preds_list = [bboxes_reg[i][img_id] for i in range(num_lvls)]
+            bboxes, scores = self.get_bboxes_single(
+                anchors, cls_scores_list, bbox_preds_list, im_shape[img_id],
+                scale_factor[img_id])
+            batch_bboxes.append(bboxes)
+            batch_scores.append(scores)
+        batch_bboxes = paddle.stack(batch_bboxes, axis=0)
+        batch_scores = paddle.stack(batch_scores, axis=0)
+        return batch_bboxes, batch_scores
+
+    def post_process(self, head_outputs, im_shape, scale_factor):
+        cls_logits_list, bboxes_reg_list = head_outputs
+        anchors = self.anchor_generator(cls_logits_list)
+        cls_logits = [_.transpose([0, 2, 3, 1]) for _ in cls_logits_list]
+        bboxes_reg = [_.transpose([0, 2, 3, 1]) for _ in bboxes_reg_list]
+        bboxes, scores = self.decode(anchors, cls_logits, bboxes_reg, im_shape,
+                                     scale_factor)
+
+        bbox_pred, bbox_num, _ = self.nms(bboxes, scores)
+        return bbox_pred, bbox_num
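
The bias of retina_cls above follows the focal-loss initialization: with prior_prob = 0.01 every anchor starts out predicting roughly a 1% probability per class, which keeps the loss on the overwhelmingly negative anchors small and stable in the first iterations. A minimal sketch of the relationship (plain Python, no Paddle needed):

    import math

    prior_prob = 0.01
    bias = -math.log((1 - prior_prob) / prior_prob)  # ~ -4.595
    # the sigmoid of the bias recovers the prior
    assert abs(1.0 / (1.0 + math.exp(-bias)) - prior_prob) < 1e-9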

+ 2 - 1
paddlex/ppdet/modeling/heads/s2anet_head.py

@@ -23,6 +23,7 @@ from paddlex.ppdet.core.workspace import register
 from paddlex.ppdet.modeling import ops
 from paddlex.ppdet.modeling import bbox_utils
 from paddlex.ppdet.modeling.proposal_generator.target_layer import RBoxAssigner
+from ..cls_utils import _get_class_default_kwargs
 import numpy as np
 
 
@@ -232,7 +233,7 @@ class S2ANetHead(nn.Layer):
                  align_conv_type='AlignConv',
                  align_conv_size=3,
                  use_sigmoid_cls=True,
-                 anchor_assign=RBoxAssigner().__dict__,
+                 anchor_assign=_get_class_default_kwargs(RBoxAssigner),
                  reg_loss_weight=[1.0, 1.0, 1.0, 1.0, 1.1],
                  cls_loss_weight=[1.1, 1.05],
                  reg_loss_type='l1'):
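
`_get_class_default_kwargs` comes from the new `cls_utils` module, which is not shown in this diff; the point of the change is to read RBoxAssigner's defaults without instantiating it at import time, as `RBoxAssigner().__dict__` did. A plausible sketch of such a helper, offered as an assumption about its behavior rather than the actual implementation:

    import inspect

    def _get_class_default_kwargs(cls):
        # collect the default values of cls.__init__ without creating an instance
        # (hypothetical sketch; the real helper lives in cls_utils.py)
        sig = inspect.signature(cls.__init__)
        return {
            name: p.default
            for name, p in sig.parameters.items()
            if p.default is not inspect.Parameter.empty
        }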

+ 12 - 10
paddlex/ppdet/modeling/heads/ssd_head.py

@@ -20,6 +20,7 @@ from paddle.regularizer import L2Decay
 from paddle import ParamAttr
 
 from ..layers import AnchorGeneratorSSD
+from ..cls_utils import _get_class_default_kwargs
 
 
 class SepConvLayer(nn.Layer):
@@ -110,16 +111,17 @@ class SSDHead(nn.Layer):
     __shared__ = ['num_classes']
     __inject__ = ['anchor_generator', 'loss']
 
-    def __init__(self,
-                 num_classes=80,
-                 in_channels=(512, 1024, 512, 256, 256, 256),
-                 anchor_generator=AnchorGeneratorSSD().__dict__,
-                 kernel_size=3,
-                 padding=1,
-                 use_sepconv=False,
-                 conv_decay=0.,
-                 loss='SSDLoss',
-                 use_extra_head=False):
+    def __init__(
+            self,
+            num_classes=80,
+            in_channels=(512, 1024, 512, 256, 256, 256),
+            anchor_generator=_get_class_default_kwargs(AnchorGeneratorSSD),
+            kernel_size=3,
+            padding=1,
+            use_sepconv=False,
+            conv_decay=0.,
+            loss='SSDLoss',
+            use_extra_head=False):
         super(SSDHead, self).__init__()
         # add background class
         self.num_classes = num_classes + 1

+ 18 - 13
paddlex/ppdet/modeling/heads/tood_head.py

@@ -218,13 +218,17 @@ class TOODHead(nn.Layer):
         assert len(feats) == len(self.fpn_strides), \
             "The size of feats is not equal to size of fpn_strides"
 
-        anchors, num_anchors_list, stride_tensor_list = generate_anchors_for_grid_cell(
+        anchors, anchor_points, num_anchors_list, stride_tensor =\
+            generate_anchors_for_grid_cell(
             feats, self.fpn_strides, self.grid_cell_scale,
             self.grid_cell_offset)
+        anchor_centers_split = paddle.split(anchor_points / stride_tensor,
+                                            num_anchors_list)
 
         cls_score_list, bbox_pred_list = [], []
-        for feat, scale_reg, anchor, stride in zip(feats, self.scales_regs,
-                                                   anchors, self.fpn_strides):
+        for feat, scale_reg, anchor_centers, stride in zip(
+                feats, self.scales_regs, anchor_centers_split,
+                self.fpn_strides):
             b, _, h, w = get_static_shape(feat)
             inter_feats = []
             for inter_conv in self.inter_convs:
@@ -250,8 +254,8 @@ class TOODHead(nn.Layer):
             # reg prediction and alignment
             reg_dist = scale_reg(self.tood_reg(reg_feat).exp())
             reg_dist = reg_dist.flatten(2).transpose([0, 2, 1])
-            anchor_centers = bbox_center(anchor).unsqueeze(0) / stride
-            reg_bbox = batch_distance2bbox(anchor_centers, reg_dist)
+            reg_bbox = batch_distance2bbox(
+                anchor_centers.unsqueeze(0), reg_dist)
             if self.use_align_head:
                 reg_offset = F.relu(self.reg_offset_conv1(feat))
                 reg_offset = self.reg_offset_conv2(reg_offset)
@@ -268,12 +272,8 @@ class TOODHead(nn.Layer):
             bbox_pred_list.append(bbox_pred)
         cls_score_list = paddle.concat(cls_score_list, axis=1)
         bbox_pred_list = paddle.concat(bbox_pred_list, axis=1)
-        anchors = paddle.concat(anchors)
-        anchors.stop_gradient = True
-        stride_tensor_list = paddle.concat(stride_tensor_list).unsqueeze(0)
-        stride_tensor_list.stop_gradient = True
 
-        return cls_score_list, bbox_pred_list, anchors, num_anchors_list, stride_tensor_list
+        return cls_score_list, bbox_pred_list, anchors, num_anchors_list, stride_tensor
 
     @staticmethod
     def _focal_loss(score, label, alpha=0.25, gamma=2.0):
@@ -286,9 +286,11 @@ class TOODHead(nn.Layer):
         return loss
 
     def get_loss(self, head_outs, gt_meta):
-        pred_scores, pred_bboxes, anchors, num_anchors_list, stride_tensor_list = head_outs
+        pred_scores, pred_bboxes, anchors, \
+        num_anchors_list, stride_tensor = head_outs
         gt_labels = gt_meta['gt_class']
         gt_bboxes = gt_meta['gt_bbox']
+        pad_gt_mask = gt_meta['pad_gt_mask']
         # label assignment
         if gt_meta['epoch_id'] < self.static_assigner_epoch:
             assigned_labels, assigned_bboxes, assigned_scores = self.static_assigner(
@@ -296,20 +298,23 @@ class TOODHead(nn.Layer):
                 num_anchors_list,
                 gt_labels,
                 gt_bboxes,
+                pad_gt_mask,
                 bg_index=self.num_classes)
             alpha_l = 0.25
         else:
             assigned_labels, assigned_bboxes, assigned_scores = self.assigner(
                 pred_scores.detach(),
-                pred_bboxes.detach() * stride_tensor_list,
+                pred_bboxes.detach() * stride_tensor,
                 bbox_center(anchors),
+                num_anchors_list,
                 gt_labels,
                 gt_bboxes,
+                pad_gt_mask,
                 bg_index=self.num_classes)
             alpha_l = -1
 
         # rescale bbox
-        assigned_bboxes /= stride_tensor_list
+        assigned_bboxes /= stride_tensor
         # classification loss
         loss_cls = self._focal_loss(
             pred_scores, assigned_scores, alpha=alpha_l)
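
The refactor above replaces the per-level `bbox_center(anchor) / stride` with anchor centers that are pre-divided by the stride tensor and split per level, so `batch_distance2bbox` can decode directly in stride-normalized space. For reference, distance-to-bbox decoding takes a center (x, y) and four distances (l, t, r, b); a minimal sketch, assuming that layout:

    import paddle

    def distance2bbox_sketch(points, distances):
        # points: [..., 2] centers (x, y); distances: [..., 4] as (l, t, r, b)
        x1y1 = points - distances[..., :2]
        x2y2 = points + distances[..., 2:]
        return paddle.concat([x1y1, x2y2], axis=-1)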

+ 2 - 2
paddlex/ppdet/modeling/heads/ttf_head.py

@@ -284,8 +284,8 @@ class TTFHead(nn.Layer):
 
         pred_boxes = paddle.concat(
             [
-                0 - pred_wh[:, 0:2, :, :] + base_loc,
-                pred_wh[:, 2:4] + base_loc
+                0 - pred_wh[:, 0:2, :, :] + base_loc, pred_wh[:, 2:4] +
+                base_loc
             ],
             axis=1)
         pred_boxes = paddle.transpose(pred_boxes, [0, 2, 3, 1])

+ 295 - 0
paddlex/ppdet/modeling/heads/yolo_head.py

@@ -1,3 +1,17 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
@@ -5,6 +19,17 @@ from paddle import ParamAttr
 from paddle.regularizer import L2Decay
 from paddlex.ppdet.core.workspace import register
 
+import math
+import numpy as np
+from ..initializer import bias_init_with_prob, constant_
+from ..backbones.csp_darknet import BaseConv, DWConv
+from ..losses import IouLoss
+from paddlex.ppdet.modeling.assigners.simota_assigner import SimOTAAssigner
+from paddlex.ppdet.modeling.bbox_utils import bbox_overlaps
+from paddlex.ppdet.modeling.layers import MultiClassNMS
+
+__all__ = ['YOLOv3Head', 'YOLOXHead']
+
 
 def _de_sigmoid(x, eps=1e-7):
     x = paddle.clip(x, eps, 1. / eps)
@@ -122,3 +147,273 @@ class YOLOv3Head(nn.Layer):
     @classmethod
     def from_config(cls, cfg, input_shape):
         return {'in_channels': [i.channels for i in input_shape], }
+
+
+@register
+class YOLOXHead(nn.Layer):
+    __shared__ = ['num_classes', 'width_mult', 'act', 'trt', 'exclude_nms']
+    __inject__ = ['assigner', 'nms']
+
+    def __init__(self,
+                 num_classes=80,
+                 width_mult=1.0,
+                 depthwise=False,
+                 in_channels=[256, 512, 1024],
+                 feat_channels=256,
+                 fpn_strides=(8, 16, 32),
+                 l1_epoch=285,
+                 act='silu',
+                 assigner=SimOTAAssigner(use_vfl=False),
+                 nms='MultiClassNMS',
+                 loss_weight={
+                     'cls': 1.0,
+                     'obj': 1.0,
+                     'iou': 5.0,
+                     'l1': 1.0,
+                 },
+                 trt=False,
+                 exclude_nms=False):
+        super(YOLOXHead, self).__init__()
+        self._dtype = paddle.framework.get_default_dtype()
+        self.num_classes = num_classes
+        assert len(in_channels) > 0, "in_channels length should be > 0"
+        self.in_channels = in_channels
+        feat_channels = int(feat_channels * width_mult)
+        self.fpn_strides = fpn_strides
+        self.l1_epoch = l1_epoch
+        self.assigner = assigner
+        self.nms = nms
+        if isinstance(self.nms, MultiClassNMS) and trt:
+            self.nms.trt = trt
+        self.exclude_nms = exclude_nms
+        self.loss_weight = loss_weight
+        self.iou_loss = IouLoss(loss_weight=1.0)  # default loss_weight 2.5
+
+        ConvBlock = DWConv if depthwise else BaseConv
+
+        self.stem_conv = nn.LayerList()
+        self.conv_cls = nn.LayerList()
+        self.conv_reg = nn.LayerList()  # reg [x,y,w,h] + obj
+        for in_c in self.in_channels:
+            self.stem_conv.append(BaseConv(in_c, feat_channels, 1, 1, act=act))
+
+            self.conv_cls.append(
+                nn.Sequential(* [
+                    ConvBlock(
+                        feat_channels, feat_channels, 3, 1,
+                        act=act), ConvBlock(
+                            feat_channels, feat_channels, 3, 1, act=act),
+                    nn.Conv2D(
+                        feat_channels,
+                        self.num_classes,
+                        1,
+                        bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
+                ]))
+
+            self.conv_reg.append(
+                nn.Sequential(* [
+                    ConvBlock(
+                        feat_channels, feat_channels, 3, 1, act=act),
+                    ConvBlock(
+                        feat_channels, feat_channels, 3, 1, act=act),
+                    nn.Conv2D(
+                        feat_channels,
+                        4 + 1,  # reg [x,y,w,h] + obj
+                        1,
+                        bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
+                ]))
+
+        self._init_weights()
+
+    @classmethod
+    def from_config(cls, cfg, input_shape):
+        return {'in_channels': [i.channels for i in input_shape], }
+
+    def _init_weights(self):
+        bias_cls = bias_init_with_prob(0.01)
+        bias_reg = paddle.full([5], math.log(5.), dtype=self._dtype)
+        bias_reg[:2] = 0.
+        bias_reg[-1] = bias_cls
+        for cls_, reg_ in zip(self.conv_cls, self.conv_reg):
+            constant_(cls_[-1].weight)
+            constant_(cls_[-1].bias, bias_cls)
+            constant_(reg_[-1].weight)
+            reg_[-1].bias.set_value(bias_reg)
+
+    def _generate_anchor_point(self, feat_sizes, strides, offset=0.):
+        anchor_points, stride_tensor = [], []
+        num_anchors_list = []
+        for feat_size, stride in zip(feat_sizes, strides):
+            h, w = feat_size
+            x = (paddle.arange(w) + offset) * stride
+            y = (paddle.arange(h) + offset) * stride
+            y, x = paddle.meshgrid(y, x)
+            anchor_points.append(
+                paddle.stack(
+                    [x, y], axis=-1).reshape([-1, 2]))
+            stride_tensor.append(
+                paddle.full(
+                    [len(anchor_points[-1]), 1], stride, dtype=self._dtype))
+            num_anchors_list.append(len(anchor_points[-1]))
+        anchor_points = paddle.concat(anchor_points).astype(self._dtype)
+        anchor_points.stop_gradient = True
+        stride_tensor = paddle.concat(stride_tensor)
+        stride_tensor.stop_gradient = True
+        return anchor_points, stride_tensor, num_anchors_list
+
+    def forward(self, feats, targets=None):
+        assert len(feats) == len(self.fpn_strides), \
+            "The size of feats is not equal to size of fpn_strides"
+
+        feat_sizes = [[f.shape[-2], f.shape[-1]] for f in feats]
+        cls_score_list, reg_pred_list = [], []
+        obj_score_list = []
+        for i, feat in enumerate(feats):
+            feat = self.stem_conv[i](feat)
+            cls_logit = self.conv_cls[i](feat)
+            reg_pred = self.conv_reg[i](feat)
+            # cls prediction
+            cls_score = F.sigmoid(cls_logit)
+            cls_score_list.append(cls_score.flatten(2).transpose([0, 2, 1]))
+            # reg prediction
+            reg_xywh, obj_logit = paddle.split(reg_pred, [4, 1], axis=1)
+            reg_xywh = reg_xywh.flatten(2).transpose([0, 2, 1])
+            reg_pred_list.append(reg_xywh)
+            # obj prediction
+            obj_score = F.sigmoid(obj_logit)
+            obj_score_list.append(obj_score.flatten(2).transpose([0, 2, 1]))
+
+        cls_score_list = paddle.concat(cls_score_list, axis=1)
+        reg_pred_list = paddle.concat(reg_pred_list, axis=1)
+        obj_score_list = paddle.concat(obj_score_list, axis=1)
+
+        # bbox decode
+        anchor_points, stride_tensor, _ =\
+            self._generate_anchor_point(feat_sizes, self.fpn_strides)
+        reg_xy, reg_wh = paddle.split(reg_pred_list, 2, axis=-1)
+        reg_xy += (anchor_points / stride_tensor)
+        reg_wh = paddle.exp(reg_wh) * 0.5
+        bbox_pred_list = paddle.concat(
+            [reg_xy - reg_wh, reg_xy + reg_wh], axis=-1)
+
+        if self.training:
+            anchor_points, stride_tensor, num_anchors_list =\
+                self._generate_anchor_point(feat_sizes, self.fpn_strides, 0.5)
+            yolox_losses = self.get_loss([
+                cls_score_list, bbox_pred_list, obj_score_list, anchor_points,
+                stride_tensor, num_anchors_list
+            ], targets)
+            return yolox_losses
+        else:
+            pred_scores = (cls_score_list * obj_score_list).sqrt()
+            return pred_scores, bbox_pred_list, stride_tensor
+
+    def get_loss(self, head_outs, targets):
+        pred_cls, pred_bboxes, pred_obj,\
+        anchor_points, stride_tensor, num_anchors_list = head_outs
+        gt_labels = targets['gt_class']
+        gt_bboxes = targets['gt_bbox']
+        pred_scores = (pred_cls * pred_obj).sqrt()
+        # label assignment
+        center_and_strides = paddle.concat(
+            [anchor_points, stride_tensor, stride_tensor], axis=-1)
+        pos_num_list, label_list, bbox_target_list = [], [], []
+        for pred_score, pred_bbox, gt_box, gt_label in zip(
+                pred_scores.detach(),
+                pred_bboxes.detach() * stride_tensor, gt_bboxes, gt_labels):
+            pos_num, label, _, bbox_target = self.assigner(
+                pred_score, center_and_strides, pred_bbox, gt_box, gt_label)
+            pos_num_list.append(pos_num)
+            label_list.append(label)
+            bbox_target_list.append(bbox_target)
+        labels = paddle.to_tensor(np.stack(label_list, axis=0))
+        bbox_targets = paddle.to_tensor(np.stack(bbox_target_list, axis=0))
+        bbox_targets /= stride_tensor  # rescale bbox
+
+        # 1. obj score loss
+        mask_positive = (labels != self.num_classes)
+        loss_obj = F.binary_cross_entropy(
+            pred_obj,
+            mask_positive.astype(pred_obj.dtype).unsqueeze(-1),
+            reduction='sum')
+
+        num_pos = sum(pos_num_list)
+
+        if num_pos > 0:
+            num_pos = paddle.to_tensor(num_pos, dtype=self._dtype).clip(min=1)
+            loss_obj /= num_pos
+
+            # 2. iou loss
+            bbox_mask = mask_positive.unsqueeze(-1).tile([1, 1, 4])
+            pred_bboxes_pos = paddle.masked_select(pred_bboxes,
+                                                   bbox_mask).reshape([-1, 4])
+            assigned_bboxes_pos = paddle.masked_select(
+                bbox_targets, bbox_mask).reshape([-1, 4])
+            bbox_iou = bbox_overlaps(pred_bboxes_pos, assigned_bboxes_pos)
+            bbox_iou = paddle.diag(bbox_iou)
+
+            loss_iou = self.iou_loss(
+                pred_bboxes_pos.split(
+                    4, axis=-1),
+                assigned_bboxes_pos.split(
+                    4, axis=-1))
+            loss_iou = loss_iou.sum() / num_pos
+
+            # 3. cls loss
+            cls_mask = mask_positive.unsqueeze(-1).tile(
+                [1, 1, self.num_classes])
+            pred_cls_pos = paddle.masked_select(
+                pred_cls, cls_mask).reshape([-1, self.num_classes])
+            assigned_cls_pos = paddle.masked_select(labels, mask_positive)
+            assigned_cls_pos = F.one_hot(assigned_cls_pos,
+                                         self.num_classes + 1)[..., :-1]
+            assigned_cls_pos *= bbox_iou.unsqueeze(-1)
+            loss_cls = F.binary_cross_entropy(
+                pred_cls_pos, assigned_cls_pos, reduction='sum')
+            loss_cls /= num_pos
+
+            # 4. l1 loss
+            if targets['epoch_id'] >= self.l1_epoch:
+                loss_l1 = F.l1_loss(
+                    pred_bboxes_pos, assigned_bboxes_pos, reduction='sum')
+                loss_l1 /= num_pos
+            else:
+                loss_l1 = paddle.zeros([1])
+                loss_l1.stop_gradient = False
+        else:
+            loss_cls = paddle.zeros([1])
+            loss_iou = paddle.zeros([1])
+            loss_l1 = paddle.zeros([1])
+            loss_cls.stop_gradient = False
+            loss_iou.stop_gradient = False
+            loss_l1.stop_gradient = False
+
+        loss = self.loss_weight['obj'] * loss_obj + \
+               self.loss_weight['cls'] * loss_cls + \
+               self.loss_weight['iou'] * loss_iou
+
+        if targets['epoch_id'] >= self.l1_epoch:
+            loss += (self.loss_weight['l1'] * loss_l1)
+
+        yolox_losses = {
+            'loss': loss,
+            'loss_cls': loss_cls,
+            'loss_obj': loss_obj,
+            'loss_iou': loss_iou,
+            'loss_l1': loss_l1,
+        }
+        return yolox_losses
+
+    def post_process(self, head_outs, img_shape, scale_factor):
+        pred_scores, pred_bboxes, stride_tensor = head_outs
+        pred_scores = pred_scores.transpose([0, 2, 1])
+        pred_bboxes *= stride_tensor
+        # scale bbox to origin image
+        scale_factor = scale_factor.flip(-1).tile([1, 2]).unsqueeze(1)
+        pred_bboxes /= scale_factor
+        if self.exclude_nms:
+            # `exclude_nms=True` is only used for benchmarking
+            return pred_bboxes.sum(), pred_scores.sum()
+        else:
+            bbox_pred, bbox_num, _ = self.nms(pred_bboxes, pred_scores)
+            return bbox_pred, bbox_num
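
At inference YOLOXHead fuses its two probability branches with a geometric mean, `(cls_score * obj_score).sqrt()`, so a box only keeps a high score when classification and objectness agree. A small numeric illustration:

    import math

    cls_prob, obj_prob = 0.9, 0.1
    fused = math.sqrt(cls_prob * obj_prob)  # 0.3, pulled down by the weak branch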

+ 2 - 1
paddlex/ppdet/modeling/initializer.py

@@ -273,7 +273,8 @@ def linear_init_(module):
 def conv_init_(module):
     bound = 1 / np.sqrt(np.prod(module.weight.shape[1:]))
     uniform_(module.weight, -bound, bound)
-    uniform_(module.bias, -bound, bound)
+    if module.bias is not None:
+        uniform_(module.bias, -bound, bound)
 
 
 def bias_init_with_prob(prior_prob=0.01):

+ 98 - 155
paddlex/ppdet/modeling/layers.py

@@ -128,7 +128,7 @@ class ConvNormLayer(nn.Layer):
                  dcn_lr_scale=2.,
                  dcn_regularizer=L2Decay(0.)):
         super(ConvNormLayer, self).__init__()
-        assert norm_type in ['bn', 'sync_bn', 'gn']
+        assert norm_type in ['bn', 'sync_bn', 'gn', None]
 
         if bias_on:
             bias_attr = ParamAttr(
@@ -185,10 +185,13 @@ class ConvNormLayer(nn.Layer):
                 num_channels=ch_out,
                 weight_attr=param_attr,
                 bias_attr=bias_attr)
+        else:
+            self.norm = None
 
     def forward(self, inputs):
         out = self.conv(inputs)
-        out = self.norm(out)
+        if self.norm is not None:
+            out = self.norm(out)
         return out
 
 
@@ -250,7 +253,7 @@ class LiteConv(nn.Layer):
 
 
 class DropBlock(nn.Layer):
-    def __init__(self, block_size, keep_prob, name, data_format='NCHW'):
+    def __init__(self, block_size, keep_prob, name=None, data_format='NCHW'):
         """
         DropBlock layer, see https://arxiv.org/abs/1810.12890
 
@@ -363,18 +366,20 @@ class AnchorGeneratorSSD(object):
 @register
 @serializable
 class RCNNBox(object):
-    __shared__ = ['num_classes']
+    __shared__ = ['num_classes', 'export_onnx']
 
     def __init__(self,
                  prior_box_var=[10., 10., 5., 5.],
                  code_type="decode_center_size",
                  box_normalized=False,
-                 num_classes=80):
+                 num_classes=80,
+                 export_onnx=False):
         super(RCNNBox, self).__init__()
         self.prior_box_var = prior_box_var
         self.code_type = code_type
         self.box_normalized = box_normalized
         self.num_classes = num_classes
+        self.export_onnx = export_onnx
 
     def __call__(self, bbox_head_out, rois, im_shape, scale_factor):
         bbox_pred = bbox_head_out[0]
@@ -382,39 +387,39 @@ class RCNNBox(object):
         roi = rois[0]
         rois_num = rois[1]
 
-        origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
-        scale_list = []
-        origin_shape_list = []
+        if self.export_onnx:
+            onnx_rois_num_per_im = rois_num[0]
+            origin_shape = paddle.expand(im_shape[0, :],
+                                         [onnx_rois_num_per_im, 2])
 
-        batch_size = 1
-        if isinstance(roi, list):
-            batch_size = len(roi)
         else:
-            batch_size = paddle.slice(paddle.shape(im_shape), [0], [0], [1])
-        # bbox_pred.shape: [N, C*4]
-        for idx in range(batch_size):
-            roi_per_im = roi[idx]
-            rois_num_per_im = rois_num[idx]
-            expand_im_shape = paddle.expand(im_shape[idx, :],
-                                            [rois_num_per_im, 2])
-            origin_shape_list.append(expand_im_shape)
+            origin_shape_list = []
+            if isinstance(roi, list):
+                batch_size = len(roi)
+            else:
+                batch_size = paddle.slice(
+                    paddle.shape(im_shape), [0], [0], [1])
+
+            # bbox_pred.shape: [N, C*4]
+            for idx in range(batch_size):
+                rois_num_per_im = rois_num[idx]
+                expand_im_shape = paddle.expand(im_shape[idx, :],
+                                                [rois_num_per_im, 2])
+                origin_shape_list.append(expand_im_shape)
 
-        origin_shape = paddle.concat(origin_shape_list)
+            origin_shape = paddle.concat(origin_shape_list)
 
         # bbox_pred.shape: [N, C*4]
         # C=num_classes in faster/mask rcnn(bbox_head), C=1 in cascade rcnn(cascade_head)
         bbox = paddle.concat(roi)
-        if bbox.shape[0] == 0:
-            bbox = paddle.zeros([0, bbox_pred.shape[1]], dtype='float32')
-        else:
-            bbox = delta2bbox(bbox_pred, bbox, self.prior_box_var)
+        bbox = delta2bbox(bbox_pred, bbox, self.prior_box_var)
         scores = cls_prob[:, :-1]
 
         # bbox.shape: [N, C, 4]
         # bbox.shape[1] must be equal to scores.shape[1]
-        bbox_num_class = bbox.shape[1]
-        if bbox_num_class == 1:
-            bbox = paddle.tile(bbox, [1, self.num_classes, 1])
+        total_num = bbox.shape[0]
+        bbox_dim = bbox.shape[-1]
+        bbox = paddle.expand(bbox, [total_num, self.num_classes, bbox_dim])
 
         origin_h = paddle.unsqueeze(origin_shape[:, 0], axis=1)
         origin_w = paddle.unsqueeze(origin_shape[:, 1], axis=1)
@@ -439,7 +444,8 @@ class MultiClassNMS(object):
                  normalized=True,
                  nms_eta=1.0,
                  return_index=False,
-                 return_rois_num=True):
+                 return_rois_num=True,
+                 trt=False):
         super(MultiClassNMS, self).__init__()
         self.score_threshold = score_threshold
         self.nms_top_k = nms_top_k
@@ -449,6 +455,7 @@ class MultiClassNMS(object):
         self.nms_eta = nms_eta
         self.return_index = return_index
         self.return_rois_num = return_rois_num
+        self.trt = trt
 
     def __call__(self, bboxes, score, background_label=-1):
         """
@@ -470,7 +477,19 @@ class MultiClassNMS(object):
             kwargs.update({'rois_num': bbox_num})
         if background_label > -1:
             kwargs.update({'background_label': background_label})
-        return ops.multiclass_nms(bboxes, score, **kwargs)
+        kwargs.pop('trt')
+        # TODO(wangxinxin08): Paddle must be develop, or 2.3 and above, to run NMS on TensorRT
+        if self.trt and (int(paddle.version.major) == 0 or
+                         (int(paddle.version.major) >= 2 and
+                          int(paddle.version.minor) >= 3)):
+            # TODO(wangxinxin08): trick to make NMS run on TensorRT
+            kwargs.update({'nms_eta': 1.1})
+            bbox, bbox_num, _ = ops.multiclass_nms(bboxes, score, **kwargs)
+            mask = paddle.slice(bbox, [-1], [0], [1]) != -1
+            bbox = paddle.masked_select(bbox, mask).reshape((-1, 6))
+            return bbox, bbox_num, None
+        else:
+            return ops.multiclass_nms(bboxes, score, **kwargs)
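
The TensorRT branch above relies on `ops.multiclass_nms` padding its output with rows whose class id is -1, then strips them so downstream code sees only valid detections. The same filtering in isolation, assuming the usual [N, 6] rows of (class, score, x0, y0, x1, y1):

    import paddle

    def drop_padded_rows(bbox):
        # padded rows carry class id -1 in the first column
        mask = paddle.slice(bbox, [-1], [0], [1]) != -1
        return paddle.masked_select(bbox, mask).reshape((-1, 6))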
 
 
 @register
@@ -539,10 +558,15 @@ class YOLOBox(object):
         origin_shape = im_shape / scale_factor
         origin_shape = paddle.cast(origin_shape, 'int32')
         for i, head_out in enumerate(yolo_head_out):
-            boxes, scores = ops.yolo_box(head_out, origin_shape, anchors[i],
-                                         self.num_classes, self.conf_thresh,
-                                         self.downsample_ratio // 2**i,
-                                         self.clip_bbox, self.scale_x_y)
+            boxes, scores = paddle.vision.ops.yolo_box(
+                head_out,
+                origin_shape,
+                anchors[i],
+                self.num_classes,
+                self.conf_thresh,
+                self.downsample_ratio // 2**i,
+                self.clip_bbox,
+                scale_x_y=self.scale_x_y)
             boxes_list.append(boxes)
             scores_list.append(paddle.transpose(scores, perm=[0, 2, 1]))
         yolo_boxes = paddle.concat(boxes_list, axis=1)
@@ -553,9 +577,14 @@ class YOLOBox(object):
 @register
 @serializable
 class SSDBox(object):
-    def __init__(self, is_normalized=True):
+    def __init__(self,
+                 is_normalized=True,
+                 prior_box_var=[0.1, 0.1, 0.2, 0.2],
+                 use_fuse_decode=False):
         self.is_normalized = is_normalized
         self.norm_delta = float(not self.is_normalized)
+        self.prior_box_var = prior_box_var
+        self.use_fuse_decode = use_fuse_decode
 
     def __call__(self,
                  preds,
@@ -564,128 +593,42 @@ class SSDBox(object):
                  scale_factor,
                  var_weight=None):
         boxes, scores = preds
-        outputs = []
-        for box, score, prior_box in zip(boxes, scores, prior_boxes):
-            pb_w = prior_box[:, 2] - prior_box[:, 0] + self.norm_delta
-            pb_h = prior_box[:, 3] - prior_box[:, 1] + self.norm_delta
-            pb_x = prior_box[:, 0] + pb_w * 0.5
-            pb_y = prior_box[:, 1] + pb_h * 0.5
-            out_x = pb_x + box[:, :, 0] * pb_w * 0.1
-            out_y = pb_y + box[:, :, 1] * pb_h * 0.1
-            out_w = paddle.exp(box[:, :, 2] * 0.2) * pb_w
-            out_h = paddle.exp(box[:, :, 3] * 0.2) * pb_h
-
-            if self.is_normalized:
-                h = paddle.unsqueeze(
-                    im_shape[:, 0] / scale_factor[:, 0], axis=-1)
-                w = paddle.unsqueeze(
-                    im_shape[:, 1] / scale_factor[:, 1], axis=-1)
-                output = paddle.stack(
-                    [(out_x - out_w / 2.) * w, (out_y - out_h / 2.) * h,
-                     (out_x + out_w / 2.) * w, (out_y + out_h / 2.) * h],
-                    axis=-1)
-            else:
-                output = paddle.stack(
-                    [
-                        out_x - out_w / 2., out_y - out_h / 2.,
-                        out_x + out_w / 2. - 1., out_y + out_h / 2. - 1.
-                    ],
-                    axis=-1)
-            outputs.append(output)
-        boxes = paddle.concat(outputs, axis=1)
-
-        scores = F.softmax(paddle.concat(scores, axis=1))
-        scores = paddle.transpose(scores, [0, 2, 1])
-
-        return boxes, scores
-
-
-@register
-@serializable
-class AnchorGrid(object):
-    """Generate anchor grid
-
-    Args:
-        image_size (int or list): input image size, may be a single integer or
-            list of [h, w]. Default: 512
-        min_level (int): min level of the feature pyramid. Default: 3
-        max_level (int): max level of the feature pyramid. Default: 7
-        anchor_base_scale: base anchor scale. Default: 4
-        num_scales: number of anchor scales. Default: 3
-        aspect_ratios: aspect ratios. default: [[1, 1], [1.4, 0.7], [0.7, 1.4]]
-    """
-
-    def __init__(self,
-                 image_size=512,
-                 min_level=3,
-                 max_level=7,
-                 anchor_base_scale=4,
-                 num_scales=3,
-                 aspect_ratios=[[1, 1], [1.4, 0.7], [0.7, 1.4]]):
-        super(AnchorGrid, self).__init__()
-        if isinstance(image_size, Integral):
-            self.image_size = [image_size, image_size]
+        boxes = paddle.concat(boxes, axis=1)
+        prior_boxes = paddle.concat(prior_boxes)
+        if self.use_fuse_decode:
+            output_boxes = ops.box_coder(
+                prior_boxes,
+                self.prior_box_var,
+                boxes,
+                code_type="decode_center_size",
+                box_normalized=self.is_normalized)
         else:
-            self.image_size = image_size
-        for dim in self.image_size:
-            assert dim % 2 ** max_level == 0, \
-                "image size should be multiple of the max level stride"
-        self.min_level = min_level
-        self.max_level = max_level
-        self.anchor_base_scale = anchor_base_scale
-        self.num_scales = num_scales
-        self.aspect_ratios = aspect_ratios
+            pb_w = prior_boxes[:, 2] - prior_boxes[:, 0] + self.norm_delta
+            pb_h = prior_boxes[:, 3] - prior_boxes[:, 1] + self.norm_delta
+            pb_x = prior_boxes[:, 0] + pb_w * 0.5
+            pb_y = prior_boxes[:, 1] + pb_h * 0.5
+            out_x = pb_x + boxes[:, :, 0] * pb_w * self.prior_box_var[0]
+            out_y = pb_y + boxes[:, :, 1] * pb_h * self.prior_box_var[1]
+            out_w = paddle.exp(boxes[:, :, 2] * self.prior_box_var[2]) * pb_w
+            out_h = paddle.exp(boxes[:, :, 3] * self.prior_box_var[3]) * pb_h
+            output_boxes = paddle.stack(
+                [
+                    out_x - out_w / 2., out_y - out_h / 2., out_x + out_w / 2.,
+                    out_y + out_h / 2.
+                ],
+                axis=-1)
+
+        if self.is_normalized:
+            h = (im_shape[:, 0] / scale_factor[:, 0]).unsqueeze(-1)
+            w = (im_shape[:, 1] / scale_factor[:, 1]).unsqueeze(-1)
+            im_shape = paddle.stack([w, h, w, h], axis=-1)
+            output_boxes *= im_shape
+        else:
+            output_boxes[..., -2:] -= 1.0
+        output_scores = F.softmax(paddle.concat(
+            scores, axis=1)).transpose([0, 2, 1])
 
-    @property
-    def base_cell(self):
-        if not hasattr(self, '_base_cell'):
-            self._base_cell = self.make_cell()
-        return self._base_cell
-
-    def make_cell(self):
-        scales = [2**(i / self.num_scales) for i in range(self.num_scales)]
-        scales = np.array(scales)
-        ratios = np.array(self.aspect_ratios)
-        ws = np.outer(scales, ratios[:, 0]).reshape(-1, 1)
-        hs = np.outer(scales, ratios[:, 1]).reshape(-1, 1)
-        anchors = np.hstack((-0.5 * ws, -0.5 * hs, 0.5 * ws, 0.5 * hs))
-        return anchors
-
-    def make_grid(self, stride):
-        cell = self.base_cell * stride * self.anchor_base_scale
-        x_steps = np.arange(stride // 2, self.image_size[1], stride)
-        y_steps = np.arange(stride // 2, self.image_size[0], stride)
-        offset_x, offset_y = np.meshgrid(x_steps, y_steps)
-        offset_x = offset_x.flatten()
-        offset_y = offset_y.flatten()
-        offsets = np.stack((offset_x, offset_y, offset_x, offset_y), axis=-1)
-        offsets = offsets[:, np.newaxis, :]
-        return (cell + offsets).reshape(-1, 4)
-
-    def generate(self):
-        return [
-            self.make_grid(2**l)
-            for l in range(self.min_level, self.max_level + 1)
-        ]
-
-    def __call__(self):
-        if not hasattr(self, '_anchor_vars'):
-            anchor_vars = []
-            helper = LayerHelper('anchor_grid')
-            for idx, l in enumerate(range(self.min_level, self.max_level + 1)):
-                stride = 2**l
-                anchors = self.make_grid(stride)
-                var = helper.create_parameter(
-                    attr=ParamAttr(name='anchors_{}'.format(idx)),
-                    shape=anchors.shape,
-                    dtype='float32',
-                    stop_gradient=True,
-                    default_initializer=NumpyArrayInitializer(anchors))
-                anchor_vars.append(var)
-                var.persistable = True
-            self._anchor_vars = anchor_vars
-
-        return self._anchor_vars
+        return output_boxes, output_scores
 
 
 @register
@@ -1418,7 +1361,7 @@ class ConvMixer(nn.Layer):
         Seq, ActBn = nn.Sequential, lambda x: Seq(x, nn.GELU(), nn.BatchNorm2D(dim))
         Residual = type('Residual', (Seq, ),
                         {'forward': lambda self, x: self[0](x) + x})
-        return Seq(*[
+        return Seq(* [
             Seq(Residual(
                 ActBn(
                     nn.Conv2D(

+ 4 - 2
paddlex/ppdet/modeling/losses/__init__.py

@@ -25,7 +25,8 @@ from . import fairmot_loss
 from . import gfocal_loss
 from . import detr_loss
 from . import sparsercnn_loss
-from . import varifocal_loss
+from . import focal_loss
+from . import smooth_l1_loss
 
 from .yolo_loss import *
 from .iou_aware_loss import *
@@ -40,4 +41,5 @@ from .fairmot_loss import *
 from .gfocal_loss import *
 from .detr_loss import *
 from .sparsercnn_loss import *
-from .varifocal_loss import *
+from .focal_loss import *
+from .smooth_l1_loss import *

+ 1 - 1
paddlex/ppdet/modeling/losses/detr_loss.py

@@ -80,7 +80,7 @@ class DETRLoss(nn.Layer):
             target_label = target_label.reshape([bs, num_query_objects])
         if self.use_focal_loss:
             target_label = F.one_hot(target_label,
-                                     self.num_classes + 1)[:, :, :-1]
+                                     self.num_classes + 1)[..., :-1]
         return {
             'loss_class': self.loss_coeff['class'] * sigmoid_focal_loss(
                 logits, target_label, num_gts / num_query_objects)
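
The `[..., :-1]` slice is rank-agnostic, unlike the old `[:, :, :-1]`, but the trick is the same: background labels equal to num_classes one-hot into an extra last column, and dropping that column leaves an all-zero target row for background. A tiny demonstration:

    import paddle
    import paddle.nn.functional as F

    num_classes = 3
    labels = paddle.to_tensor([1, 3])  # 3 == background index
    target = F.one_hot(labels, num_classes + 1)[..., :-1]
    # target: [[0., 1., 0.],
    #          [0., 0., 0.]]  <- background becomes all zeros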

+ 66 - 0
paddlex/ppdet/modeling/losses/focal_loss.py

@@ -0,0 +1,66 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn.functional as F
+import paddle.nn as nn
+from paddlex.ppdet.core.workspace import register
+
+__all__ = ['FocalLoss']
+
+
+@register
+class FocalLoss(nn.Layer):
+    """A wrapper around paddle.nn.functional.sigmoid_focal_loss.
+    Args:
+        use_sigmoid (bool): currently only supports use_sigmoid=True
+        alpha (float): parameter alpha in Focal Loss
+        gamma (float): parameter gamma in Focal Loss
+        loss_weight (float): the final loss will be multiplied by this
+    """
+
+    def __init__(self,
+                 use_sigmoid=True,
+                 alpha=0.25,
+                 gamma=2.0,
+                 loss_weight=1.0):
+        super(FocalLoss, self).__init__()
+        assert use_sigmoid, \
+            'Focal Loss only supports sigmoid at the moment'
+        self.use_sigmoid = use_sigmoid
+        self.alpha = alpha
+        self.gamma = gamma
+        self.loss_weight = loss_weight
+
+    def forward(self, pred, target, reduction='none'):
+        """forward function.
+        Args:
+            pred (Tensor): logits of class prediction, of shape (N, num_classes)
+            target (Tensor): target class label, of shape (N, )
+            reduction (str): the way to reduce loss, one of (none, sum, mean)
+        """
+        num_classes = pred.shape[1]
+        target = F.one_hot(target, num_classes + 1).cast(pred.dtype)
+        target = target[:, :-1].detach()
+        loss = F.sigmoid_focal_loss(
+            pred,
+            target,
+            alpha=self.alpha,
+            gamma=self.gamma,
+            reduction=reduction)
+        return loss * self.loss_weight
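
A short usage sketch for the wrapper above; the shapes and the background convention (label == num_classes, zeroed out by the one-hot slice) are assumptions for illustration:

    import paddle

    num_classes = 80
    logits = paddle.randn([16, num_classes])
    labels = paddle.randint(0, num_classes + 1, [16])  # num_classes == background

    loss_fn = FocalLoss(alpha=0.25, gamma=2.0, loss_weight=1.0)
    loss = loss_fn(logits, labels, reduction='sum')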

+ 61 - 0
paddlex/ppdet/modeling/losses/smooth_l1_loss.py

@@ -0,0 +1,61 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddlex.ppdet.core.workspace import register
+
+__all__ = ['SmoothL1Loss']
+
+
+@register
+class SmoothL1Loss(nn.Layer):
+    """Smooth L1 Loss.
+    Args:
+        beta (float): controls the smooth region; it becomes L1 Loss when beta=0.0
+        loss_weight (float): the final loss will be multiplied by this
+    """
+
+    def __init__(self, beta=1.0, loss_weight=1.0):
+        super(SmoothL1Loss, self).__init__()
+        assert beta >= 0
+        self.beta = beta
+        self.loss_weight = loss_weight
+
+    def forward(self, pred, target, reduction='none'):
+        """forward function, based on fvcore.
+        Args:
+            pred (Tensor): prediction tensor
+            target (Tensor): target tensor, pred.shape must be the same as target.shape
+            reduction (str): the way to reduce loss, one of (none, sum, mean)
+        """
+        assert reduction in ('none', 'sum', 'mean')
+        target = target.detach()
+        if self.beta < 1e-5:
+            loss = paddle.abs(pred - target)
+        else:
+            n = paddle.abs(pred - target)
+            cond = n < self.beta
+            loss = paddle.where(cond, 0.5 * n**2 / self.beta,
+                                n - 0.5 * self.beta)
+        if reduction == 'mean':
+            loss = loss.mean() if loss.size > 0 else 0.0 * loss.sum()
+        elif reduction == 'sum':
+            loss = loss.sum()
+        return loss * self.loss_weight
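
The piecewise form is the usual Huber-style smooth L1: quadratic 0.5 * d**2 / beta while |d| < beta, linear |d| - 0.5 * beta beyond it, with the two pieces meeting at |d| = beta. A quick numeric check with beta = 1.0:

    import paddle

    loss_fn = SmoothL1Loss(beta=1.0)
    pred = paddle.to_tensor([0.5, 2.0])
    target = paddle.zeros([2])
    print(loss_fn(pred, target))  # [0.125, 1.5]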

+ 1 - 1
paddlex/ppdet/modeling/losses/sparsercnn_loss.py

@@ -198,7 +198,7 @@ class SparseRCNNLoss(nn.Layer):
         # Retrieve the matching between the outputs of the last layer and the targets
         indices = self.matcher(outputs_without_aux, targets)
 
-        # Compute the average number of target boxes accross all nodes, for normalization purposes
+        # Compute the average number of target boxes across all nodes, for normalization purposes
         num_boxes = sum(len(t["labels"]) for t in targets)
         num_boxes = paddle.to_tensor(
             [num_boxes],

+ 1 - 2
paddlex/ppdet/modeling/losses/ssd_loss.py

@@ -20,8 +20,7 @@ import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
 from paddlex.ppdet.core.workspace import register
-from ..ops import iou_similarity
-from ..bbox_utils import bbox2delta
+from ..bbox_utils import iou_similarity, bbox2delta
 
 __all__ = ['SSDLoss']
 

+ 2 - 2
paddlex/ppdet/modeling/losses/yolo_loss.py

@@ -21,7 +21,7 @@ import paddle.nn as nn
 import paddle.nn.functional as F
 from paddlex.ppdet.core.workspace import register
 
-from ..bbox_utils import decode_yolo, xywh2xyxy, iou_similarity
+from ..bbox_utils import decode_yolo, xywh2xyxy, batch_iou_similarity
 
 __all__ = ['YOLOv3Loss']
 
@@ -80,7 +80,7 @@ class YOLOv3Loss(nn.Layer):
         gwh = gbox[:, :, 0:2] + gbox[:, :, 2:4] * 0.5
         gbox = paddle.concat([gxy, gwh], axis=-1)
 
-        iou = iou_similarity(pbox, gbox)
+        iou = batch_iou_similarity(pbox, gbox)
         iou.stop_gradient = True
         iou_max = iou.max(2)  # [N, M1]
         iou_mask = paddle.cast(iou_max <= self.ignore_thresh, dtype=pbox.dtype)

+ 23 - 17
paddlex/ppdet/modeling/mot/matching/jde_matching.py

@@ -26,7 +26,7 @@ warnings.filterwarnings("ignore")
 __all__ = [
     'merge_matches',
     'linear_assignment',
-    'cython_bbox_ious',
+    'bbox_ious',
     'iou_distance',
     'embedding_distance',
     'fuse_motion',
@@ -68,22 +68,28 @@ def linear_assignment(cost_matrix, thresh):
     return matches, unmatched_a, unmatched_b
 
 
-def cython_bbox_ious(atlbrs, btlbrs):
-    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)
-    if ious.size == 0:
+def bbox_ious(atlbrs, btlbrs):
+    boxes = np.ascontiguousarray(atlbrs, dtype=np.float64)
+    query_boxes = np.ascontiguousarray(btlbrs, dtype=np.float64)
+    N = boxes.shape[0]
+    K = query_boxes.shape[0]
+    ious = np.zeros((N, K), dtype=boxes.dtype)
+    if N * K == 0:
         return ious
-    try:
-        import cython_bbox
-    except Exception as e:
-        print('cython_bbox not found, please install cython_bbox.'
-              'for example: `pip install cython_bbox`.')
-        raise e
-
-    ious = cython_bbox.bbox_overlaps(
-        np.ascontiguousarray(
-            atlbrs, dtype=np.float),
-        np.ascontiguousarray(
-            btlbrs, dtype=np.float))
+
+    for k in range(K):
+        box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + 1) *
+                    (query_boxes[k, 3] - query_boxes[k, 1] + 1))
+        for n in range(N):
+            iw = (min(boxes[n, 2], query_boxes[k, 2]) - max(
+                boxes[n, 0], query_boxes[k, 0]) + 1)
+            if iw > 0:
+                ih = (min(boxes[n, 3], query_boxes[k, 3]) - max(
+                    boxes[n, 1], query_boxes[k, 1]) + 1)
+                if ih > 0:
+                    ua = float((boxes[n, 2] - boxes[n, 0] + 1) * (boxes[
+                        n, 3] - boxes[n, 1] + 1) + box_area - iw * ih)
+                    ious[n, k] = iw * ih / ua
     return ious
 
 
@@ -98,7 +104,7 @@ def iou_distance(atracks, btracks):
     else:
         atlbrs = [track.tlbr for track in atracks]
         btlbrs = [track.tlbr for track in btracks]
-    _ious = cython_bbox_ious(atlbrs, btlbrs)
+    _ious = bbox_ious(atlbrs, btlbrs)
     cost_matrix = 1 - _ious
 
     return cost_matrix
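
The pure-NumPy bbox_ious above removes the cython_bbox dependency at the cost of a Python double loop. Where that loop becomes a bottleneck, an equivalent vectorized version (a sketch keeping the same +1 inclusive-pixel convention) could be:

    import numpy as np

    def bbox_ious_vectorized(atlbrs, btlbrs):
        N, K = len(atlbrs), len(btlbrs)
        if N * K == 0:
            return np.zeros((N, K), dtype=np.float64)
        a = np.ascontiguousarray(atlbrs, dtype=np.float64)  # [N, 4] tlbr
        b = np.ascontiguousarray(btlbrs, dtype=np.float64)  # [K, 4] tlbr
        iw = np.minimum(a[:, None, 2], b[None, :, 2]) - \
            np.maximum(a[:, None, 0], b[None, :, 0]) + 1
        ih = np.minimum(a[:, None, 3], b[None, :, 3]) - \
            np.maximum(a[:, None, 1], b[None, :, 1]) + 1
        inter = np.clip(iw, 0, None) * np.clip(ih, 0, None)
        area_a = (a[:, 2] - a[:, 0] + 1) * (a[:, 3] - a[:, 1] + 1)
        area_b = (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)
        return inter / (area_a[:, None] + area_b[None, :] - inter)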

+ 1 - 1
paddlex/ppdet/modeling/mot/tracker/base_jde_tracker.py

@@ -51,7 +51,7 @@ class BaseTrack(object):
 
     history = OrderedDict()
     features = []
-    curr_feature = None
+    curr_feat = None
     score = 0
     start_frame = 0
     frame_id = 0

+ 9 - 7
paddlex/ppdet/modeling/mot/tracker/deepsort_tracker.py

@@ -96,18 +96,20 @@ class DeepSORTTracker(object):
         Perform measurement update and track management.
         Args:
             pred_dets (np.array): Detection results of the image, the shape is
-                [N, 6], means 'x0, y0, x1, y1, score, cls_id'.
+                [N, 6], means 'cls_id, score, x0, y0, x1, y1'.
             pred_embs (np.array): Embedding results of the image, the shape is
                 [N, 128], usually pred_embs.shape[1] is a multiple of 128.
         """
-        pred_tlwhs = pred_dets[:, :4]
-        pred_scores = pred_dets[:, 4:5]
-        pred_cls_ids = pred_dets[:, 5:]
+        pred_cls_ids = pred_dets[:, 0:1]
+        pred_scores = pred_dets[:, 1:2]
+        pred_xyxys = pred_dets[:, 2:6]
+        pred_tlwhs = np.concatenate(
+            (pred_xyxys[:, 0:2], pred_xyxys[:, 2:4] - pred_xyxys[:, 0:2] + 1),
+            axis=1)
 
         detections = [
-            Detection(tlwh, score, feat, cls_id)
-            for tlwh, score, feat, cls_id in zip(pred_tlwhs, pred_scores,
-                                                 pred_embs, pred_cls_ids)
+            Detection(tlwh, score, feat, cls_id) for tlwh, score, feat, cls_id
+            in zip(pred_tlwhs, pred_scores, pred_embs, pred_cls_ids)
         ]
 
         # Run matching cascade.
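
The layout change above (detections now arrive as 'cls_id, score, x0, y0, x1, y1' instead of 'x0, y0, x1, y1, score, cls_id') means the tracker slices columns 2:6 for coordinates and converts them to top-left/width/height with an inclusive +1 convention. A quick numeric check of that conversion:

    import numpy as np

    xyxy = np.array([[10., 20., 50., 80.]])
    tlwh = np.concatenate(
        (xyxy[:, 0:2], xyxy[:, 2:4] - xyxy[:, 0:2] + 1), axis=1)
    # tlwh == [[10., 20., 41., 61.]]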

+ 50 - 28
paddlex/ppdet/modeling/mot/tracker/jde_tracker.py

@@ -38,12 +38,13 @@ class JDETracker(object):
     JDE tracker, support single class and multi classes
 
     Args:
+        use_byte (bool): Whether to use ByteTracker, default False
         num_classes (int): the number of classes
         det_thresh (float): threshold of detection score
         track_buffer (int): buffer for tracker
         min_box_area (int): min box area to filter out low quality boxes
         vertical_ratio (float): w/h, the vertical ratio of the bbox to filter
-            bad results. If set <0 means no need to filter bboxes,usually set
+            bad results. If set <= 0, bboxes are not filtered; usually set
             1.6 for pedestrian tracking.
         tracked_thresh (float): linear assignment threshold of tracked
             stracks and detections
@@ -51,8 +52,15 @@ class JDETracker(object):
             tracked stracks and unmatched detections
         unconfirmed_thresh (float): linear assignment threshold of
             unconfirmed stracks and unmatched detections
+        conf_thres (float): confidence threshold for tracking, also used in
+            ByteTracker as the higher confidence threshold
+        match_thres (float): linear assignment threshold of tracked
+            stracks and detections in ByteTracker
+        low_conf_thres (float): lower confidence threshold for tracking in
+            ByteTracker
+        input_size (list): input feature map size for the reid model, in [h, w]
+            format; [64, 192] by default.
         motion (str): motion model, KalmanFilter as default
-        conf_thres (float): confidence threshold for tracking
         metric_type (str): either "euclidean" or "cosine", the distance metric
             used for measurement to track association.
     """
@@ -62,14 +70,15 @@ class JDETracker(object):
                  num_classes=1,
                  det_thresh=0.3,
                  track_buffer=30,
-                 min_box_area=200,
-                 vertical_ratio=1.6,
+                 min_box_area=0,
+                 vertical_ratio=0,
                  tracked_thresh=0.7,
                  r_tracked_thresh=0.5,
                  unconfirmed_thresh=0.7,
                  conf_thres=0,
                  match_thres=0.8,
                  low_conf_thres=0.2,
+                 input_size=[64, 192],
                  motion='KalmanFilter',
                  metric_type='euclidean'):
         self.use_byte = use_byte
@@ -86,6 +95,7 @@ class JDETracker(object):
         self.match_thres = match_thres
         self.low_conf_thres = low_conf_thres
 
+        self.input_size = input_size
         if motion == 'KalmanFilter':
             self.motion = KalmanFilter()
         self.metric_type = metric_type
@@ -106,13 +116,13 @@ class JDETracker(object):
 
         Args:
             pred_dets (np.array): Detection results of the image, the shape is
-                [N, 6], means 'x0, y0, x1, y1, score, cls_id'.
+                [N, 6], means 'cls_id, score, x0, y0, x1, y1'.
             pred_embs (np.array): Embedding results of the image, the shape is
                 [N, 128] or [N, 512].
 
         Return:
             output_stracks_dict (dict(list)): The list contains information
-                regarding the online_tracklets for the recieved image tensor.
+                regarding the online_tracklets for the received image tensor.
         """
         self.frame_id += 1
         if self.frame_id == 1:
@@ -128,7 +138,7 @@ class JDETracker(object):
 
         # unify single and multi classes detection and embedding results
         for cls_id in range(self.num_classes):
-            cls_idx = (pred_dets[:, 5:] == cls_id).squeeze(-1)
+            cls_idx = (pred_dets[:, 0:1] == cls_id).squeeze(-1)
             pred_dets_dict[cls_id] = pred_dets[cls_idx]
             if pred_embs is not None:
                 pred_embs_dict[cls_id] = pred_embs[cls_idx]
@@ -139,14 +149,15 @@ class JDETracker(object):
             """ Step 1: Get detections by class"""
             pred_dets_cls = pred_dets_dict[cls_id]
             pred_embs_cls = pred_embs_dict[cls_id]
-            remain_inds = (pred_dets_cls[:, 4:5] > self.conf_thres).squeeze(-1)
+            remain_inds = (pred_dets_cls[:, 1:2] > self.conf_thres).squeeze(-1)
             if remain_inds.sum() > 0:
                 pred_dets_cls = pred_dets_cls[remain_inds]
-                if self.use_byte:
+                if pred_embs_cls is None:
+                    # in original ByteTrack
                     detections = [
                         STrack(
-                            STrack.tlbr_to_tlwh(tlbrs[:4]),
-                            tlbrs[4],
+                            STrack.tlbr_to_tlwh(tlbrs[2:6]),
+                            tlbrs[1],
                             cls_id,
                             30,
                             temp_feat=None) for tlbrs in pred_dets_cls
@@ -155,10 +166,9 @@ class JDETracker(object):
                     pred_embs_cls = pred_embs_cls[remain_inds]
                     detections = [
                         STrack(
-                            STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id,
-                            30, temp_feat)
-                        for (tlbrs, temp_feat
-                             ) in zip(pred_dets_cls, pred_embs_cls)
+                            STrack.tlbr_to_tlwh(tlbrs[2:6]), tlbrs[1], cls_id,
+                            30, temp_feat) for (tlbrs, temp_feat) in
+                        zip(pred_dets_cls, pred_embs_cls)
                     ]
             else:
                 detections = []
@@ -181,11 +191,12 @@ class JDETracker(object):
             # Predict the current location with KalmanFilter
             STrack.multi_predict(track_pool_dict[cls_id], self.motion)
 
-            if self.use_byte:
+            if pred_embs_cls is None:
+                # in original ByteTrack
                 dists = matching.iou_distance(track_pool_dict[cls_id],
                                               detections)
                 matches, u_track, u_detection = matching.linear_assignment(
-                    dists, thresh=self.match_thres)  #
+                    dists, thresh=self.match_thres)  # not self.tracked_thresh
             else:
                 dists = matching.embedding_distance(
                     track_pool_dict[cls_id],
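The two branches above differ only in the cost matrix: pure IoU distance for the embedding-free ByteTrack path, embedding distance fused with Kalman motion otherwise. The matching.linear_assignment implementation is not shown in this diff; below is a rough SciPy stand-in for intuition only (the real code may use a different solver, e.g. the lap package):

    import numpy as np
    from scipy.optimize import linear_sum_assignment

    def linear_assignment_sketch(cost, thresh):
        # Hungarian matching; pairs whose cost exceeds `thresh` stay unmatched.
        if cost.size == 0:
            return [], list(range(cost.shape[0])), list(range(cost.shape[1]))
        rows, cols = linear_sum_assignment(cost)
        matches = [(r, c) for r, c in zip(rows, cols) if cost[r, c] <= thresh]
        matched_r = {r for r, _ in matches}
        matched_c = {c for _, c in matches}
        u_track = [r for r in range(cost.shape[0]) if r not in matched_r]
        u_det = [c for c in range(cost.shape[1]) if c not in matched_c]
        return matches, u_track, u_det

    # iou_cost[i, j] = 1 - IoU(track_i, det_j)
    iou_cost = np.array([[0.1, 0.9], [0.95, 0.3]])
    print(linear_assignment_sketch(iou_cost, thresh=0.8))
    # -> ([(0, 0), (1, 1)], [], [])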
@@ -213,22 +224,33 @@ class JDETracker(object):
             # None of the steps below happen if there are no undetected tracks.
             """ Step 3: Second association, with IOU"""
             if self.use_byte:
-                inds_low = pred_dets_dict[cls_id][:, 4:5] > self.low_conf_thres
-                inds_high = pred_dets_dict[cls_id][:, 4:5] < self.conf_thres
+                inds_low = pred_dets_dict[cls_id][:, 1:2] > self.low_conf_thres
+                inds_high = pred_dets_dict[cls_id][:, 1:2] < self.conf_thres
                 inds_second = np.logical_and(inds_low, inds_high).squeeze(-1)
                 pred_dets_cls_second = pred_dets_dict[cls_id][inds_second]
 
                # associate the unmatched tracks with the low-score detections
                 if len(pred_dets_cls_second) > 0:
-                    detections_second = [
-                        STrack(
-                            STrack.tlbr_to_tlwh(tlbrs[:4]),
-                            tlbrs[4],
-                            cls_id,
-                            30,
-                            temp_feat=None)
-                        for tlbrs in pred_dets_cls_second[:, :5]
-                    ]
+                    if pred_embs_dict[cls_id] is None:
+                        # in original ByteTrack
+                        detections_second = [
+                            STrack(
+                                STrack.tlbr_to_tlwh(tlbrs[2:6]),
+                                tlbrs[1],
+                                cls_id,
+                                30,
+                                temp_feat=None)
+                            for tlbrs in pred_dets_cls_second
+                        ]
+                    else:
+                        pred_embs_cls_second = pred_embs_dict[cls_id][
+                            inds_second]
+                        detections_second = [
+                            STrack(
+                                STrack.tlbr_to_tlwh(tlbrs[2:6]), tlbrs[1],
+                                cls_id, 30, temp_feat) for (tlbrs, temp_feat)
+                            in zip(pred_dets_cls_second, pred_embs_cls_second)
+                        ]
                 else:
                     detections_second = []
                 r_tracked_stracks = [

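A self-contained illustration (plain NumPy, invented values) of the two confidence bands used above: detections with score > conf_thres feed the first association, while those with low_conf_thres < score < conf_thres are held back for the second, IoU-based association. The bands are disjoint, so no detection can be matched twice.

    import numpy as np

    pred_dets = np.array([
        [0, 0.92, 10, 20, 50, 120],  # high band -> first association
        [0, 0.45, 60, 30, 90, 80],   # middle band -> second association
        [0, 0.10, 15, 25, 40, 100],  # below low_conf_thres -> dropped
    ])
    conf_thres, low_conf_thres = 0.6, 0.2

    first = (pred_dets[:, 1:2] > conf_thres).squeeze(-1)
    inds_low = pred_dets[:, 1:2] > low_conf_thres
    inds_high = pred_dets[:, 1:2] < conf_thres
    second = np.logical_and(inds_low, inds_high).squeeze(-1)
    print(first, second)  # [ True False False] [False  True False]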
+ 6 - 0
paddlex/ppdet/modeling/necks/__init__.py

@@ -19,6 +19,9 @@ from . import ttf_fpn
 from . import centernet_fpn
 from . import bifpn
 from . import csp_pan
+from . import es_pan
+from . import lc_pan
+from . import custom_pan
 
 from .fpn import *
 from .yolo_fpn import *
@@ -28,3 +31,6 @@ from .centernet_fpn import *
 from .blazeface_fpn import *
 from .bifpn import *
 from .csp_pan import *
+from .es_pan import *
+from .lc_pan import *
+from .custom_pan import *

+ 18 - 19
paddlex/ppdet/modeling/necks/csp_pan.py

@@ -19,7 +19,6 @@ import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
 from paddle import ParamAttr
-from paddle.regularizer import L2Decay
 from paddlex.ppdet.core.workspace import register, serializable
 from ..shape_spec import ShapeSpec
 
@@ -36,8 +35,6 @@ class ConvBNLayer(nn.Layer):
                  act='leaky_relu'):
         super(ConvBNLayer, self).__init__()
         initializer = nn.initializer.KaimingUniform()
-        self.act = act
-        assert self.act in ['leaky_relu', "hard_swish"]
         self.conv = nn.Conv2D(
             in_channels=in_channel,
             out_channels=out_channel,
@@ -48,13 +45,14 @@ class ConvBNLayer(nn.Layer):
             weight_attr=ParamAttr(initializer=initializer),
             bias_attr=False)
         self.bn = nn.BatchNorm2D(out_channel)
+        if act == "hard_swish":
+            act = 'hardswish'
+        self.act = act
 
     def forward(self, x):
         x = self.bn(self.conv(x))
-        if self.act == "leaky_relu":
-            x = F.leaky_relu(x)
-        elif self.act == "hard_swish":
-            x = F.hardswish(x)
+        if self.act:
+            x = getattr(F, self.act)(x)
         return x
 
 
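The refactor above replaces the if/elif activation chain with a name lookup on paddle.nn.functional, which works because both 'leaky_relu' and 'hardswish' exist there under those exact names (the legacy 'hard_swish' config value is remapped first). A standalone sketch of the same dispatch pattern:

    import paddle
    import paddle.nn.functional as F

    def apply_act(x, act):
        if act == 'hard_swish':   # legacy config name remapped, as above
            act = 'hardswish'
        return getattr(F, act)(x) if act else x

    x = paddle.randn([1, 8, 4, 4])
    y = apply_act(x, 'leaky_relu')   # same result as F.leaky_relu(x)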
@@ -75,10 +73,11 @@ class DPModule(nn.Layer):
                  out_channel=96,
                  kernel_size=3,
                  stride=1,
-                 act='leaky_relu'):
+                 act='leaky_relu',
+                 use_act_in_out=True):
         super(DPModule, self).__init__()
         initializer = nn.initializer.KaimingUniform()
-        self.act = act
+        self.use_act_in_out = use_act_in_out
         self.dwconv = nn.Conv2D(
             in_channels=in_channel,
             out_channels=out_channel,
@@ -98,17 +97,17 @@ class DPModule(nn.Layer):
             weight_attr=ParamAttr(initializer=initializer),
             bias_attr=False)
         self.bn2 = nn.BatchNorm2D(out_channel)
-
-    def act_func(self, x):
-        if self.act == "leaky_relu":
-            x = F.leaky_relu(x)
-        elif self.act == "hard_swish":
-            x = F.hardswish(x)
-        return x
+        if act == "hard_swish":
+            act = 'hardswish'
+        self.act = act
 
     def forward(self, x):
-        x = self.act_func(self.bn1(self.dwconv(x)))
-        x = self.act_func(self.bn2(self.pwconv(x)))
+        x = self.bn1(self.dwconv(x))
+        if self.act:
+            x = getattr(F, self.act)(x)
+        x = self.bn2(self.pwconv(x))
+        if self.use_act_in_out and self.act:
+            x = getattr(F, self.act)(x)
         return x
 
 
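The new use_act_in_out flag lets the depthwise-separable block end linearly (BN output with no trailing activation), presumably so a later residual add or element-wise op sees an undistorted output. A usage sketch, assuming the module path shown in this diff:

    import paddle
    from paddlex.ppdet.modeling.necks.csp_pan import DPModule  # path per this diff

    # default: activation after both the depthwise and pointwise convs
    dp_act = DPModule(in_channel=96, out_channel=96, act='hard_swish')
    # linear output: activation only after the depthwise conv
    dp_linear = DPModule(
        in_channel=96, out_channel=96, act='hard_swish', use_act_in_out=False)
    y = dp_linear(paddle.randn([1, 96, 32, 32]))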
@@ -197,7 +196,7 @@ class CSPLayer(nn.Layer):
         self.final_conv = ConvBNLayer(
             2 * mid_channels, out_channels, 1, act=act)
 
-        self.blocks = nn.Sequential(*[
+        self.blocks = nn.Sequential(* [
             DarknetBottleneck(
                 mid_channels,
                 mid_channels,

Some files were not shown because too many files changed in this diff