Merge pull request #1067 from will-jl944/develop_jf

RCNN models support prediction with batch size greater than 1
FlyingQianMM 4 years ago
commit 6dba2ab54e
3 changed files with 20 additions and 71 deletions
  1. paddlex/cv/models/detector.py (+6, -32)
  2. paddlex/cv/transforms/batch_operators.py (+2, -12)
  3. paddlex/deploy.py (+12, -27)

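With these changes, RCNN detectors (FasterRCNN, MaskRCNN) take the same batched prediction path as the other detectors, so a single call can carry more than one image. A minimal usage sketch, assuming an exported inference model directory and local image files (both placeholders); `paddlex.deploy.Predictor` is the deployment entry point touched by this diff:

```python
import paddlex as pdx

# Placeholder path to an exported FasterRCNN/MaskRCNN inference model.
predictor = pdx.deploy.Predictor('./inference_model')

# Batch size > 1: pass a list of images instead of a single file path.
results = predictor.predict(
    img_file=['demo_1.jpg', 'demo_2.jpg', 'demo_3.jpg'])
print(len(results))  # one prediction dict per input image
```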
+ 6 - 32
paddlex/cv/models/detector.py

@@ -511,13 +511,8 @@ class BaseDetector(BaseModel):
         batch_transforms = self._compose_batch_transform(transforms, 'test')
         batch_samples = batch_transforms(batch_samples)
         if to_tensor:
-            if isinstance(batch_samples, dict):
-                for k in batch_samples:
-                    batch_samples[k] = paddle.to_tensor(batch_samples[k])
-            else:
-                for sample in batch_samples:
-                    for k in sample:
-                        sample[k] = paddle.to_tensor(sample[k])
+            for k in batch_samples:
+                batch_samples[k] = paddle.to_tensor(batch_samples[k])

         return batch_samples

@@ -987,18 +982,6 @@ class FasterRCNN(BaseDetector):
         super(FasterRCNN, self).__init__(
             model_name='FasterRCNN', num_classes=num_classes, **params)

-    def run(self, net, inputs, mode):
-        if mode in ['train', 'eval']:
-            outputs = net(inputs)
-        else:
-            outputs = []
-            for sample in inputs:
-                net_out = net(sample)
-                for key in net_out:
-                    net_out[key] = net_out[key].numpy()
-                outputs.append(net_out)
-        return outputs
-
     def _compose_batch_transform(self, transforms, mode='train'):
         if mode == 'train':
             default_batch_transforms = [
@@ -1022,8 +1005,7 @@ class FasterRCNN(BaseDetector):

         batch_transforms = BatchCompose(
             custom_batch_transforms + default_batch_transforms,
-            collate_batch=collate_batch,
-            return_list=mode == 'test')
+            collate_batch=collate_batch)

         return batch_transforms

@@ -1069,13 +1051,6 @@ class FasterRCNN(BaseDetector):
         self.fixed_input_shape = image_shape
         return self._define_input_spec(image_shape)

-    def _postprocess(self, batch_pred):
-        prediction = [
-            super(FasterRCNN, self)._postprocess(pred)[0]
-            for pred in batch_pred
-        ]
-        return prediction
-

 class PPYOLO(YOLOv3):
     def __init__(self,
@@ -1555,7 +1530,7 @@ class PPYOLOv2(YOLOv3):
         return self._define_input_spec(image_shape)


-class MaskRCNN(FasterRCNN):
+class MaskRCNN(BaseDetector):
     def __init__(self,
                  num_classes=80,
                  backbone='ResNet50_vd',
@@ -1790,7 +1765,7 @@ class MaskRCNN(FasterRCNN):
                 'mask_post_process': mask_post_process
             })
         self.with_fpn = with_fpn
-        super(FasterRCNN, self).__init__(
+        super(MaskRCNN, self).__init__(
             model_name='MaskRCNN', num_classes=num_classes, **params)

     def _compose_batch_transform(self, transforms, mode='train'):
@@ -1816,8 +1791,7 @@ class MaskRCNN(FasterRCNN):

         batch_transforms = BatchCompose(
             custom_batch_transforms + default_batch_transforms,
-            collate_batch=collate_batch,
-            return_list=mode == 'test')
+            collate_batch=collate_batch)

         return batch_transforms


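The net effect on `BaseDetector._preprocess` is that the batch transforms now always hand back one batch dict, so the `to_tensor` conversion is a single loop over its fields. A small sketch of that behavior, with illustrative field names and shapes (not taken verbatim from the repository):

```python
import numpy as np
import paddle

# Two preprocessed images collated into one batch dict (illustrative shapes).
batch_samples = {
    'image': np.zeros((2, 3, 608, 608), dtype='float32'),
    'im_shape': np.array([[608., 608.], [608., 608.]], dtype='float32'),
    'scale_factor': np.ones((2, 2), dtype='float32'),
}

# Same conversion the updated code applies when to_tensor=True.
for k in batch_samples:
    batch_samples[k] = paddle.to_tensor(batch_samples[k])
```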
+ 2 - 12
paddlex/cv/transforms/batch_operators.py

@@ -26,14 +26,10 @@ from paddlex.utils import logging


 class BatchCompose(Transform):
-    def __init__(self,
-                 batch_transforms=None,
-                 collate_batch=True,
-                 return_list=False):
+    def __init__(self, batch_transforms=None, collate_batch=True):
         super(BatchCompose, self).__init__()
         self.batch_transforms = batch_transforms
         self.collate_batch = collate_batch
-        self.return_list = return_list

     def __call__(self, samples):
         if self.batch_transforms is not None:
@@ -55,13 +51,7 @@ class BatchCompose(Transform):
                 if k in sample:
                     sample.pop(k)

-        if self.return_list:
-            batch_data = [{
-                k: np.expand_dims(
-                    sample[k], axis=0)
-                for k in sample
-            } for sample in samples]
-        elif self.collate_batch:
+        if self.collate_batch:
             batch_data = default_collate_fn(samples)
         else:
             batch_data = {}

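With `return_list` gone, `BatchCompose.__call__` always produces a single batch structure: either collated into one batch dict via `default_collate_fn` (`collate_batch=True`, the default) or assembled field by field. A rough numpy illustration of the collated case, standing in for Paddle's `default_collate_fn` (a sketch of the expected shapes, not the actual implementation):

```python
import numpy as np

def collate_samples(samples):
    # Stack each field across samples: N dicts holding (3, H, W) images
    # become one dict whose 'image' entry has shape (N, 3, H, W).
    return {k: np.stack([s[k] for s in samples], axis=0) for k in samples[0]}

samples = [
    {'image': np.zeros((3, 608, 608), dtype='float32'),
     'im_shape': np.array([608., 608.], dtype='float32')},
    {'image': np.ones((3, 608, 608), dtype='float32'),
     'im_shape': np.array([608., 608.], dtype='float32')},
]
batch = collate_samples(samples)
print(batch['image'].shape)  # (2, 3, 608, 608)
```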
+ 12 - 27
paddlex/deploy.py

@@ -166,16 +166,10 @@ class Predictor(object):
                     'score_map': s
                 } for l, s in zip(label_map, score_map)]
         elif self._model.model_type == 'detector':
-            if 'RCNN' in self._model.__class__.__name__:
-                net_outputs = [{
-                    k: v
-                    for k, v in zip(['bbox', 'bbox_num', 'mask'], res)
-                } for res in net_outputs]
-            else:
-                net_outputs = {
-                    k: v
-                    for k, v in zip(['bbox', 'bbox_num', 'mask'], net_outputs)
-                }
+            net_outputs = {
+                k: v
+                for k, v in zip(['bbox', 'bbox_num', 'mask'], net_outputs)
+            }
             preds = self._model._postprocess(net_outputs)
             if len(preds) == 1:
                 preds = preds[0]
@@ -210,25 +204,16 @@ class Predictor(object):
         preprocessed_input = self.preprocess(images, transforms)
         self.timer.preprocess_time_s.end(iter_num=len(images))

-        ori_shape = None
         self.timer.inference_time_s.start()
-        if 'RCNN' in self._model.__class__.__name__:
-            if len(preprocessed_input) > 1:
-                logging.warning(
-                    "{} only supports inference with batch size equal to 1."
-                    .format(self._model.__class__.__name__))
-            net_outputs = [
-                self.raw_predict(sample) for sample in preprocessed_input
-            ]
-            self.timer.inference_time_s.end(iter_num=len(images))
-        else:
-            net_outputs = self.raw_predict(preprocessed_input)
-            self.timer.inference_time_s.end(iter_num=1)
-            ori_shape = preprocessed_input.get('ori_shape', None)
+        net_outputs = self.raw_predict(preprocessed_input)
+        self.timer.inference_time_s.end(iter_num=1)

         self.timer.postprocess_time_s.start()
         results = self.postprocess(
-            net_outputs, topk, ori_shape=ori_shape, transforms=transforms)
+            net_outputs,
+            topk,
+            ori_shape=preprocessed_input.get('ori_shape', None),
+            transforms=transforms)
         self.timer.postprocess_time_s.end(iter_num=len(images))

         return results
@@ -259,11 +244,11 @@ class Predictor(object):
         else:
             images = img_file

-        for step in range(warmup_iters):
+        for _ in range(warmup_iters):
             self._run(images=images, topk=topk, transforms=transforms)
         self.timer.reset()

-        for step in range(repeats):
+        for _ in range(repeats):
             results = self._run(
                 images=images, topk=topk, transforms=transforms)
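Because `Predictor._run` no longer special-cases RCNN models, warmup and timing go through one code path for every detector. A hedged example of timing batched inference, assuming `warmup_iters` and `repeats` are exposed on `predict` as the surrounding hunk suggests:

```python
# 5 untimed warm-up passes, then 20 timed passes over the two-image batch.
results = predictor.predict(
    img_file=['demo_1.jpg', 'demo_2.jpg'],
    warmup_iters=5,
    repeats=20)
```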