
Refactor v2 (#2585)

* upgrade inference model

* change arch

* debugging

* simplify

* type hint

* doc string

* change dir name

* unified flag

* debug ocr pipeline for new structure

* PR review

* enhance image batch sampler
Tingquan Gao 11 months ago
parent
commit
e98fde5b8c
40 changed files with 3270 additions and 55 deletions
  1. paddlex/inference/__init__.py  +6 -2
  2. paddlex/inference/common/__init__.py  +13 -0
  3. paddlex/inference/common/batch_sampler/__init__.py  +16 -0
  4. paddlex/inference/common/batch_sampler/base_batch_sampler.py  +90 -0
  5. paddlex/inference/common/batch_sampler/image_batch_sampler.py  +101 -0
  6. paddlex/inference/common/reader/__init__.py  +15 -0
  7. paddlex/inference/common/reader/image_reader.py  +69 -0
  8. paddlex/inference/common/result/__init__.py  +25 -0
  9. paddlex/inference/common/result/base_cv_result.py  +44 -0
  10. paddlex/inference/common/result/base_result.py  +44 -0
  11. paddlex/inference/common/result/mixin.py  +432 -0
  12. paddlex/inference/models_new/__init__.py  +108 -0
  13. paddlex/inference/models_new/base/__init__.py  +16 -0
  14. paddlex/inference/models_new/base/pp_infer/__init__.py  +15 -0
  15. paddlex/inference/models_new/base/pp_infer/base_infer.py  +17 -0
  16. paddlex/inference/models_new/base/predictor/__init__.py  +16 -0
  17. paddlex/inference/models_new/base/predictor/base_predictor.py  +100 -0
  18. paddlex/inference/models_new/base/predictor/basic_predictor.py  +196 -0
  19. paddlex/inference/models_new/common/__init__.py  +25 -0
  20. paddlex/inference/models_new/common/static_infer.py  +233 -0
  21. paddlex/inference/models_new/common/vision/__init__.py  +23 -0
  22. paddlex/inference/models_new/common/vision/funcs.py  +69 -0
  23. paddlex/inference/models_new/common/vision/processors.py  +235 -0
  24. paddlex/inference/models_new/image_classification/__init__.py  +15 -0
  25. paddlex/inference/models_new/image_classification/predictor.py  +163 -0
  26. paddlex/inference/models_new/image_classification/processors.py  +89 -0
  27. paddlex/inference/models_new/image_classification/result.py  +81 -0
  28. paddlex/inference/models_new/text_detection/__init__.py  +15 -0
  29. paddlex/inference/models_new/text_detection/predictor.py  +135 -0
  30. paddlex/inference/models_new/text_detection/processors.py  +415 -0
  31. paddlex/inference/models_new/text_detection/result.py  +33 -0
  32. paddlex/inference/models_new/text_recognition/__init__.py  +15 -0
  33. paddlex/inference/models_new/text_recognition/predictor.py  +106 -0
  34. paddlex/inference/models_new/text_recognition/processors.py  +186 -0
  35. paddlex/inference/models_new/text_recognition/result.py  +64 -0
  36. paddlex/inference/pipelines_new/components/common/crop_image_regions.py  +2 -12
  37. paddlex/inference/pipelines_new/ocr/pipeline.py  +12 -28
  38. paddlex/inference/utils/benchmark.py  +28 -11
  39. paddlex/utils/flags.py  +2 -0
  40. paddlex/utils/logging.py  +1 -2

+ 6 - 2
paddlex/inference/__init__.py

@@ -12,11 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .models import create_predictor
 from ..utils import flags
+from ..utils.flags import USE_NEW_INFERENCE, NEW_PREDICTOR
 
-if flags.USE_NEW_INFERENCE:
+if USE_NEW_INFERENCE:
     from .pipelines_new import create_pipeline
 else:
     from .pipelines import create_pipeline
+if NEW_PREDICTOR:
+    from .models_new import create_predictor
+else:
+    from .models import create_predictor
 from .utils.pp_option import PaddlePredictorOption
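
The dispatch above keeps a single public name (`create_predictor`) while the flags in `paddlex/utils/flags.py` (file 39 in the list) decide which implementation backs it. Below is a minimal, self-contained sketch of the same flag-gated import pattern; the environment variable name and helper functions are hypothetical stand-ins, not the ones PaddleX uses.

import os

# Hypothetical flag, read once at import time the way paddlex/utils/flags.py
# reads its flags (the real environment variable names are not shown in this diff).
USE_NEW_PATH = os.environ.get("EXAMPLE_USE_NEW_PATH", "0") in ("1", "True", "true")


def _create_thing_old():
    return "old implementation"


def _create_thing_new():
    return "new implementation"


# Module-level dispatch: downstream callers import one name and never need to
# know which implementation was selected.
create_thing = _create_thing_new if USE_NEW_PATH else _create_thing_old

if __name__ == "__main__":
    print(create_thing())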

+ 13 - 0
paddlex/inference/common/__init__.py

@@ -0,0 +1,13 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+ 16 - 0
paddlex/inference/common/batch_sampler/__init__.py

@@ -0,0 +1,16 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base_batch_sampler import BaseBatchSampler
+from .image_batch_sampler import ImageBatchSampler

+ 90 - 0
paddlex/inference/common/batch_sampler/base_batch_sampler.py

@@ -0,0 +1,90 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Union, Tuple, List, Dict, Any, Iterator
+from abc import ABC, abstractmethod
+
+from ....utils.flags import (
+    INFER_BENCHMARK,
+    INFER_BENCHMARK_ITER,
+    INFER_BENCHMARK_DATA_SIZE,
+)
+
+
+class BaseBatchSampler:
+    """BaseBatchSampler"""
+
+    def __init__(self, batch_size: int = 1) -> None:
+        """Initializes the BaseBatchSampler.
+
+        Args:
+            batch_size (int, optional): The size of each batch. Defaults to 1.
+        """
+        super().__init__()
+        self._batch_size = batch_size
+        self._benchmark = INFER_BENCHMARK
+        self._benchmark_iter = INFER_BENCHMARK_ITER
+        self._benchmark_data_size = INFER_BENCHMARK_DATA_SIZE
+
+    @property
+    def batch_size(self) -> int:
+        """Gets the batch size."""
+        return self._batch_size
+
+    @batch_size.setter
+    def batch_size(self, batch_size: int) -> None:
+        """Sets the batch size.
+
+        Args:
+            batch_size (int): The batch size to set.
+
+        Raises:
+            AssertionError: If the batch size is not greater than 0.
+        """
+        assert batch_size > 0
+        self._batch_size = batch_size
+
+    def __call__(self, input: Any) -> Iterator[List[Any]]:
+        """
+        Sample batch data with the specified input.
+
+        If input is None and benchmarking is enabled, it will yield batches
+        of random data for the specified number of iterations.
+        Otherwise, it will yield from the sample() method.
+
+        Args:
+            input (Any): The input data to be sampled.
+
+        Yields:
+            Iterator[List[Any]]: An iterator yielding the batch data.
+        """
+        if input is None and self._benchmark:
+            for _ in range(self._benchmark_iter):
+                yield self._rand_batch(self._benchmark_data_size)
+        else:
+            yield from self.sample(input)
+
+    @abstractmethod
+    def sample(self, *args: Tuple[Any], **kwargs: Dict[str, Any]) -> Iterator[list]:
+        """sample batch data"""
+        raise NotImplementedError
+
+    @abstractmethod
+    def _rand_batch(self, batch_size: int) -> List[Any]:
+        """rand batch data
+
+        Args:
+            batch_size (int): batch size
+        """
+        raise NotImplementedError
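
To make the contract above concrete: a subclass only implements sample() and _rand_batch(); batching against batch_size and the benchmark-driven random-batch path come from BaseBatchSampler. The sampler below is a hypothetical example, not part of this PR.

from typing import Any, Iterator, List

from paddlex.inference.common.batch_sampler import BaseBatchSampler


class NumberBatchSampler(BaseBatchSampler):
    """Hypothetical sampler that batches a flat sequence of numbers."""

    def sample(self, inputs) -> Iterator[List[Any]]:
        batch = []
        for item in inputs:
            batch.append(item)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        if batch:
            yield batch  # trailing partial batch

    def _rand_batch(self, data_size) -> List[Any]:
        # Only used when benchmarking is enabled and the input is None.
        return [0] * self.batch_size


sampler = NumberBatchSampler(batch_size=3)
print(list(sampler(range(7))))  # [[0, 1, 2], [3, 4, 5], [6]]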

+ 101 - 0
paddlex/inference/common/batch_sampler/image_batch_sampler.py

@@ -0,0 +1,101 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import ast
+from pathlib import Path
+import numpy as np
+
+from ....utils import logging
+from ....utils.download import download
+from ....utils.cache import CACHE_DIR
+from .base_batch_sampler import BaseBatchSampler
+
+
+class ImageBatchSampler(BaseBatchSampler):
+
+    SUFFIX = ["jpg", "png", "jpeg", "JPEG", "JPG", "bmp"]
+
+    # XXX: auto download for url
+    def _download_from_url(self, in_path):
+        file_name = Path(in_path).name
+        save_path = Path(CACHE_DIR) / "predict_input" / file_name
+        download(in_path, save_path, overwrite=True)
+        return save_path.as_posix()
+
+    def _get_files_list(self, fp):
+        file_list = []
+        if fp is None or not os.path.exists(fp):
+            raise Exception(f"Not found any img file in path: {fp}")
+
+        if os.path.isfile(fp) and fp.split(".")[-1] in self.SUFFIX:
+            file_list.append(fp)
+        elif os.path.isdir(fp):
+            for root, dirs, files in os.walk(fp):
+                for single_file in files:
+                    if single_file.split(".")[-1] in self.SUFFIX:
+                        file_list.append(os.path.join(root, single_file))
+        if len(file_list) == 0:
+            raise Exception("Not found any file in {}".format(fp))
+        file_list = sorted(file_list)
+        return file_list
+
+    def sample(self, inputs):
+        if not isinstance(inputs, list):
+            inputs = [inputs]
+
+        batch = []
+        for input in inputs:
+            if isinstance(input, np.ndarray):
+                # yield [input]
+                batch.append(input)
+                if len(batch) == self.batch_size:
+                    yield batch
+                    batch = []
+            elif isinstance(input, str):
+                file_path = (
+                    self._download_from_url(input)
+                    if input.startswith("http")
+                    else input
+                )
+                file_list = self._get_files_list(file_path)
+                for file_path in file_list:
+                    batch.append(file_path)
+                    if len(batch) == self.batch_size:
+                        yield batch
+                        batch = []
+            else:
+                logging.warning(
+                    f"Not supported input data type! Only `numpy.ndarray` and `str` are supported! So has been ignored: {input}."
+                )
+        if len(batch) > 0:
+            yield batch
+
+    def _rand_batch(self, data_size):
+        def parse_size(s):
+            res = ast.literal_eval(s)
+            if isinstance(res, int):
+                return (res, res)
+            else:
+                assert isinstance(res, (tuple, list))
+                assert len(res) == 2
+                assert all(isinstance(item, int) for item in res)
+                return res
+
+        size = parse_size(data_size)
+        rand_batch = [
+            np.random.randint(0, 256, (*size, 3), dtype=np.uint8)
+            for _ in range(self.batch_size)
+        ]
+        return rand_batch
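
A short usage sketch for ImageBatchSampler. In-memory arrays are used so the snippet stays self-contained; file paths, directories, and http(s) URLs are handled by the same sample() method shown above.

import numpy as np

from paddlex.inference.common.batch_sampler import ImageBatchSampler

sampler = ImageBatchSampler(batch_size=2)

# Three dummy images; real inputs may also be image paths, a directory, or a URL.
inputs = [np.zeros((32, 32, 3), dtype=np.uint8) for _ in range(3)]

for batch in sampler(inputs):
    # Batches of at most 2 items; the last batch holds the remainder.
    print(len(batch))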

+ 15 - 0
paddlex/inference/common/reader/__init__.py

@@ -0,0 +1,15 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .image_reader import ReadImage

+ 69 - 0
paddlex/inference/common/reader/image_reader.py

@@ -0,0 +1,69 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import cv2
+
+from ...utils.io import ImageReader, PDFReader
+
+
+class ReadImage:
+    """Load image from the file."""
+
+    _FLAGS_DICT = {
+        "BGR": cv2.IMREAD_COLOR,
+        "RGB": cv2.IMREAD_COLOR,
+        "GRAY": cv2.IMREAD_GRAYSCALE,
+    }
+
+    def __init__(self, format="BGR"):
+        """
+        Initialize the instance.
+
+        Args:
+            format (str, optional): Target color format to convert the image to.
+                Choices are 'BGR', 'RGB', and 'GRAY'. Default: 'BGR'.
+        """
+        super().__init__()
+        self.format = format
+        flags = self._FLAGS_DICT[self.format]
+        self._img_reader = ImageReader(backend="opencv", flags=flags)
+
+    def __call__(self, imgs):
+        """apply"""
+        return [self.read(img) for img in imgs]
+
+    def read(self, img):
+        if isinstance(img, np.ndarray):
+            if self.format == "RGB":
+                img = img[:, :, ::-1]
+            return img
+        elif isinstance(img, str):
+            blob = self._img_reader.read(img)
+            if blob is None:
+                raise Exception(f"Image read Error: {img}")
+
+            if self.format == "RGB":
+                if blob.ndim != 3:
+                    raise RuntimeError("Array is not 3-dimensional.")
+                # BGR to RGB
+                blob = blob[..., ::-1]
+            return blob
+        else:
+            raise TypeError(
+                f"ReadImage only supports the following types:\n"
+                f"1. str, indicating a image file path or a directory containing image files.\n"
+                f"2. numpy.ndarray.\n"
+                f"However, got type: {type(img).__name__}."
+            )
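
ReadImage is the per-batch decoder used by the new predictors: given a list of batch items it returns decoded arrays in the requested color order. A minimal sketch using an in-memory array (a path string would be decoded from disk via the OpenCV-backed ImageReader instead).

import numpy as np

from paddlex.inference.common.reader import ReadImage

reader = ReadImage(format="RGB")

# ndarray inputs are passed through (only a BGR -> RGB channel flip);
# str inputs would be read from disk.
imgs = reader([np.zeros((8, 8, 3), dtype=np.uint8)])
print(imgs[0].shape)  # (8, 8, 3)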

+ 25 - 0
paddlex/inference/common/result/__init__.py

@@ -0,0 +1,25 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base_result import BaseResult
+from .base_cv_result import BaseCVResult
+from .mixin import (
+    StrMixin,
+    JsonMixin,
+    Base64Mixin,
+    ImgMixin,
+    CSVMixin,
+    HtmlMixin,
+    XlsxMixin,
+)

+ 44 - 0
paddlex/inference/common/result/base_cv_result.py

@@ -0,0 +1,44 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base_result import BaseResult
+from .mixin import StrMixin, JsonMixin, ImgMixin
+from ...utils.io import ImageWriter
+
+
+class BaseCVResult(BaseResult, StrMixin, JsonMixin, ImgMixin):
+    """Base class for computer vision results."""
+
+    INPUT_IMG_KEY = "input_img"
+
+    def __init__(self, data: dict) -> None:
+        """
+        Initialize the BaseCVResult.
+
+        Args:
+            data (dict): The initial data.
+
+        Raises:
+            AssertionError: If the required key (`BaseCVResult.INPUT_IMG_KEY`) is not found in the data.
+        """
+        assert (
+            BaseCVResult.INPUT_IMG_KEY in data
+        ), f"`{BaseCVResult.INPUT_IMG_KEY}` is needed, but not found in `{list(data.keys())}`!"
+        self._input_img = data.pop("input_img", None)
+        self._img_writer = ImageWriter(backend="pillow")
+
+        super().__init__(data)
+        StrMixin.__init__(self)
+        JsonMixin.__init__(self)
+        ImgMixin.__init__(self, "pillow")

+ 44 - 0
paddlex/inference/common/result/base_result.py

@@ -0,0 +1,44 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+
+
+class BaseResult(dict):
+    """Base class for result objects that can save themselves.
+
+    This class inherits from dict and provides properties and methods for handling result.
+    """
+
+    def __init__(self, data: dict) -> None:
+        """Initializes the BaseResult with the given data.
+
+        Args:
+            data (dict): The initial data.
+        """
+        super().__init__(data)
+        self._save_funcs = []
+
+    def save_all(self, save_path: str) -> None:
+        """Calls all registered save methods with the given save path.
+
+        Args:
+            save_path (str): The path to save the result to.
+        """
+        for func in self._save_funcs:
+            signature = inspect.signature(func)
+            if "save_path" in signature.parameters:
+                func(save_path=save_path)
+            else:
+                func()
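
save_all() simply walks whatever save functions subclasses and mixins have registered in self._save_funcs, forwarding save_path only to those that accept it. A tiny hypothetical subclass shows the mechanism.

from paddlex.inference.common.result import BaseResult


class ToyResult(BaseResult):
    """Hypothetical result that registers a single plain-text save function."""

    def __init__(self, data):
        super().__init__(data)
        self._save_funcs.append(self.save_to_txt)

    def save_to_txt(self, save_path):
        # Accepts save_path, so save_all() will forward it.
        with open(save_path, "w") as f:
            f.write(str(dict(self)))


res = ToyResult({"input_path": "a.jpg", "score": 0.9})
res.save_all(save_path="./toy_result.txt")  # dispatches to save_to_txt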

+ 432 - 0
paddlex/inference/common/result/mixin.py

@@ -0,0 +1,432 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Union, Tuple, List, Dict, Any, Iterator
+from abc import abstractmethod
+from pathlib import Path
+import json
+import copy
+import numpy as np
+from PIL import Image
+import pandas as pd
+
+from ....utils import logging
+from ...utils.io import (
+    JsonWriter,
+    ImageReader,
+    ImageWriter,
+    CSVWriter,
+    HtmlWriter,
+    XlsxWriter,
+    TextWriter,
+)
+
+
+def _save_list_data(save_func, save_path, data, *args, **kwargs):
+    """
+    Save list type data to the specified path.
+    If data type is a list, iterate through it and save each element using save_func with a modified filename (appending an index and the original file extension).
+
+    Args:
+        save_func (Callable): The function to be used for saving data.
+        save_path (Union[str, Path]): The path to save the data.
+        data (Union[None, list, Any]): The data to be saved. If None, the function will return immediately.
+        *args: Additional positional arguments to be passed to save_func.
+        **kwargs: Additional keyword arguments to be passed to save_func.
+
+    Returns:
+        None
+    """
+    save_path = Path(save_path)
+    if data is None:
+        return
+    if isinstance(data, list):
+        for idx, single in enumerate(data):
+            save_func(
+                (
+                    save_path.parent / f"{save_path.stem}_{idx}{save_path.suffix}"
+                ).as_posix(),
+                single,
+                *args,
+                **kwargs,
+            )
+        return
+    save_func(save_path.as_posix(), data, *args, **kwargs)
+    logging.info(f"The result has been saved in {save_path}.")
+
+
+class StrMixin:
+    """Mixin class for adding string conversion capabilities."""
+
+    @property
+    def str(self) -> str:
+        """Property to get the string representation of the result.
+
+        Returns:
+            str: The str type string representation of the result.
+        """
+
+        return self._to_str(self)
+
+    def _to_str(
+        self,
+        data: dict,
+        json_format: bool = False,
+        indent: int = 4,
+        ensure_ascii: bool = False,
+    ) -> str:
+        """Convert the given result data to a string representation.
+
+        Args:
+            data (dict): The data would be converted to str.
+            json_format (bool): If True, return a JSON formatted string. Default is False.
+            indent (int): Number of spaces to indent for JSON formatting. Default is 4.
+            ensure_ascii (bool): If True, ensure all characters are ASCII. Default is False.
+
+        Returns:
+            str: The string representation of the data.
+        """
+        if json_format:
+            return json.dumps(data.json, indent=indent, ensure_ascii=ensure_ascii)
+        else:
+            return str(data)
+
+    def print(
+        self, json_format: bool = False, indent: int = 4, ensure_ascii: bool = False
+    ) -> None:
+        """Print the string representation of the result.
+
+        Args:
+            json_format (bool): If True, print a JSON formatted string. Default is False.
+            indent (int): Number of spaces to indent for JSON formatting. Default is 4.
+            ensure_ascii (bool): If True, ensure all characters are ASCII. Default is False.
+        """
+        str_ = self._to_str(
+            self, json_format=json_format, indent=indent, ensure_ascii=ensure_ascii
+        )
+        logging.info(str_)
+
+
+class JsonMixin:
+    """Mixin class for adding JSON serialization capabilities."""
+
+    def __init__(self) -> None:
+        self._json_writer = JsonWriter()
+        self._save_funcs.append(self.save_to_json)
+
+    def _to_json(self) -> dict:
+        """Convert the object to a JSON-serializable format.
+
+        Returns:
+            dict: A dictionary representation of the object that is JSON-serializable.
+        """
+
+        def _format_data(obj):
+            """Helper function to format data into a JSON-serializable format.
+
+            Args:
+                obj: The object to be formatted.
+
+            Returns:
+                Any: The formatted object.
+            """
+            if isinstance(obj, np.float32):
+                return float(obj)
+            elif isinstance(obj, np.ndarray):
+                return [_format_data(item) for item in obj.tolist()]
+            elif isinstance(obj, pd.DataFrame):
+                return obj.to_json(orient="records", force_ascii=False)
+            elif isinstance(obj, Path):
+                return obj.as_posix()
+            elif isinstance(obj, dict):
+                return dict({k: _format_data(v) for k, v in obj.items()})
+            elif isinstance(obj, (list, tuple)):
+                return [_format_data(i) for i in obj]
+            else:
+                return obj
+
+        return _format_data(copy.deepcopy(self))
+
+    @property
+    def json(self) -> Dict[str, Any]:
+        """Property to get the JSON representation of the result.
+
+        Returns:
+            dict: The dict type JSON representation of the result.
+        """
+
+        return self._to_json()
+
+    def save_to_json(
+        self,
+        save_path: str,
+        indent: int = 4,
+        ensure_ascii: bool = False,
+        *args: List,
+        **kwargs: Dict,
+    ) -> None:
+        """Save the JSON representation of the object to a file.
+
+        Args:
+            save_path (str): The path to save the JSON file. If the save path does not end with '.json', it appends the base name and suffix of the input path.
+            indent (int): The number of spaces to indent for pretty printing. Default is 4.
+            ensure_ascii (bool): If False, non-ASCII characters will be included in the output. Default is False.
+            *args: Additional positional arguments to pass to the underlying writer.
+            **kwargs: Additional keyword arguments to pass to the underlying writer.
+        """
+        if not str(save_path).endswith(".json"):
+            save_path = Path(save_path) / f"{Path(self['input_path']).stem}.json"
+        self._json_writer.write(
+            save_path.as_posix(),
+            self.json,
+            indent=indent,
+            ensure_ascii=ensure_ascii,
+            *args,
+            **kwargs,
+        )
+
+
+class Base64Mixin:
+    """Mixin class for adding Base64 encoding capabilities."""
+
+    def __init__(self, *args: List, **kwargs: Dict) -> None:
+        """Initializes the Base64Mixin.
+
+        Args:
+            *args: Positional arguments to pass to the TextWriter.
+            **kwargs: Keyword arguments to pass to the TextWriter.
+        """
+        self._base64_writer = TextWriter(*args, **kwargs)
+        self._save_funcs.append(self.save_to_base64)
+
+    @abstractmethod
+    def _to_base64(self) -> str:
+        """Abstract method to convert the result to Base64.
+
+        Returns:
+        str: The str type Base64 representation result.
+        """
+        raise NotImplementedError
+
+    @property
+    def base64(self) -> str:
+        """
+        Property that returns the Base64 encoded content.
+
+        Returns:
+            str: The base64 representation of the result.
+        """
+        return self._to_base64()
+
+    def save_to_base64(self, save_path: str, *args: List, **kwargs: Dict) -> None:
+        """Saves the Base64 encoded content to the specified path.
+
+        Args:
+            save_path (str): The path to save the base64 representation result. If the save path does not end with '.b64', it appends the base name and suffix of the input path.
+
+            *args: Additional positional arguments that will be passed to the base64 writer.
+            **kwargs: Additional keyword arguments that will be passed to the base64 writer.
+        """
+
+        if not str(save_path).lower().endswith((".b64")):
+            fp = Path(self["input_path"])
+            save_path = Path(save_path) / f"{fp.stem}{fp.suffix}"
+        self._base64_writer.write(save_path.as_posix(), self.base64, *args, **kwargs)
+
+
+class ImgMixin:
+    """Mixin class for adding image handling capabilities."""
+
+    def __init__(self, backend: str = "pillow", *args: List, **kwargs: Dict) -> None:
+        """Initializes ImgMixin.
+
+        Args:
+            backend (str): The backend to use for image processing. Defaults to "pillow".
+            *args: Additional positional arguments to pass to the ImageWriter.
+            **kwargs: Additional keyword arguments to pass to the ImageWriter.
+        """
+        self._img_writer = ImageWriter(backend=backend, *args, **kwargs)
+        self._save_funcs.append(self.save_to_img)
+
+    @abstractmethod
+    def _to_img(self) -> Union[np.ndarray, Image.Image]:
+        """Abstract method to convert the result to an image.
+
+        Returns:
+        Union[np.ndarray, Image.Image]: The image representation result.
+        """
+        raise NotImplementedError
+
+    @property
+    def img(self) -> Image.Image:
+        """Property to get the image representation of the result.
+
+        Returns:
+            Image.Image: The image representation of the result.
+        """
+        image = self._to_img()
+        # The img must be a PIL.Image obj
+        if isinstance(image, np.ndarray):
+            return Image.fromarray(image)
+        return image
+
+    def save_to_img(self, save_path: str, *args: List, **kwargs: Dict) -> None:
+        """Saves the image representation of the result to the specified path.
+
+        Args:
+            save_path (str): The path to save the image. If the save path does not end with .jpg or .png, it appends the input path's stem and suffix to the save path.
+            *args: Additional positional arguments that will be passed to the image writer.
+            **kwargs: Additional keyword arguments that will be passed to the image writer.
+        """
+        if not str(save_path).lower().endswith((".jpg", ".png")):
+            fp = Path(self["input_path"])
+            save_path = Path(save_path) / f"{fp.stem}{fp.suffix}"
+        self._img_writer.write(save_path.as_posix(), self.img, *args, **kwargs)
+
+
+class CSVMixin:
+    """Mixin class for adding CSV handling capabilities."""
+
+    def __init__(self, backend: str = "pandas", *args: List, **kwargs: Dict) -> None:
+        """Initializes the CSVMixin.
+
+        Args:
+            backend (str): The backend to use for CSV operations (default is "pandas").
+            *args: Optional positional arguments to pass to the CSVWriter.
+            **kwargs: Optional keyword arguments to pass to the CSVWriter.
+        """
+        self._csv_writer = CSVWriter(backend=backend, *args, **kwargs)
+        if not hasattr(self, "_save_funcs"):
+            self._save_funcs = []
+        self._save_funcs.append(self.save_to_csv)
+
+    @property
+    def csv(self) -> pd.DataFrame:
+        """Property to get the pandas Dataframe representation of the result.
+
+        Returns:
+            pandas.DataFrame: The pandas.DataFrame representation of the result.
+        """
+        return self._to_csv()
+
+    @abstractmethod
+    def _to_csv(self) -> pd.DataFrame:
+        """Abstract method to convert the result to pandas.DataFrame.
+
+        Returns:
+        pandas.DataFrame: The pandas.DataFrame representation result.
+        """
+        raise NotImplementedError
+
+    def save_to_csv(self, save_path: str, *args: List, **kwargs: Dict) -> None:
+        """Saves the result to a CSV file.
+
+        Args:
+            save_path (str): The path to save the CSV file. If the path does not end with ".csv",
+                the stem of the input path attribute (self['input_path']) will be used as the filename.
+            *args: Optional positional arguments to pass to the CSV writer's write method.
+            **kwargs: Optional keyword arguments to pass to the CSV writer's write method.
+        """
+        if not str(save_path).endswith(".csv"):
+            save_path = Path(save_path) / f"{Path(self['input_path']).stem}.csv"
+        self._csv_writer.write(save_path.as_posix(), self.csv, *args, **kwargs)
+
+
+class HtmlMixin:
+    """Mixin class for adding HTML handling capabilities."""
+
+    def __init__(self, *args: List, **kwargs: Dict) -> None:
+        """
+        Initializes the HTML writer and appends the save_to_html method to the save functions list.
+
+        Args:
+            *args: Positional arguments passed to the HtmlWriter.
+            **kwargs: Keyword arguments passed to the HtmlWriter.
+        """
+        self._html_writer = HtmlWriter(*args, **kwargs)
+        self._save_funcs.append(self.save_to_html)
+
+    @property
+    def html(self) -> str:
+        """Property to get the HTML representation of the result.
+
+        Returns:
+            str: The str type HTML representation of the result.
+        """
+        return self._to_html()
+
+    @abstractmethod
+    def _to_html(self) -> str:
+        """Abstract method to convert the result to str type HTML representation.
+
+        Returns:
+        str: The str type HTML representation result.
+        """
+        raise NotImplementedError
+
+    def save_to_html(self, save_path: str, *args: List, **kwargs: Dict) -> None:
+        """Saves the HTML representation of the object to the specified path.
+
+        Args:
+            save_path (str): The path to save the HTML file.
+            *args: Additional positional arguments.
+            **kwargs: Additional keyword arguments.
+        """
+        if not str(save_path).endswith(".html"):
+            save_path = Path(save_path) / f"{Path(self['input_path']).stem}.html"
+        self._html_writer.write(save_path.as_posix(), self.html, *args, **kwargs)
+
+
+class XlsxMixin:
+    """Mixin class for adding XLSX handling capabilities."""
+
+    def __init__(self, *args: List, **kwargs: Dict) -> None:
+        """Initializes the XLSX writer and appends the save_to_xlsx method to the save functions.
+
+        Args:
+            *args: Positional arguments to be passed to the XlsxWriter constructor.
+            **kwargs: Keyword arguments to be passed to the XlsxWriter constructor.
+        """
+        self._xlsx_writer = XlsxWriter(*args, **kwargs)
+        self._save_funcs.append(self.save_to_xlsx)
+
+    @property
+    def xlsx(self) -> str:
+        """Property to get the XLSX representation of the result.
+
+        Returns:
+            str: The str type XLSX representation of the result.
+        """
+        return self._to_xlsx()
+
+    @abstractmethod
+    def _to_xlsx(self) -> str:
+        """Abstract method to convert the result to str type XLSX representation.
+
+        Returns:
+        str: The str type XLSX representation result.
+        """
+        raise NotImplementedError
+
+    def save_to_xlsx(self, save_path: str, *args: List, **kwargs: Dict) -> None:
+        """Saves the HTML representation to an XLSX file.
+
+        Args:
+            save_path (str): The path to save the XLSX file. If the path does not end with ".xlsx",
+                             the filename will be set to the stem of the input path with ".xlsx" extension.
+            *args: Additional positional arguments to pass to the XLSX writer.
+            **kwargs: Additional keyword arguments to pass to the XLSX writer.
+        """
+        if not str(save_path).endswith(".xlsx"):
+            save_path = Path(save_path) / f"{Path(self['input_path']).stem}.xlsx"
+        self._xlsx_writer.write(save_path.as_posix(), self.xlsx, *args, **kwargs)
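
Putting the mixins together: a concrete result class composes BaseResult with the mixins it needs and implements only the abstract _to_* converters, which is what BaseCVResult and the per-model result classes added under models_new/*/result.py do. The class below is a hypothetical sketch (the output directory is assumed to exist).

import numpy as np

from paddlex.inference.common.result import BaseResult, StrMixin, JsonMixin, ImgMixin


class TinyImgResult(BaseResult, StrMixin, JsonMixin, ImgMixin):
    """Hypothetical result supporting print(), save_to_json() and save_to_img()."""

    def __init__(self, data):
        super().__init__(data)
        StrMixin.__init__(self)
        JsonMixin.__init__(self)           # registers save_to_json
        ImgMixin.__init__(self, "pillow")  # registers save_to_img

    def _to_img(self):
        # Real results render boxes/labels; a flat white image is enough here.
        return np.full((16, 16, 3), 255, dtype=np.uint8)


res = TinyImgResult({"input_path": "demo.png", "label": "cat", "score": 0.99})
res.print(json_format=True)         # StrMixin + JsonMixin
res.save_all(save_path="./output")  # writes demo.json and demo.png under ./output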

+ 108 - 0
paddlex/inference/models_new/__init__.py

@@ -0,0 +1,108 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+from ...utils import errors
+from ..utils.official_models import official_models
+from .base import BasePredictor, BasicPredictor
+
+from .image_classification import ClasPredictor
+from .text_detection import TextDetPredictor
+from .text_recognition import TextRecPredictor
+
+# from .table_recognition import TablePredictor
+# from .object_detection import DetPredictor
+# from .instance_segmentation import InstanceSegPredictor
+# from .semantic_segmentation import SegPredictor
+# from .general_recognition import ShiTuRecPredictor
+# from .ts_fc import TSFcPredictor
+# from .ts_ad import TSAdPredictor
+# from .ts_cls import TSClsPredictor
+# from .image_unwarping import WarpPredictor
+# from .multilabel_classification import MLClasPredictor
+# from .anomaly_detection import UadPredictor
+# from .formula_recognition import LaTeXOCRPredictor
+# from .face_recognition import FaceRecPredictor
+
+
+def _create_hp_predictor(
+    model_name, model_dir, device, config, hpi_params, *args, **kwargs
+):
+    try:
+        from paddlex_hpi.models import HPPredictor
+    except ModuleNotFoundError:
+        raise RuntimeError(
+            "The PaddleX HPI plugin is not properly installed, and the high-performance model inference features are not available."
+        ) from None
+    try:
+        predictor = HPPredictor.get(model_name)(
+            model_dir=model_dir,
+            config=config,
+            device=device,
+            *args,
+            hpi_params=hpi_params,
+            **kwargs,
+        )
+    except errors.others.ClassNotFoundException:
+        raise ValueError(
+            f"{model_name} is not supported by the PaddleX HPI plugin."
+        ) from None
+    return predictor
+
+
+def create_predictor(
+    model: str,
+    device=None,
+    pp_option=None,
+    use_hpip: bool = False,
+    hpi_params: Optional[Dict[str, Any]] = None,
+    *args,
+    **kwargs,
+) -> BasePredictor:
+    model_dir = check_model(model)
+    config = BasePredictor.load_config(model_dir)
+    model_name = config["Global"]["model_name"]
+    if use_hpip:
+        return _create_hp_predictor(
+            model_name=model_name,
+            model_dir=model_dir,
+            config=config,
+            hpi_params=hpi_params,
+            device=device,
+            *args,
+            **kwargs,
+        )
+    else:
+        return BasicPredictor.get(model_name)(
+            model_dir=model_dir,
+            config=config,
+            device=device,
+            pp_option=pp_option,
+            *args,
+            **kwargs,
+        )
+
+
+def check_model(model):
+    if Path(model).exists():
+        return Path(model)
+    elif model in official_models:
+        return official_models[model]
+    else:
+        raise Exception(
+            f"The model ({model}) is no exists! Please using directory of local model files or model name supported by PaddleX!"
+        )
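
With the NEW_PREDICTOR flag enabled, create_predictor() resolves a model name (or a local model directory containing the exported inference files) to one of the registered predictor classes. A hedged usage sketch; the model name and input path below are placeholders, and per-call options are forwarded to set_predictor() as shown in BasicPredictor later in this diff.

from paddlex.inference.models_new import create_predictor

# "PP-OCRv4_mobile_det" stands in for any supported model name; a local
# directory with the exported inference files also works.
model = create_predictor("PP-OCRv4_mobile_det", device="gpu:0")

# Per-call options (batch_size, device, pp_option) go through set_predictor().
for res in model("path/to/images", batch_size=2):
    res.print()
    res.save_to_json("./output/")
    res.save_to_img("./output/")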

+ 16 - 0
paddlex/inference/models_new/base/__init__.py

@@ -0,0 +1,16 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .pp_infer import BaseInfer
+from .predictor import BasePredictor, BasicPredictor

+ 15 - 0
paddlex/inference/models_new/base/pp_infer/__init__.py

@@ -0,0 +1,15 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base_infer import BaseInfer

+ 17 - 0
paddlex/inference/models_new/base/pp_infer/base_infer.py

@@ -0,0 +1,17 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class BaseInfer:
+    pass

+ 16 - 0
paddlex/inference/models_new/base/predictor/__init__.py

@@ -0,0 +1,16 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base_predictor import BasePredictor
+from .basic_predictor import BasicPredictor

+ 100 - 0
paddlex/inference/models_new/base/predictor/base_predictor.py

@@ -0,0 +1,100 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Union, Tuple, List, Dict, Any, Iterator
+from pathlib import Path
+from abc import abstractmethod, ABC
+
+from ....utils.io import YAMLReader
+
+
+class BasePredictor(ABC):
+    """BasePredictor."""
+
+    MODEL_FILE_PREFIX = "inference"
+
+    def __init__(self, model_dir: str, config: dict = None) -> None:
+        """Initializes the BasePredictor.
+
+        Args:
+            model_dir (str): The directory where the static model files are stored.
+            config (dict, optional): The configuration of model to infer. Defaults to None.
+        """
+        super().__init__()
+        self.model_dir = Path(model_dir)
+        self.config = config if config else self.load_config(self.model_dir)
+
+        # alias predict() to the __call__()
+        self.predict = self.__call__
+        self.benchmark = None
+
+    @property
+    def config_path(self) -> str:
+        """
+        Get the path to the configuration file.
+
+        Returns:
+            str: The path to the configuration file.
+        """
+        return self.get_config_path(self.model_dir)
+
+    @property
+    def model_name(self) -> str:
+        """
+        Get the model name.
+
+        Returns:
+            str: The model name.
+        """
+        return self.config["Global"]["model_name"]
+
+    @classmethod
+    def get_config_path(cls, model_dir) -> str:
+        """Get the path to the configuration file for the given model directory.
+
+        Args:
+            model_dir (Path): The directory where the static model files are stored.
+
+        Returns:
+            Path: The path to the configuration file.
+        """
+        return model_dir / f"{cls.MODEL_FILE_PREFIX}.yml"
+
+    @classmethod
+    def load_config(cls, model_dir) -> dict:
+        """Load the configuration from the specified model directory.
+
+        Args:
+            model_dir (Path): The directory where the static model files are stored.
+
+        Returns:
+            dict: The loaded configuration dictionary.
+        """
+        yaml_reader = YAMLReader()
+        return yaml_reader.read(cls.get_config_path(model_dir))
+
+    @abstractmethod
+    def __call__(self, input: Any, **kwargs: dict[str, Any]) -> Iterator[Any]:
+        """Predict with the given input and additional keyword arguments."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def apply(self, input: Any) -> Iterator[Any]:
+        """Predict the given input."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def set_predictor(self) -> None:
+        """Sets up the predictor."""
+        raise NotImplementedError

+ 196 - 0
paddlex/inference/models_new/base/predictor/basic_predictor.py

@@ -0,0 +1,196 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Union, Tuple, List, Dict, Any, Iterator
+from abc import abstractmethod
+
+from .....utils.subclass_register import AutoRegisterABCMetaClass
+from .....utils.flags import (
+    INFER_BENCHMARK,
+    INFER_BENCHMARK_WARMUP,
+)
+from .....utils import logging
+from ....utils.pp_option import PaddlePredictorOption
+from ....utils.benchmark import benchmark
+from ....common.batch_sampler import BaseBatchSampler
+from .base_predictor import BasePredictor
+
+
+class PredictionWrap:
+    """Wraps the prediction data and supports get by index."""
+
+    def __init__(self, data: Dict[str, List[Any]], num: int) -> None:
+        """Initializes the PredictionWrap with prediction data.
+
+        Args:
+            data (Dict[str, List[Any]]): A dictionary where keys are string identifiers and values are lists of predictions.
+            num (int): The number of predictions, that is length of values per key in the data dictionary.
+
+        Raises:
+            AssertionError: If the length of any list in data does not match num.
+        """
+        assert isinstance(data, dict), "data must be a dictionary"
+        for k in data:
+            assert len(data[k]) == num, f"{len(data[k])} != {num} for key {k}!"
+        self._data = data
+        self._keys = data.keys()
+
+    def get_by_idx(self, idx: int) -> Dict[str, Any]:
+        """Get the prediction by specified index.
+
+        Args:
+            idx (int): The index to get predictions from.
+
+        Returns:
+            Dict[str, Any]: A dictionary with the same keys as the input data, but with the values at the specified index.
+        """
+        return {key: self._data[key][idx] for key in self._keys}
+
+
+class BasicPredictor(
+    BasePredictor,
+    metaclass=AutoRegisterABCMetaClass,
+):
+    """BasicPredictor."""
+
+    __is_base = True
+
+    def __init__(
+        self,
+        model_dir: str,
+        config: Dict[str, Any] = None,
+        device: str = None,
+        pp_option: PaddlePredictorOption = None,
+    ) -> None:
+        """Initializes the BasicPredictor.
+
+        Args:
+            model_dir (str): The directory where the model files are stored.
+            config (Dict[str, Any], optional): The configuration dictionary. Defaults to None.
+            device (str, optional): The device to run the inference engine on. Defaults to None.
+            pp_option (PaddlePredictorOption, optional): The inference engine options. Defaults to None.
+        """
+        super().__init__(model_dir=model_dir, config=config)
+        if not pp_option:
+            pp_option = PaddlePredictorOption(model_name=self.model_name)
+        if device:
+            pp_option.device = device
+        self.pp_option = pp_option
+        self.batch_sampler = self._build_batch_sampler()
+        self.result_class = self._get_result_class()
+        logging.debug(f"{self.__class__.__name__}: {self.model_dir}")
+        self.benchmark = benchmark
+
+    def __call__(self, input: Any, **kwargs: Dict[str, Any]) -> Iterator[Any]:
+        """
+        Predict with the input data.
+
+        Args:
+            input (Any): The input data to be predicted.
+            **kwargs (Dict[str, Any]): Additional keyword arguments to set up predictor.
+
+        Returns:
+            Iterator[Any]: An iterator yielding the prediction output.
+        """
+        self.set_predictor(**kwargs)
+        if self.benchmark:
+            self.benchmark.start()
+            if INFER_BENCHMARK_WARMUP > 0:
+                output = self.apply(input)
+                warmup_num = 0
+                for _ in range(INFER_BENCHMARK_WARMUP):
+                    try:
+                        next(output)
+                        warmup_num += 1
+                    except StopIteration:
+                        logging.warning(
+                            f"There are only {warmup_num} batches in input data, but `INFER_BENCHMARK_WARMUP` has been set to {INFER_BENCHMARK_WARMUP}."
+                        )
+                        break
+                self.benchmark.warmup_stop(warmup_num)
+            output = list(self.apply(input))
+            self.benchmark.collect(len(output))
+        else:
+            yield from self.apply(input)
+
+    def apply(self, input: Any) -> Iterator[Any]:
+        """
+        Do predicting with the input data and yields predictions.
+
+        Args:
+            input (Any): The input data to be predicted.
+
+        Yields:
+            Iterator[Any]: An iterator yielding prediction results.
+        """
+        for batch_data in self.batch_sampler(input):
+            prediction = self.process(batch_data)
+            prediction = PredictionWrap(prediction, len(batch_data))
+            for idx in range(len(batch_data)):
+                yield self.result_class(prediction.get_by_idx(idx))
+
+    def set_predictor(
+        self,
+        batch_size: int = None,
+        device: str = None,
+        pp_option: PaddlePredictorOption = None,
+    ) -> None:
+        """
+        Sets the predictor configuration.
+
+        Args:
+            batch_size (int, optional): The batch size to use. Defaults to None.
+            device (str, optional): The device to run the predictor on. Defaults to None.
+            pp_option (PaddlePredictorOption, optional): The predictor options to set. Defaults to None.
+
+        Returns:
+            None
+        """
+        if batch_size:
+            self.batch_sampler.batch_size = batch_size
+            self.pp_option.batch_size = batch_size
+        if device and device != self.pp_option.device:
+            self.pp_option.device = device
+        if pp_option and pp_option != self.pp_option:
+            self.pp_option = pp_option
+
+    @abstractmethod
+    def _build_batch_sampler(self) -> BaseBatchSampler:
+        """Build batch sampler.
+
+        Returns:
+            BaseBatchSampler: batch sampler object.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def process(self, batch_data: List[Any]) -> Dict[str, List[Any]]:
+        """process the batch data sampled from BatchSampler and return the prediction result.
+
+        Args:
+            batch_data (List[Any]): The batch data sampled from BatchSampler.
+
+        Returns:
+            Dict[str, List[Any]]: The prediction result.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def _get_result_class(self) -> type:
+        """Get the result class.
+
+        Returns:
+            type: The result class.
+        """
+        raise NotImplementedError

+ 25 - 0
paddlex/inference/models_new/common/__init__.py

@@ -0,0 +1,25 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .vision import (
+    F,
+    Resize,
+    ResizeByLong,
+    ResizeByShort,
+    Normalize,
+    ToCHWImage,
+    ToBatch,
+)
+
+from .static_infer import StaticInfer

+ 233 - 0
paddlex/inference/models_new/common/static_infer.py

@@ -0,0 +1,233 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Union, Tuple, List, Dict, Any, Iterator
+import os
+import inspect
+from abc import abstractmethod
+import lazy_paddle as paddle
+import numpy as np
+
+from ....utils.flags import FLAGS_json_format_model
+from ....utils import logging
+from ...utils.pp_option import PaddlePredictorOption
+
+
+class Copy2GPU:
+
+    def __init__(self, input_handlers):
+        super().__init__()
+        self.input_handlers = input_handlers
+
+    def __call__(self, x):
+        for idx in range(len(x)):
+            self.input_handlers[idx].reshape(x[idx].shape)
+            self.input_handlers[idx].copy_from_cpu(x[idx])
+
+
+class Copy2CPU:
+
+    def __init__(self, output_handlers):
+        super().__init__()
+        self.output_handlers = output_handlers
+
+    def __call__(self):
+        output = []
+        for out_tensor in self.output_handlers:
+            batch = out_tensor.copy_to_cpu()
+            output.append(batch)
+        return output
+
+
+class Infer:
+
+    def __init__(self, predictor):
+        super().__init__()
+        self.predictor = predictor
+
+    def __call__(self):
+        self.predictor.run()
+
+
+class StaticInfer:
+    """Predictor based on Paddle Inference"""
+
+    def __init__(
+        self, model_dir: str, model_prefix: str, option: PaddlePredictorOption
+    ) -> None:
+        super().__init__()
+        self.model_dir = model_dir
+        self.model_prefix = model_prefix
+        self._update_option(option)
+
+    def _update_option(self, option: PaddlePredictorOption) -> None:
+        if self.option and option == self.option:
+            return
+        self._option = option
+        self._reset()
+
+    @property
+    def option(self) -> PaddlePredictorOption:
+        return self._option if hasattr(self, "_option") else None
+
+    @option.setter
+    def option(self, option: Union[None, PaddlePredictorOption]) -> None:
+        if option:
+            self._update_option(option)
+
+    def _reset(self) -> None:
+        if not self.option:
+            self.option = PaddlePredictorOption()
+        logging.debug(f"Env: {self.option}")
+        (
+            predictor,
+            input_handlers,
+            output_handlers,
+        ) = self._create()
+        self.copy2gpu = Copy2GPU(input_handlers)
+        self.copy2cpu = Copy2CPU(output_handlers)
+        self.infer = Infer(predictor)
+        self.option.changed = False
+
+    def _create(
+        self,
+    ) -> Tuple[
+        paddle.base.libpaddle.PaddleInferPredictor,
+        paddle.base.libpaddle.PaddleInferTensor,
+        paddle.base.libpaddle.PaddleInferTensor,
+    ]:
+        """_create"""
+        from lazy_paddle.inference import Config, create_predictor
+
+        model_postfix = ".json" if FLAGS_json_format_model else ".pdmodel"
+        model_file = (self.model_dir / f"{self.model_prefix}{model_postfix}").as_posix()
+        params_file = (self.model_dir / f"{self.model_prefix}.pdiparams").as_posix()
+        config = Config(model_file, params_file)
+
+        config.enable_memory_optim()
+        if self.option.device in ("gpu", "dcu"):
+            if self.option.device == "gpu":
+                config.exp_disable_mixed_precision_ops({"feed", "fetch"})
+            config.enable_use_gpu(100, self.option.device_id)
+            if self.option.device == "gpu":
+                # NOTE: The pptrt settings are not aligned with those of FD.
+                precision_map = {
+                    "trt_int8": Config.Precision.Int8,
+                    "trt_fp32": Config.Precision.Float32,
+                    "trt_fp16": Config.Precision.Half,
+                }
+                if self.option.run_mode in precision_map.keys():
+                    config.enable_tensorrt_engine(
+                        workspace_size=(1 << 25) * self.option.batch_size,
+                        max_batch_size=self.option.batch_size,
+                        min_subgraph_size=self.option.min_subgraph_size,
+                        precision_mode=precision_map[self.option.run_mode],
+                        use_static=self.option.trt_use_static,
+                        use_calib_mode=self.option.trt_calib_mode,
+                    )
+
+                    if self.option.shape_info_filename is not None:
+                        if not os.path.exists(self.option.shape_info_filename):
+                            config.collect_shape_range_info(
+                                self.option.shape_info_filename
+                            )
+                            logging.info(
+                                f"Dynamic shape info is collected into: {self.option.shape_info_filename}"
+                            )
+                        else:
+                            logging.info(
+                                f"A dynamic shape info file ({self.option.shape_info_filename}) already exists. "
+                                "No need to generate it again."
+                            )
+                        config.enable_tuned_tensorrt_dynamic_shape(
+                            self.option.shape_info_filename, True
+                        )
+        elif self.option.device == "npu":
+            config.enable_custom_device("npu")
+        elif self.option.device == "xpu":
+            pass
+        elif self.option.device == "mlu":
+            config.enable_custom_device("mlu")
+        else:
+            assert self.option.device == "cpu"
+            config.disable_gpu()
+            if "mkldnn" in self.option.run_mode:
+                try:
+                    config.enable_mkldnn()
+                    if "bf16" in self.option.run_mode:
+                        config.enable_mkldnn_bfloat16()
+                except Exception as e:
+                    logging.warning(
+                        f"MKL-DNN is not available ({e}). MKL-DNN will be disabled."
+                    )
+                config.set_mkldnn_cache_capacity(-1)
+            else:
+                if hasattr(config, "disable_mkldnn"):
+                    config.disable_mkldnn()
+
+        # Disable paddle inference logging
+        config.disable_glog_info()
+
+        config.set_cpu_math_library_num_threads(self.option.cpu_threads)
+
+        if self.option.device in ("cpu", "gpu"):
+            if not (
+                self.option.device == "gpu" and self.option.run_mode.startswith("trt")
+            ):
+                if hasattr(config, "enable_new_ir"):
+                    config.enable_new_ir(self.option.enable_new_ir)
+                if hasattr(config, "enable_new_executor"):
+                    config.enable_new_executor()
+                config.set_optimization_level(3)
+
+        for del_p in self.option.delete_pass:
+            config.delete_pass(del_p)
+
+        if self.option.device in ("gpu", "dcu"):
+            if paddle.is_compiled_with_rocm():
+                # Delete unsupported passes in dcu
+                config.delete_pass("conv2d_add_act_fuse_pass")
+                config.delete_pass("conv2d_add_fuse_pass")
+
+        predictor = create_predictor(config)
+
+        # Get input and output handlers
+        input_names = predictor.get_input_names()
+        input_names.sort()
+        input_handlers = []
+        output_handlers = []
+        for input_name in input_names:
+            input_handler = predictor.get_input_handle(input_name)
+            input_handlers.append(input_handler)
+        output_names = predictor.get_output_names()
+        for output_name in output_names:
+            output_handler = predictor.get_output_handle(output_name)
+            output_handlers.append(output_handler)
+        return predictor, input_handlers, output_handlers
+
+    def __call__(self, x) -> List[Any]:
+        if self.option.changed:
+            self._reset()
+        self.copy2gpu(x)
+        self.infer()
+        pred = self.copy2cpu()
+        return pred
+
+    @property
+    def benchmark(self):
+        return {
+            "Copy2GPU": self.copy2gpu,
+            "Infer": self.infer,
+            "Copy2CPU": self.copy2cpu,
+        }
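
For orientation, a minimal sketch of how `StaticInfer` is meant to be driven once a model has been exported. The model directory name and the `inference` file prefix below are illustrative assumptions, not values taken from this diff:

    from pathlib import Path
    import numpy as np

    from paddlex.inference.utils.pp_option import PaddlePredictorOption
    from paddlex.inference.models_new.common import StaticInfer

    option = PaddlePredictorOption()  # default option; adjust device/run mode as needed
    infer = StaticInfer(
        model_dir=Path("PP-LCNet_x1_0_infer"),  # hypothetical export directory
        model_prefix="inference",               # assumed prefix of *.pdmodel/*.pdiparams
        option=option,
    )
    # One array per input handle, in sorted input-name order.
    x = [np.zeros((1, 3, 224, 224), dtype="float32")]
    outputs = infer(x)  # list of np.ndarray outputs copied back to CPU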

+ 23 - 0
paddlex/inference/models_new/common/vision/__init__.py

@@ -0,0 +1,23 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import funcs as F
+from .processors import (
+    Resize,
+    ResizeByLong,
+    ResizeByShort,
+    Normalize,
+    ToCHWImage,
+    ToBatch,
+)

+ 69 - 0
paddlex/inference/models_new/common/vision/funcs.py

@@ -0,0 +1,69 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cv2
+
+
+def check_image_size(input_):
+    """check image size"""
+    if not (
+        isinstance(input_, (list, tuple))
+        and len(input_) == 2
+        and isinstance(input_[0], int)
+        and isinstance(input_[1], int)
+    ):
+        raise TypeError(f"{input_} cannot represent a valid image size.")
+
+
+def resize(im, target_size, interp):
+    """resize image to target size"""
+    w, h = target_size
+    im = cv2.resize(im, (w, h), interpolation=interp)
+    return im
+
+
+def flip_h(im):
+    """flip image horizontally"""
+    if len(im.shape) == 3:
+        im = im[:, ::-1, :]
+    elif len(im.shape) == 2:
+        im = im[:, ::-1]
+    return im
+
+
+def flip_v(im):
+    """flip image vertically"""
+    if len(im.shape) == 3:
+        im = im[::-1, :, :]
+    elif len(im.shape) == 2:
+        im = im[::-1, :]
+    return im
+
+
+def slice(im, coords):
+    """slice the image"""
+    x1, y1, x2, y2 = coords
+    im = im[y1:y2, x1:x2, ...]
+    return im
+
+
+def pad(im, pad, val):
+    """padding image by value"""
+    if isinstance(pad, int):
+        pad = [pad] * 4
+    if len(pad) != 4:
+        raise ValueError(
+            "`pad` should be an int or a sequence of 4 ints (top, bottom, left, right)."
+        )
+    chns = 1 if im.ndim == 2 else im.shape[2]
+    im = cv2.copyMakeBorder(im, *pad, cv2.BORDER_CONSTANT, value=(val,) * chns)
+    return im
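
A quick sanity check of these helpers on a dummy array (assuming the package import path shown; `F` is how the sibling `processors` module imports this file):

    import numpy as np
    import cv2

    from paddlex.inference.models_new.common.vision import F

    img = np.zeros((100, 80, 3), dtype=np.uint8)                # H=100, W=80
    resized = F.resize(img, (64, 48), interp=cv2.INTER_LINEAR)  # target is (w, h)
    flipped = F.flip_h(resized)                                 # horizontal mirror
    padded = F.pad(resized, pad=4, val=114)                     # 4 px border on each side
    print(resized.shape, padded.shape)                          # (48, 64, 3) (56, 72, 3)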

+ 235 - 0
paddlex/inference/models_new/common/vision/processors.py

@@ -0,0 +1,235 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import ast
+import math
+from pathlib import Path
+from copy import deepcopy
+
+import numpy as np
+import cv2
+
+from . import funcs as F
+
+
+class _BaseResize:
+    _INTERP_DICT = {
+        "NEAREST": cv2.INTER_NEAREST,
+        "LINEAR": cv2.INTER_LINEAR,
+        "CUBIC": cv2.INTER_CUBIC,
+        "AREA": cv2.INTER_AREA,
+        "LANCZOS4": cv2.INTER_LANCZOS4,
+    }
+
+    def __init__(self, size_divisor, interp):
+        super().__init__()
+
+        if size_divisor is not None:
+            assert isinstance(
+                size_divisor, int
+            ), "`size_divisor` should be None or int."
+        self.size_divisor = size_divisor
+
+        try:
+            interp = self._INTERP_DICT[interp]
+        except KeyError:
+            raise ValueError(
+                "`interp` should be one of {}.".format(self._INTERP_DICT.keys())
+            )
+        self.interp = interp
+
+    @staticmethod
+    def _rescale_size(img_size, target_size):
+        """rescale size"""
+        scale = min(max(target_size) / max(img_size), min(target_size) / min(img_size))
+        rescaled_size = [round(i * scale) for i in img_size]
+        return rescaled_size, scale
+
+
+class Resize(_BaseResize):
+    """Resize the image."""
+
+    def __init__(
+        self, target_size, keep_ratio=False, size_divisor=None, interp="LINEAR"
+    ):
+        """
+        Initialize the instance.
+
+        Args:
+            target_size (list|tuple|int): Target width and height.
+            keep_ratio (bool, optional): Whether to keep the aspect ratio of resized
+                image. Default: False.
+            size_divisor (int|None, optional): Divisor of resized image size.
+                Default: None.
+            interp (str, optional): Interpolation method. Choices are 'NEAREST',
+                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
+        """
+        super().__init__(size_divisor=size_divisor, interp=interp)
+
+        if isinstance(target_size, int):
+            target_size = [target_size, target_size]
+        F.check_image_size(target_size)
+        self.target_size = target_size
+
+        self.keep_ratio = keep_ratio
+
+    def __call__(self, imgs):
+        """apply"""
+        return [self.resize(img) for img in imgs]
+
+    def resize(self, img):
+        target_size = self.target_size
+        original_size = img.shape[:2][::-1]
+
+        if self.keep_ratio:
+            h, w = img.shape[0:2]
+            target_size, _ = self._rescale_size((w, h), self.target_size)
+
+        if self.size_divisor:
+            target_size = [
+                math.ceil(i / self.size_divisor) * self.size_divisor
+                for i in target_size
+            ]
+        img = F.resize(img, target_size, interp=self.interp)
+        return img
+
+
+class ResizeByLong(_BaseResize):
+    """
+    Proportionally resize the image by specifying the target length of the
+    longest side.
+    """
+
+    def __init__(self, target_long_edge, size_divisor=None, interp="LINEAR"):
+        """
+        Initialize the instance.
+
+        Args:
+            target_long_edge (int): Target length of the longest side of image.
+            size_divisor (int|None, optional): Divisor of resized image size.
+                Default: None.
+            interp (str, optional): Interpolation method. Choices are 'NEAREST',
+                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
+        """
+        super().__init__(size_divisor=size_divisor, interp=interp)
+        self.target_long_edge = target_long_edge
+
+    def __call__(self, imgs):
+        """apply"""
+        return [self.resize(img) for img in imgs]
+
+    def resize(self, img):
+        h, w = img.shape[:2]
+        scale = self.target_long_edge / max(h, w)
+        h_resize = round(h * scale)
+        w_resize = round(w * scale)
+        if self.size_divisor is not None:
+            h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
+            w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
+
+        img = F.resize(img, (w_resize, h_resize), interp=self.interp)
+        return img
+
+
+class ResizeByShort(_BaseResize):
+    """
+    Proportionally resize the image by specifying the target length of the
+    shortest side.
+    """
+
+    def __init__(self, target_short_edge, size_divisor=None, interp="LINEAR"):
+        """
+        Initialize the instance.
+
+        Args:
+            target_short_edge (int): Target length of the shortest side of image.
+            size_divisor (int|None, optional): Divisor of resized image size.
+                Default: None.
+            interp (str, optional): Interpolation method. Choices are 'NEAREST',
+                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
+        """
+        super().__init__(size_divisor=size_divisor, interp=interp)
+        self.target_short_edge = target_short_edge
+
+    def __call__(self, imgs):
+        """apply"""
+        return [self.resize(img) for img in imgs]
+
+    def resize(self, img):
+        h, w = img.shape[:2]
+        scale = self.target_short_edge / min(h, w)
+        h_resize = round(h * scale)
+        w_resize = round(w * scale)
+        if self.size_divisor is not None:
+            h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
+            w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
+
+        img = F.resize(img, (w_resize, h_resize), interp=self.interp)
+        return img
+
+
+class Normalize:
+    """Normalize the image."""
+
+    def __init__(self, scale=1.0 / 255, mean=0.5, std=0.5, preserve_dtype=False):
+        """
+        Initialize the instance.
+
+        Args:
+            scale (float, optional): Scaling factor to apply to the image before
+                applying normalization. Default: 1/255.
+            mean (float|tuple|list, optional): Means for each channel of the image.
+                Default: 0.5.
+            std (float|tuple|list, optional): Standard deviations for each channel
+                of the image. Default: 0.5.
+            preserve_dtype (bool, optional): Whether to preserve the original dtype
+                of the image.
+        """
+        super().__init__()
+
+        self.scale = np.float32(scale)
+        if isinstance(mean, float):
+            mean = [mean]
+        self.mean = np.asarray(mean).astype("float32")
+        if isinstance(std, float):
+            std = [std]
+        self.std = np.asarray(std).astype("float32")
+        self.preserve_dtype = preserve_dtype
+
+    def __call__(self, imgs):
+        """apply"""
+        old_type = imgs[0].dtype
+        # XXX: If `old_type` has higher precision than float32,
+        # we will lose some precision.
+        imgs = np.array(imgs).astype("float32", copy=False)
+        imgs *= self.scale
+        imgs -= self.mean
+        imgs /= self.std
+        if self.preserve_dtype:
+            imgs = imgs.astype(old_type, copy=False)
+        return list(imgs)
+
+
+class ToCHWImage:
+    """Reorder the dimensions of the image from HWC to CHW."""
+
+    def __call__(self, imgs):
+        """apply"""
+        return [img.transpose((2, 0, 1)) for img in imgs]
+
+
+class ToBatch:
+    def __call__(self, imgs):
+        return [np.stack(imgs, axis=0).astype(dtype=np.float32, copy=False)]
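
These processors all follow the same list-in/list-out contract, which is what lets the predictors below chain them and finish with `ToBatch`. A small, self-contained illustration on dummy data:

    import numpy as np

    from paddlex.inference.models_new.common import Resize, Normalize, ToCHWImage, ToBatch

    imgs = [
        np.random.randint(0, 256, (64, 96, 3), dtype=np.uint8),
        np.random.randint(0, 256, (48, 48, 3), dtype=np.uint8),
    ]
    imgs = Resize(target_size=32)(imgs)                       # two (32, 32, 3) images
    imgs = Normalize(scale=1 / 255, mean=0.5, std=0.5)(imgs)
    imgs = ToCHWImage()(imgs)                                 # HWC -> CHW
    batch = ToBatch()(imgs)                                   # [float32 array of shape (2, 3, 32, 32)]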

+ 15 - 0
paddlex/inference/models_new/image_classification/__init__.py

@@ -0,0 +1,15 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .predictor import ClasPredictor

+ 163 - 0
paddlex/inference/models_new/image_classification/predictor.py

@@ -0,0 +1,163 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Union, Dict, List, Tuple
+import numpy as np
+
+from ....utils.func_register import FuncRegister
+from ....modules.image_classification.model_list import MODELS
+from ...common.batch_sampler import ImageBatchSampler
+from ...common.reader import ReadImage
+from ..common import (
+    Resize,
+    ResizeByShort,
+    Normalize,
+    ToCHWImage,
+    ToBatch,
+    StaticInfer,
+)
+from ..base import BasicPredictor
+from .processors import Crop, Topk
+from .result import TopkResult
+
+
+class ClasPredictor(BasicPredictor):
+    """ClasPredictor that inherits from BasicPredictor."""
+
+    entities = MODELS
+
+    _FUNC_MAP = {}
+    register = FuncRegister(_FUNC_MAP)
+
+    def __init__(self, *args: List, **kwargs: Dict) -> None:
+        """Initializes ClasPredictor.
+
+        Args:
+            *args: Arbitrary positional arguments passed to the superclass.
+            **kwargs: Arbitrary keyword arguments passed to the superclass.
+        """
+        super().__init__(*args, **kwargs)
+        self.preprocessors, self.infer, self.postprocessors = self._build()
+
+    def _build_batch_sampler(self) -> ImageBatchSampler:
+        """Builds and returns an ImageBatchSampler instance.
+
+        Returns:
+            ImageBatchSampler: An instance of ImageBatchSampler.
+        """
+        return ImageBatchSampler()
+
+    def _get_result_class(self) -> type:
+        """Returns the result class, TopkResult.
+
+        Returns:
+            type: The TopkResult class.
+        """
+        return TopkResult
+
+    def _build(self) -> Tuple:
+        """Build the preprocessors, inference engine, and postprocessors based on the configuration.
+
+        Returns:
+            tuple: A tuple containing the preprocessors, inference engine, and postprocessors.
+        """
+        preprocessors = {"Read": ReadImage(format="RGB")}
+        for cfg in self.config["PreProcess"]["transform_ops"]:
+            tf_key = list(cfg.keys())[0]
+            func = self._FUNC_MAP[tf_key]
+            args = cfg.get(tf_key, {})
+            name, op = func(self, **args) if args else func(self)
+            preprocessors[name] = op
+        preprocessors["ToBatch"] = ToBatch()
+
+        infer = StaticInfer(
+            model_dir=self.model_dir,
+            model_prefix=self.MODEL_FILE_PREFIX,
+            option=self.pp_option,
+        )
+
+        postprocessors = {}
+        for key in self.config["PostProcess"]:
+            func = self._FUNC_MAP.get(key)
+            args = self.config["PostProcess"].get(key, {})
+            name, op = func(self, **args) if args else func(self)
+            postprocessors[name] = op
+        return preprocessors, infer, postprocessors
+
+    def process(self, batch_data: List[Union[str, np.ndarray]]) -> Dict[str, Any]:
+        """
+        Process a batch of data through preprocessing, inference, and postprocessing.
+
+        Args:
+            batch_data (List[Union[str, np.ndarray]]): A batch of input data, e.g. image
+                file paths or decoded images.
+
+        Returns:
+            dict: A dictionary with one entry per batch instance under the keys
+                'input_path', 'input_img', 'class_ids', 'scores', and 'label_names'.
+        """
+        batch_raw_imgs = self.preprocessors["Read"](imgs=batch_data)
+        batch_imgs = self.preprocessors["Resize"](imgs=batch_raw_imgs)
+        batch_imgs = self.preprocessors["Crop"](imgs=batch_imgs)
+        batch_imgs = self.preprocessors["Normalize"](imgs=batch_imgs)
+        batch_imgs = self.preprocessors["ToCHW"](imgs=batch_imgs)
+        x = self.preprocessors["ToBatch"](imgs=batch_imgs)
+        batch_preds = self.infer(x=x)
+        batch_class_ids, batch_scores, batch_label_names = self.postprocessors["Topk"](
+            batch_preds
+        )
+        return {
+            "input_path": batch_data,
+            "input_img": batch_raw_imgs,
+            "class_ids": batch_class_ids,
+            "scores": batch_scores,
+            "label_names": batch_label_names,
+        }
+
+    @register("ResizeImage")
+    # TODO(gaotingquan): backend & interpolation
+    def build_resize(
+        self, resize_short=None, size=None, backend="cv2", interpolation="LINEAR"
+    ):
+        assert resize_short or size
+        if resize_short:
+            op = ResizeByShort(
+                target_short_edge=resize_short, size_divisor=None, interp="LINEAR"
+            )
+        else:
+            op = Resize(target_size=size)
+        return "Resize", op
+
+    @register("CropImage")
+    def build_crop(self, size=224):
+        return "Crop", Crop(crop_size=size)
+
+    @register("NormalizeImage")
+    def build_normalize(
+        self,
+        mean=[0.485, 0.456, 0.406],
+        std=[0.229, 0.224, 0.225],
+        scale=1 / 255,
+        order="",
+        channel_num=3,
+    ):
+        assert channel_num == 3
+        assert order == ""
+        return "Normalize", Normalize(scale=scale, mean=mean, std=std)
+
+    @register("ToCHWImage")
+    def build_to_chw(self):
+        return "ToCHW", ToCHWImage()
+
+    @register("Topk")
+    def build_topk(self, topk, label_list=None):
+        return "Topk", Topk(topk=int(topk), class_ids=label_list)
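
`_build` walks the exported model's config; the registered builders above imply a structure roughly like the following (all values are placeholders for illustration, not taken from a real exported config):

    config = {
        "PreProcess": {
            "transform_ops": [
                {"ResizeImage": {"resize_short": 256}},  # -> ResizeByShort
                {"CropImage": {"size": 224}},            # -> Crop
                {"NormalizeImage": {"mean": [0.485, 0.456, 0.406],
                                    "std": [0.229, 0.224, 0.225],
                                    "scale": 1 / 255}},  # -> Normalize
                {"ToCHWImage": None},                    # -> ToCHWImage
            ]
        },
        "PostProcess": {
            "Topk": {"topk": 5, "label_list": ["class_0", "class_1"]},  # -> Topk
        },
    }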

+ 89 - 0
paddlex/inference/models_new/image_classification/processors.py

@@ -0,0 +1,89 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+from ....utils import logging
+from ..common.vision import F
+
+
+class Crop:
+    """Crop region from the image."""
+
+    def __init__(self, crop_size, mode="C"):
+        """
+        Initialize the instance.
+
+        Args:
+            crop_size (list|tuple|int): Width and height of the region to crop.
+            mode (str, optional): 'C' for cropping the center part and 'TL' for
+                cropping the top left part. Default: 'C'.
+        """
+        super().__init__()
+        if isinstance(crop_size, int):
+            crop_size = [crop_size, crop_size]
+        F.check_image_size(crop_size)
+
+        self.crop_size = crop_size
+
+        if mode not in ("C", "TL"):
+            raise ValueError("Unsupported interpolation method")
+        self.mode = mode
+
+    def __call__(self, imgs):
+        """apply"""
+        return [self.crop(img) for img in imgs]
+
+    def crop(self, img):
+        h, w = img.shape[:2]
+        cw, ch = self.crop_size
+        if w < cw or h < ch:
+            raise ValueError(
+                f"Input image ({w}, {h}) is smaller than the target crop size ({cw}, {ch})."
+            )
+        if self.mode == "C":
+            x1 = max(0, (w - cw) // 2)
+            y1 = max(0, (h - ch) // 2)
+        elif self.mode == "TL":
+            x1, y1 = 0, 0
+        x2 = min(w, x1 + cw)
+        y2 = min(h, y1 + ch)
+        img = F.slice(img, coords=(x1, y1, x2, y2))
+        return img
+
+
+class Topk:
+    """Topk Transform"""
+
+    def __init__(self, topk, class_ids=None):
+        super().__init__()
+        assert isinstance(topk, (int,))
+        self.topk = topk
+        self.class_id_map = self._parse_class_id_map(class_ids)
+
+    def _parse_class_id_map(self, class_ids):
+        """parse class id to label map file"""
+        if class_ids is None:
+            return None
+        class_id_map = {id: str(lb) for id, lb in enumerate(class_ids)}
+        return class_id_map
+
+    def __call__(self, preds):
+        indexes = preds[0].argsort(axis=1)[:, -self.topk :][:, ::-1].astype("int32")
+        scores = [
+            np.around(pred[index], decimals=5) for pred, index in zip(preds[0], indexes)
+        ]
+        if self.class_id_map is not None:
+            label_names = [[self.class_id_map[i] for i in index] for index in indexes]
+        else:
+            # Fall back to stringified class ids when no label list is provided.
+            label_names = [[str(i) for i in index] for index in indexes]
+        return indexes, scores, label_names
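
Both processors can be exercised standalone, which is useful when debugging the postprocess path; a minimal sketch on dummy data:

    import numpy as np

    from paddlex.inference.models_new.image_classification.processors import Crop, Topk

    crops = Crop(crop_size=224)([np.zeros((256, 256, 3), dtype=np.uint8)])  # -> (224, 224, 3)

    # `preds` mirrors StaticInfer output: a list whose first element is (batch, num_classes).
    scores = np.array([[0.1, 0.7, 0.2], [0.05, 0.15, 0.8]], dtype=np.float32)
    class_ids, topk_scores, label_names = Topk(topk=2, class_ids=["cat", "dog", "bird"])([scores])
    # class_ids: [[1, 2], [2, 1]]; label_names: [['dog', 'bird'], ['bird', 'dog']]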

+ 81 - 0
paddlex/inference/models_new/image_classification/result.py

@@ -0,0 +1,81 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import PIL
+from PIL import Image, ImageDraw, ImageFont
+import numpy as np
+
+from ....utils.fonts import PINGFANG_FONT_FILE_PATH
+from ...utils.color_map import get_colormap
+from ...common.result import BaseCVResult
+
+
+class TopkResult(BaseCVResult):
+
+    def _to_img(self):
+        """Draw label on image"""
+        labels = self.get("label_names", self["class_ids"])
+        label_str = f"{labels[0]} {self['scores'][0]:.2f}"
+
+        image = Image.fromarray(self._input_img)
+        image_size = image.size
+        draw = ImageDraw.Draw(image)
+        min_font_size = int(image_size[0] * 0.02)
+        max_font_size = int(image_size[0] * 0.05)
+        for font_size in range(max_font_size, min_font_size - 1, -1):
+            font = ImageFont.truetype(
+                PINGFANG_FONT_FILE_PATH, font_size, encoding="utf-8"
+            )
+            if tuple(map(int, PIL.__version__.split("."))) <= (10, 0, 0):
+                text_width_tmp, text_height_tmp = draw.textsize(label_str, font)
+            else:
+                left, top, right, bottom = draw.textbbox((0, 0), label_str, font)
+                text_width_tmp, text_height_tmp = right - left, bottom - top
+            if text_width_tmp <= image_size[0]:
+                break
+            else:
+                font = ImageFont.truetype(PINGFANG_FONT_FILE_PATH, min_font_size)
+        color_list = get_colormap(rgb=True)
+        color = tuple(color_list[0])
+        font_color = tuple(self._get_font_colormap(3))
+        if tuple(map(int, PIL.__version__.split("."))) <= (10, 0, 0):
+            text_width, text_height = draw.textsize(label_str, font)
+        else:
+            left, top, right, bottom = draw.textbbox((0, 0), label_str, font)
+            text_width, text_height = right - left, bottom - top
+
+        rect_left = 3
+        rect_top = 3
+        rect_right = rect_left + text_width + 3
+        rect_bottom = rect_top + text_height + 6
+
+        draw.rectangle([(rect_left, rect_top), (rect_right, rect_bottom)], fill=color)
+
+        text_x = rect_left + 3
+        text_y = rect_top
+        draw.text((text_x, text_y), label_str, fill=font_color, font=font)
+        return image
+
+    def _get_font_colormap(self, color_index):
+        """
+        Get font colormap
+        """
+        dark = np.array([0x14, 0x0E, 0x35])
+        light = np.array([0xFF, 0xFF, 0xFF])
+        light_indexs = [0, 3, 4, 8, 9, 13, 14, 18, 19]
+        if color_index in light_indexs:
+            return light.astype("int32")
+        else:
+            return dark.astype("int32")

+ 15 - 0
paddlex/inference/models_new/text_detection/__init__.py

@@ -0,0 +1,15 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .predictor import TextDetPredictor

+ 135 - 0
paddlex/inference/models_new/text_detection/predictor.py

@@ -0,0 +1,135 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ....utils.func_register import FuncRegister
+from ....modules.text_detection.model_list import MODELS
+from ...common.batch_sampler import ImageBatchSampler
+from ...common.reader import ReadImage
+from ..common import (
+    Resize,
+    ResizeByShort,
+    Normalize,
+    ToCHWImage,
+    ToBatch,
+    StaticInfer,
+)
+from ..base import BasicPredictor
+from .processors import DetResizeForTest, NormalizeImage, DBPostProcess
+from .result import TextDetResult
+
+
+class TextDetPredictor(BasicPredictor):
+
+    entities = MODELS
+
+    _FUNC_MAP = {}
+    register = FuncRegister(_FUNC_MAP)
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.pre_tfs, self.infer, self.post_op = self._build()
+
+    def _build_batch_sampler(self):
+        return ImageBatchSampler()
+
+    def _get_result_class(self):
+        return TextDetResult
+
+    def _build(self):
+        pre_tfs = {"Read": ReadImage(format="RGB")}
+
+        for cfg in self.config["PreProcess"]["transform_ops"]:
+            tf_key = list(cfg.keys())[0]
+            func = self._FUNC_MAP[tf_key]
+            args = cfg.get(tf_key, {})
+            name, op = func(self, **args) if args else func(self)
+            if op:
+                pre_tfs[name] = op
+        pre_tfs["ToBatch"] = ToBatch()
+
+        infer = StaticInfer(
+            model_dir=self.model_dir,
+            model_prefix=self.MODEL_FILE_PREFIX,
+            option=self.pp_option,
+        )
+
+        post_op = self.build_postprocess(**self.config["PostProcess"])
+        return pre_tfs, infer, post_op
+
+    def process(self, batch_data):
+        batch_raw_imgs = self.pre_tfs["Read"](imgs=batch_data)
+        batch_imgs, batch_shapes = self.pre_tfs["Resize"](imgs=batch_raw_imgs)
+        batch_imgs = self.pre_tfs["Normalize"](imgs=batch_imgs)
+        batch_imgs = self.pre_tfs["ToCHW"](imgs=batch_imgs)
+        x = self.pre_tfs["ToBatch"](imgs=batch_imgs)
+        batch_preds = self.infer(x=x)
+        polys, scores = self.post_op(batch_preds, batch_shapes)
+        return {
+            "input_path": batch_data,
+            "input_img": batch_raw_imgs,
+            "dt_polys": polys,
+            "dt_scores": scores,
+        }
+
+    @register("DecodeImage")
+    def build_readimg(self, channel_first, img_mode):
+        assert not channel_first
+        return "Read", ReadImage(format=img_mode)
+
+    @register("DetResizeForTest")
+    def build_resize(self, **kwargs):
+        # TODO: align to PaddleOCR
+        assert self.model_name in ("PP-OCRv4_server_det", "PP-OCRv4_mobile_det")
+        resize_long = kwargs.get("resize_long", 960)
+        return "Resize", DetResizeForTest(limit_side_len=resize_long, limit_type="max")
+
+    @register("NormalizeImage")
+    def build_normalize(
+        self,
+        mean=[0.485, 0.456, 0.406],
+        std=[0.229, 0.224, 0.225],
+        scale=1 / 255,
+        order="",
+        channel_num=3,
+    ):
+        return "Normalize", NormalizeImage(
+            mean=mean, std=std, scale=scale, order=order, channel_num=channel_num
+        )
+
+    @register("ToCHWImage")
+    def build_to_chw(self):
+        return "ToCHW", ToCHWImage()
+
+    def build_postprocess(self, **kwargs):
+        if kwargs.get("name") == "DBPostProcess":
+            return DBPostProcess(
+                thresh=kwargs.get("thresh", 0.3),
+                box_thresh=kwargs.get("box_thresh", 0.7),
+                max_candidates=kwargs.get("max_candidates", 1000),
+                unclip_ratio=kwargs.get("unclip_ratio", 2.0),
+                use_dilation=kwargs.get("use_dilation", False),
+                score_mode=kwargs.get("score_mode", "fast"),
+                box_type=kwargs.get("box_type", "quad"),
+            )
+
+        else:
+            raise Exception(
+                f"Unsupported postprocess op: {kwargs.get('name')}"
+            )
+
+    @register("DetLabelEncode")
+    def foo(self, *args, **kwargs):
+        return None, None
+
+    @register("KeepKeys")
+    def foo(self, *args, **kwargs):
+        return None, None
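
As with classification, `_build` is driven by the exported config; based on the builders above it expects roughly the following shape (placeholder values, shown only to make the op-to-processor mapping explicit):

    config = {
        "PreProcess": {
            "transform_ops": [
                {"DecodeImage": {"channel_first": False, "img_mode": "BGR"}},
                {"DetResizeForTest": {"resize_long": 960}},
                {"NormalizeImage": {"mean": [0.485, 0.456, 0.406],
                                    "std": [0.229, 0.224, 0.225],
                                    "scale": 1 / 255, "order": "hwc"}},
                {"ToCHWImage": None},
                {"KeepKeys": {"keep_keys": ["image", "shape"]}},  # training-time op, mapped to None
            ]
        },
        "PostProcess": {"name": "DBPostProcess", "thresh": 0.3, "box_thresh": 0.6,
                        "max_candidates": 1000, "unclip_ratio": 1.5},
    }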

+ 415 - 0
paddlex/inference/models_new/text_detection/processors.py

@@ -0,0 +1,415 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import sys
+import cv2
+import copy
+import math
+import pyclipper
+import numpy as np
+from numpy.linalg import norm
+from PIL import Image
+from shapely.geometry import Polygon
+
+from ...utils.io import ImageReader
+from ....utils import logging
+
+
+class DetResizeForTest:
+    """DetResizeForTest"""
+
+    def __init__(self, **kwargs):
+        super().__init__()
+        self.resize_type = 0
+        self.keep_ratio = False
+        if "image_shape" in kwargs:
+            self.image_shape = kwargs["image_shape"]
+            self.resize_type = 1
+            if "keep_ratio" in kwargs:
+                self.keep_ratio = kwargs["keep_ratio"]
+        elif "limit_side_len" in kwargs:
+            self.limit_side_len = kwargs["limit_side_len"]
+            self.limit_type = kwargs.get("limit_type", "min")
+        elif "resize_long" in kwargs:
+            self.resize_type = 2
+            self.resize_long = kwargs.get("resize_long", 960)
+        else:
+            self.limit_side_len = 736
+            self.limit_type = "min"
+
+    def __call__(self, imgs):
+        """apply"""
+        resize_imgs, img_shapes = [], []
+        for ori_img in imgs:
+            img, shape = self.resize(ori_img)
+            resize_imgs.append(img)
+            img_shapes.append(shape)
+        return resize_imgs, img_shapes
+
+    def resize(self, img):
+        src_h, src_w, _ = img.shape
+        if sum([src_h, src_w]) < 64:
+            img = self.image_padding(img)
+
+        if self.resize_type == 0:
+            # img, shape = self.resize_image_type0(img)
+            img, [ratio_h, ratio_w] = self.resize_image_type0(img)
+        elif self.resize_type == 2:
+            img, [ratio_h, ratio_w] = self.resize_image_type2(img)
+        else:
+            # img, shape = self.resize_image_type1(img)
+            img, [ratio_h, ratio_w] = self.resize_image_type1(img)
+        return img, np.array([src_h, src_w, ratio_h, ratio_w])
+
+    def image_padding(self, im, value=0):
+        """padding image"""
+        h, w, c = im.shape
+        im_pad = np.zeros((max(32, h), max(32, w), c), np.uint8) + value
+        im_pad[:h, :w, :] = im
+        return im_pad
+
+    def resize_image_type1(self, img):
+        """resize the image"""
+        resize_h, resize_w = self.image_shape
+        ori_h, ori_w = img.shape[:2]  # (h, w, c)
+        if self.keep_ratio is True:
+            resize_w = ori_w * resize_h / ori_h
+            N = math.ceil(resize_w / 32)
+            resize_w = N * 32
+        ratio_h = float(resize_h) / ori_h
+        ratio_w = float(resize_w) / ori_w
+        img = cv2.resize(img, (int(resize_w), int(resize_h)))
+        # return img, np.array([ori_h, ori_w])
+        return img, [ratio_h, ratio_w]
+
+    def resize_image_type0(self, img):
+        """
+        resize image to a size multiple of 32 which is required by the network
+        args:
+            img(array): array with shape [h, w, c]
+        return(tuple):
+            img, (ratio_h, ratio_w)
+        """
+        limit_side_len = self.limit_side_len
+        h, w, c = img.shape
+
+        # limit the max side
+        if self.limit_type == "max":
+            if max(h, w) > limit_side_len:
+                if h > w:
+                    ratio = float(limit_side_len) / h
+                else:
+                    ratio = float(limit_side_len) / w
+            else:
+                ratio = 1.0
+        elif self.limit_type == "min":
+            if min(h, w) < limit_side_len:
+                if h < w:
+                    ratio = float(limit_side_len) / h
+                else:
+                    ratio = float(limit_side_len) / w
+            else:
+                ratio = 1.0
+        elif self.limit_type == "resize_long":
+            ratio = float(limit_side_len) / max(h, w)
+        else:
+            raise Exception(f"Unsupported limit type: {self.limit_type}")
+        resize_h = int(h * ratio)
+        resize_w = int(w * ratio)
+
+        resize_h = max(int(round(resize_h / 32) * 32), 32)
+        resize_w = max(int(round(resize_w / 32) * 32), 32)
+
+        try:
+            if int(resize_w) <= 0 or int(resize_h) <= 0:
+                return None, (None, None)
+            img = cv2.resize(img, (int(resize_w), int(resize_h)))
+        except Exception:
+            logging.info(
+                f"Resize failed: img.shape={img.shape}, resize_w={resize_w}, resize_h={resize_h}"
+            )
+            sys.exit(0)
+        ratio_h = resize_h / float(h)
+        ratio_w = resize_w / float(w)
+        return img, [ratio_h, ratio_w]
+
+    def resize_image_type2(self, img):
+        """resize image size"""
+        h, w, _ = img.shape
+
+        resize_w = w
+        resize_h = h
+
+        if resize_h > resize_w:
+            ratio = float(self.resize_long) / resize_h
+        else:
+            ratio = float(self.resize_long) / resize_w
+
+        resize_h = int(resize_h * ratio)
+        resize_w = int(resize_w * ratio)
+
+        max_stride = 128
+        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
+        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
+        img = cv2.resize(img, (int(resize_w), int(resize_h)))
+        ratio_h = resize_h / float(h)
+        ratio_w = resize_w / float(w)
+
+        return img, [ratio_h, ratio_w]
+
+
+class NormalizeImage:
+    """Normalize the image, e.g. subtract the mean and divide by the std."""
+
+    def __init__(self, scale=None, mean=None, std=None, order="chw", **kwargs):
+        super().__init__()
+        if isinstance(scale, str):
+            scale = eval(scale)
+        self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
+        mean = mean if mean is not None else [0.485, 0.456, 0.406]
+        std = std if std is not None else [0.229, 0.224, 0.225]
+
+        shape = (3, 1, 1) if order == "chw" else (1, 1, 3)
+        self.mean = np.array(mean).reshape(shape).astype("float32")
+        self.std = np.array(std).reshape(shape).astype("float32")
+
+    def __call__(self, imgs):
+        """apply"""
+
+        def norm(img):
+            return (img.astype("float32") * self.scale - self.mean) / self.std
+
+        return [norm(img) for img in imgs]
+
+
+class DBPostProcess:
+    """
+    The post process for Differentiable Binarization (DB).
+    """
+
+    def __init__(
+        self,
+        thresh=0.3,
+        box_thresh=0.7,
+        max_candidates=1000,
+        unclip_ratio=2.0,
+        use_dilation=False,
+        score_mode="fast",
+        box_type="quad",
+        **kwargs
+    ):
+        super().__init__()
+        self.thresh = thresh
+        self.box_thresh = box_thresh
+        self.max_candidates = max_candidates
+        self.unclip_ratio = unclip_ratio
+        self.min_size = 3
+        self.score_mode = score_mode
+        self.box_type = box_type
+        assert score_mode in [
+            "slow",
+            "fast",
+        ], "Score mode must be in [slow, fast] but got: {}".format(score_mode)
+
+        self.dilation_kernel = None if not use_dilation else np.array([[1, 1], [1, 1]])
+
+    def polygons_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
+        """_bitmap: single map with shape (1, H, W), whose values are binarized as {0, 1}"""
+
+        bitmap = _bitmap
+        height, width = bitmap.shape
+
+        boxes = []
+        scores = []
+
+        contours, _ = cv2.findContours(
+            (bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
+        )
+
+        for contour in contours[: self.max_candidates]:
+            epsilon = 0.002 * cv2.arcLength(contour, True)
+            approx = cv2.approxPolyDP(contour, epsilon, True)
+            points = approx.reshape((-1, 2))
+            if points.shape[0] < 4:
+                continue
+
+            score = self.box_score_fast(pred, points.reshape(-1, 2))
+            if self.box_thresh > score:
+                continue
+
+            if points.shape[0] > 2:
+                box = self.unclip(points, self.unclip_ratio)
+                if len(box) > 1:
+                    continue
+            else:
+                continue
+            box = box.reshape(-1, 2)
+
+            if len(box) > 0:
+                _, sside = self.get_mini_boxes(box.reshape((-1, 1, 2)))
+                if sside < self.min_size + 2:
+                    continue
+            else:
+                continue
+
+            box = np.array(box)
+            box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
+            box[:, 1] = np.clip(
+                np.round(box[:, 1] / height * dest_height), 0, dest_height
+            )
+            boxes.append(box)
+            scores.append(score)
+        return boxes, scores
+
+    def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
+        """_bitmap: single map with shape (1, H, W), whose values are binarized as {0, 1}"""
+
+        bitmap = _bitmap
+        height, width = bitmap.shape
+
+        outs = cv2.findContours(
+            (bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
+        )
+        if len(outs) == 3:
+            img, contours, _ = outs[0], outs[1], outs[2]
+        elif len(outs) == 2:
+            contours, _ = outs[0], outs[1]
+
+        num_contours = min(len(contours), self.max_candidates)
+
+        boxes = []
+        scores = []
+        for index in range(num_contours):
+            contour = contours[index]
+            points, sside = self.get_mini_boxes(contour)
+            if sside < self.min_size:
+                continue
+            points = np.array(points)
+            if self.score_mode == "fast":
+                score = self.box_score_fast(pred, points.reshape(-1, 2))
+            else:
+                score = self.box_score_slow(pred, contour)
+            if self.box_thresh > score:
+                continue
+
+            box = self.unclip(points, self.unclip_ratio).reshape(-1, 1, 2)
+            box, sside = self.get_mini_boxes(box)
+            if sside < self.min_size + 2:
+                continue
+            box = np.array(box)
+
+            box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
+            box[:, 1] = np.clip(
+                np.round(box[:, 1] / height * dest_height), 0, dest_height
+            )
+            boxes.append(box.astype(np.int16))
+            scores.append(score)
+        return np.array(boxes, dtype=np.int16), scores
+
+    def unclip(self, box, unclip_ratio):
+        """unclip"""
+        poly = Polygon(box)
+        distance = poly.area * unclip_ratio / poly.length
+        offset = pyclipper.PyclipperOffset()
+        offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
+        try:
+            expanded = np.array(offset.Execute(distance))
+        except ValueError:
+            expanded = np.array(offset.Execute(distance)[0])
+        return expanded
+
+    def get_mini_boxes(self, contour):
+        """get mini boxes"""
+        bounding_box = cv2.minAreaRect(contour)
+        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])
+
+        index_1, index_2, index_3, index_4 = 0, 1, 2, 3
+        if points[1][1] > points[0][1]:
+            index_1 = 0
+            index_4 = 1
+        else:
+            index_1 = 1
+            index_4 = 0
+        if points[3][1] > points[2][1]:
+            index_2 = 2
+            index_3 = 3
+        else:
+            index_2 = 3
+            index_3 = 2
+
+        box = [points[index_1], points[index_2], points[index_3], points[index_4]]
+        return box, min(bounding_box[1])
+
+    def box_score_fast(self, bitmap, _box):
+        """box_score_fast: use bbox mean score as the mean score"""
+        h, w = bitmap.shape[:2]
+        box = _box.copy()
+        xmin = np.clip(np.floor(box[:, 0].min()).astype("int"), 0, w - 1)
+        xmax = np.clip(np.ceil(box[:, 0].max()).astype("int"), 0, w - 1)
+        ymin = np.clip(np.floor(box[:, 1].min()).astype("int"), 0, h - 1)
+        ymax = np.clip(np.ceil(box[:, 1].max()).astype("int"), 0, h - 1)
+
+        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
+        box[:, 0] = box[:, 0] - xmin
+        box[:, 1] = box[:, 1] - ymin
+        cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
+        return cv2.mean(bitmap[ymin : ymax + 1, xmin : xmax + 1], mask)[0]
+
+    def box_score_slow(self, bitmap, contour):
+        """box_score_slow: use polygon mean score as the mean score"""
+        h, w = bitmap.shape[:2]
+        contour = contour.copy()
+        contour = np.reshape(contour, (-1, 2))
+
+        xmin = np.clip(np.min(contour[:, 0]), 0, w - 1)
+        xmax = np.clip(np.max(contour[:, 0]), 0, w - 1)
+        ymin = np.clip(np.min(contour[:, 1]), 0, h - 1)
+        ymax = np.clip(np.max(contour[:, 1]), 0, h - 1)
+
+        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
+
+        contour[:, 0] = contour[:, 0] - xmin
+        contour[:, 1] = contour[:, 1] - ymin
+
+        cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype(np.int32), 1)
+        return cv2.mean(bitmap[ymin : ymax + 1, xmin : xmax + 1], mask)[0]
+
+    def __call__(self, preds, img_shapes):
+        """apply"""
+        boxes, scores = [], []
+        for pred, img_shape in zip(preds[0], img_shapes):
+            box, score = self.process(pred, img_shape)
+            boxes.append(box)
+            scores.append(score)
+        return boxes, scores
+
+    def process(self, pred, img_shape):
+        pred = pred[0, :, :]
+        segmentation = pred > self.thresh
+
+        src_h, src_w, ratio_h, ratio_w = img_shape
+        if self.dilation_kernel is not None:
+            mask = cv2.dilate(
+                np.array(segmentation).astype(np.uint8),
+                self.dilation_kernel,
+            )
+        else:
+            mask = segmentation
+        if self.box_type == "poly":
+            boxes, scores = self.polygons_from_bitmap(pred, mask, src_w, src_h)
+        elif self.box_type == "quad":
+            boxes, scores = self.boxes_from_bitmap(pred, mask, src_w, src_h)
+        else:
+            raise ValueError("box_type can only be one of ['quad', 'poly']")
+        return boxes, scores
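
`DBPostProcess` can also be exercised directly on a synthetic probability map, which is handy when debugging box extraction (requires `pyclipper` and `shapely`, as imported above; the array shapes follow the `process()` call in the predictor):

    import numpy as np

    from paddlex.inference.models_new.text_detection.processors import DBPostProcess

    H = W = 160
    prob = np.zeros((1, H, W), dtype=np.float32)   # one (1, H, W) map per image
    prob[0, 40:120, 40:120] = 1.0                  # one bright rectangular region

    post = DBPostProcess(thresh=0.3, box_thresh=0.7, box_type="quad")
    # preds[0] has shape (batch, 1, H, W); img_shapes holds [src_h, src_w, ratio_h, ratio_w].
    boxes, scores = post([prob[np.newaxis, ...]], [np.array([H, W, 1.0, 1.0])])
    print(boxes[0].shape)                          # (1, 4, 2): one quadrilateral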

+ 33 - 0
paddlex/inference/models_new/text_detection/result.py

@@ -0,0 +1,33 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import cv2
+
+from ...common.result import BaseCVResult
+
+
+class TextDetResult(BaseCVResult):
+
+    def __init__(self, data):
+        super().__init__(data)
+
+    def _to_img(self):
+        """draw rectangle"""
+        boxes = self["dt_polys"]
+        image = self._input_img
+        for box in boxes:
+            box = np.reshape(np.array(box).astype(int), [-1, 1, 2]).astype(np.int64)
+            cv2.polylines(image, [box], True, (0, 0, 255), 2)
+        return image[:, :, ::-1]

+ 15 - 0
paddlex/inference/models_new/text_recognition/__init__.py

@@ -0,0 +1,15 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .predictor import TextRecPredictor

+ 106 - 0
paddlex/inference/models_new/text_recognition/predictor.py

@@ -0,0 +1,106 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ....utils.func_register import FuncRegister
+from ....modules.text_recognition.model_list import MODELS
+from ...common.batch_sampler import ImageBatchSampler
+from ...common.reader import ReadImage
+from ..common import (
+    Resize,
+    ResizeByShort,
+    Normalize,
+    ToCHWImage,
+    ToBatch,
+    StaticInfer,
+)
+from ..base import BasicPredictor
+from .processors import OCRReisizeNormImg, CTCLabelDecode
+from .result import TextRecResult
+
+
+class TextRecPredictor(BasicPredictor):
+
+    entities = MODELS
+
+    _FUNC_MAP = {}
+    register = FuncRegister(_FUNC_MAP)
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.pre_tfs, self.infer, self.post_op = self._build()
+
+    def _build_batch_sampler(self):
+        return ImageBatchSampler()
+
+    def _get_result_class(self):
+        return TextRecResult
+
+    def _build(self):
+        pre_tfs = {"Read": ReadImage(format="RGB")}
+        for cfg in self.config["PreProcess"]["transform_ops"]:
+            tf_key = list(cfg.keys())[0]
+            assert tf_key in self._FUNC_MAP
+            func = self._FUNC_MAP[tf_key]
+            args = cfg.get(tf_key, {})
+            name, op = func(self, **args) if args else func(self)
+            if op:
+                pre_tfs[name] = op
+        pre_tfs["ToBatch"] = ToBatch()
+
+        infer = StaticInfer(
+            model_dir=self.model_dir,
+            model_prefix=self.MODEL_FILE_PREFIX,
+            option=self.pp_option,
+        )
+
+        post_op = self.build_postprocess(**self.config["PostProcess"])
+        return pre_tfs, infer, post_op
+
+    def process(self, batch_data):
+        batch_raw_imgs = self.pre_tfs["Read"](imgs=batch_data)
+        batch_imgs = self.pre_tfs["ReisizeNorm"](imgs=batch_raw_imgs)
+        x = self.pre_tfs["ToBatch"](imgs=batch_imgs)
+        batch_preds = self.infer(x=x)
+        texts, scores = self.post_op(batch_preds)
+        return {
+            "input_path": batch_data,
+            "input_img": batch_raw_imgs,
+            "rec_text": texts,
+            "rec_score": scores,
+        }
+
+    @register("DecodeImage")
+    def build_readimg(self, channel_first, img_mode):
+        assert not channel_first
+        return "Read", ReadImage(format=img_mode)
+
+    @register("RecResizeImg")
+    def build_resize(self, image_shape):
+        return "ReisizeNorm", OCRReisizeNormImg(rec_image_shape=image_shape)
+
+    def build_postprocess(self, **kwargs):
+        if kwargs.get("name") == "CTCLabelDecode":
+            return CTCLabelDecode(
+                character_list=kwargs.get("character_dict"),
+            )
+        else:
+            raise Exception(
+                f"Unsupported postprocess op: {kwargs.get('name')}"
+            )
+
+    @register("MultiLabelEncode")
+    def foo(self, *args, **kwargs):
+        return None, None
+
+    @register("KeepKeys")
+    def foo(self, *args, **kwargs):
+        return None, None
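
For completeness, the config shape implied by the builders above (placeholder values; the real character list comes from the exported model):

    config = {
        "PreProcess": {
            "transform_ops": [
                {"DecodeImage": {"channel_first": False, "img_mode": "BGR"}},
                {"RecResizeImg": {"image_shape": [3, 48, 320]}},
                {"KeepKeys": {"keep_keys": ["image"]}},  # training-time op, mapped to None
            ]
        },
        "PostProcess": {"name": "CTCLabelDecode", "character_dict": ["a", "b", "c"]},
    }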

+ 186 - 0
paddlex/inference/models_new/text_recognition/processors.py

@@ -0,0 +1,186 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import os.path as osp
+
+import re
+import numpy as np
+from PIL import Image
+import cv2
+import math
+import json
+import tempfile
+from tokenizers import Tokenizer as TokenizerFast
+
+from ....utils import logging
+
+
+class OCRReisizeNormImg:
+    """Resize and pad images to the recognizer's input shape, then normalize to [-1, 1]."""
+
+    def __init__(self, rec_image_shape=[3, 48, 320]):
+        super().__init__()
+        self.rec_image_shape = rec_image_shape
+
+    def resize_norm_img(self, img, max_wh_ratio):
+        """resize and normalize the img"""
+        imgC, imgH, imgW = self.rec_image_shape
+        assert imgC == img.shape[2]
+        imgW = int((imgH * max_wh_ratio))
+
+        h, w = img.shape[:2]
+        ratio = w / float(h)
+        if math.ceil(imgH * ratio) > imgW:
+            resized_w = imgW
+        else:
+            resized_w = int(math.ceil(imgH * ratio))
+        resized_image = cv2.resize(img, (resized_w, imgH))
+        resized_image = resized_image.astype("float32")
+        resized_image = resized_image.transpose((2, 0, 1)) / 255
+        resized_image -= 0.5
+        resized_image /= 0.5
+        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
+        padding_im[:, :, 0:resized_w] = resized_image
+        return padding_im
+
+    def __call__(self, imgs):
+        """apply"""
+        return [self.resize(img) for img in imgs]
+
+    def resize(self, img):
+        imgC, imgH, imgW = self.rec_image_shape
+        max_wh_ratio = imgW / imgH
+        h, w = img.shape[:2]
+        wh_ratio = w * 1.0 / h
+        max_wh_ratio = max(max_wh_ratio, wh_ratio)
+        img = self.resize_norm_img(img, max_wh_ratio)
+        return img
+
+
+class BaseRecLabelDecode:
+    """Convert between text-label and text-index"""
+
+    def __init__(self, character_str=None, use_space_char=True):
+        super().__init__()
+        self.reverse = False
+        character_list = (
+            list(character_str)
+            if character_str is not None
+            else list("0123456789abcdefghijklmnopqrstuvwxyz")
+        )
+        if use_space_char:
+            character_list.append(" ")
+
+        character_list = self.add_special_char(character_list)
+        self.dict = {}
+        for i, char in enumerate(character_list):
+            self.dict[char] = i
+        self.character = character_list
+
+    def pred_reverse(self, pred):
+        """pred_reverse"""
+        pred_re = []
+        c_current = ""
+        for c in pred:
+            if not bool(re.search("[a-zA-Z0-9 :*./%+-]", c)):
+                if c_current != "":
+                    pred_re.append(c_current)
+                pred_re.append(c)
+                c_current = ""
+            else:
+                c_current += c
+        if c_current != "":
+            pred_re.append(c_current)
+
+        return "".join(pred_re[::-1])
+
+    def add_special_char(self, character_list):
+        """add_special_char"""
+        return character_list
+
+    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
+        """convert text-index into text-label."""
+        result_list = []
+        ignored_tokens = self.get_ignored_tokens()
+        batch_size = len(text_index)
+        for batch_idx in range(batch_size):
+            selection = np.ones(len(text_index[batch_idx]), dtype=bool)
+            if is_remove_duplicate:
+                selection[1:] = text_index[batch_idx][1:] != text_index[batch_idx][:-1]
+            for ignored_token in ignored_tokens:
+                selection &= text_index[batch_idx] != ignored_token
+
+            char_list = [
+                self.character[text_id] for text_id in text_index[batch_idx][selection]
+            ]
+            if text_prob is not None:
+                conf_list = text_prob[batch_idx][selection]
+            else:
+                conf_list = [1] * len(selection)
+            if len(conf_list) == 0:
+                conf_list = [0]
+
+            text = "".join(char_list)
+
+            if self.reverse:  # for arabic rec
+                text = self.pred_reverse(text)
+
+            result_list.append((text, np.mean(conf_list).tolist()))
+        return result_list
+
+    def get_ignored_tokens(self):
+        """get_ignored_tokens"""
+        return [0]  # for ctc blank
+
+    def __call__(self, pred):
+        """apply"""
+        if isinstance(pred, (tuple, list)):
+            pred = pred[-1]
+        preds = np.array(pred)
+        preds_idx = preds.argmax(axis=-1)
+        preds_prob = preds.max(axis=-1)
+        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
+        texts = []
+        scores = []
+        for t in text:
+            texts.append(t[0])
+            scores.append(t[1])
+        return texts, scores
+
+
+class CTCLabelDecode(BaseRecLabelDecode):
+    """Convert between text-label and text-index"""
+
+    def __init__(self, character_list=None, use_space_char=True):
+        super().__init__(character_list, use_space_char=use_space_char)
+
+    def __call__(self, pred):
+        """apply"""
+        preds = np.array(pred[0])
+        preds_idx = preds.argmax(axis=-1)
+        preds_prob = preds.max(axis=-1)
+        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
+        texts = []
+        scores = []
+        for t in text:
+            texts.append(t[0])
+            scores.append(t[1])
+        return texts, scores
+
+    def add_special_char(self, character_list):
+        """add_special_char"""
+        character_list = ["blank"] + character_list
+        return character_list
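
CTC decoding here is greedy: take the argmax class per time step, collapse consecutive repeats, then drop the blank token that `add_special_char` places at index 0. A small worked example, assuming `paddlex` with this refactor is importable so the new module path resolves (the fake logits are purely illustrative):

    import numpy as np

    from paddlex.inference.models_new.text_recognition.processors import CTCLabelDecode

    decoder = CTCLabelDecode()            # default charset: blank + "0-9a-z" + space
    num_classes = len(decoder.character)  # 38

    # One image, 4 time steps; class ids 11 and 12 are "a" and "b", 0 is the CTC blank.
    logits = np.zeros((1, 4, num_classes), dtype=np.float32)
    for t, cls in enumerate([11, 11, 0, 12]):
        logits[0, t, cls] = 1.0

    texts, scores = decoder([logits])     # __call__ expects the raw output list, logits first
    print(texts, scores)                  # ['ab'] [1.0]  (repeats collapsed, blank removed)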

+ 64 - 0
paddlex/inference/models_new/text_recognition/result.py

@@ -0,0 +1,64 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import PIL
+from PIL import Image, ImageDraw, ImageFont
+
+from ....utils.fonts import PINGFANG_FONT_FILE_PATH
+from ...common.result import BaseCVResult
+
+
+class TextRecResult(BaseCVResult):
+
+    def _to_img(self):
+        """Draw label on image"""
+        image = Image.fromarray(self._input_img)
+        rec_text = self["rec_text"]
+        rec_score = self["rec_score"]
+        image = image.convert("RGB")
+        image_width, image_height = image.size
+        text = f"{rec_text} ({rec_score})"
+        font = self.adjust_font_size(image_width, text, PINGFANG_FONT_FILE_PATH)
+        row_height = font.getbbox(text)[3]
+        new_image_height = image_height + int(row_height * 1.2)
+        new_image = Image.new("RGB", (image_width, new_image_height), (255, 255, 255))
+        new_image.paste(image, (0, 0))
+
+        draw = ImageDraw.Draw(new_image)
+        draw.text(
+            (0, image_height),
+            text,
+            fill=(0, 0, 0),
+            font=font,
+        )
+        return new_image
+
+    def adjust_font_size(self, image_width, text, font_path):
+        font_size = int(image_width * 0.06)
+        font = ImageFont.truetype(font_path, font_size)
+
+        if int(PIL.__version__.split(".")[0]) < 10:
+            text_width, _ = font.getsize(text)
+        else:
+            text_width, _ = font.getbbox(text)[2:]
+
+        while text_width > image_width:
+            font_size -= 1
+            font = ImageFont.truetype(font_path, font_size)
+            if int(PIL.__version__.split(".")[0]) < 10:
+                text_width, _ = font.getsize(text)
+            else:
+                text_width, _ = font.getbbox(text)[2:]
+
+        return font

+ 2 - 12
paddlex/inference/pipelines_new/components/common/crop_image_regions.py

@@ -95,24 +95,14 @@ class CropByPolys(BaseOperator):
             for bno in range(len(dt_boxes)):
                 tmp_box = copy.deepcopy(dt_boxes[bno])
                 img_crop = self.get_minarea_rect_crop(img, tmp_box)
-                output_list.append(
-                    {
-                        "img": img_crop,
-                        "img_size": [img_crop.shape[1], img_crop.shape[0]],
-                    }
-                )
+                output_list.append(img_crop)
         elif self.det_box_type == "poly":
             output_list = []
             dt_boxes = dt_polys
             for bno in range(len(dt_boxes)):
                 tmp_box = copy.deepcopy(dt_boxes[bno])
                 img_crop = self.get_poly_rect_crop(img.copy(), tmp_box)
-                output_list.append(
-                    {
-                        "img": img_crop,
-                        "img_size": [img_crop.shape[1], img_crop.shape[0]],
-                    }
-                )
+                output_list.append(img_crop)
         else:
             raise NotImplementedError
 

+ 12 - 28
paddlex/inference/pipelines_new/ocr/pipeline.py

@@ -12,16 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ..base import BasePipeline
 from typing import Any, Dict, Optional
-from ..components import SortQuadBoxes, SortPolyBoxes, CropByPolys
-from .result import OCRResult
-
-########## [TODO]后续需要更新路径
-from ...components.transforms import ReadImage
+import numpy as np
 
+from ...common.reader import ReadImage
+from ...common.batch_sampler import ImageBatchSampler
 from ...utils.pp_option import PaddlePredictorOption
-import numpy as np
+from ..base import BasePipeline
+from ..components import CropByPolys, SortQuadBoxes, SortPolyBoxes
+from .result import OCRResult
 
 
 class OCRPipeline(BasePipeline):
@@ -68,6 +67,7 @@ class OCRPipeline(BasePipeline):
         else:
             raise ValueError("Unsupported text type {}".format(self.text_type))
 
+        self.batch_sampler = ImageBatchSampler(batch_size=1)
         self.img_reader = ReadImage(format="BGR")
 
     def predict(
@@ -82,21 +82,10 @@ class OCRPipeline(BasePipeline):
         Returns:
             OCRResult: An iterable of OCRResult objects, each containing the predicted text and other relevant information.
         """
-        if not isinstance(input, list):
-            input_list = [input]
-        else:
-            input_list = input
-
-        img_id = 1
-        for input in input_list:
-            if isinstance(input, str):
-                image_array = next(self.img_reader(input))[0]["img"]
-            else:
-                image_array = input
 
-            assert len(image_array.shape) == 3
-
-            det_res = next(self.text_det_model(image_array))
+        for img_id, batch_data in enumerate(self.batch_sampler(input)):
+            raw_img = self.img_reader(batch_data)[0]
+            det_res = next(self.text_det_model(raw_img))
 
             dt_polys = det_res["dt_polys"]
             dt_scores = det_res["dt_scores"]
@@ -106,7 +95,7 @@ class OCRPipeline(BasePipeline):
             dt_polys = self._sort_boxes(dt_polys)
 
             single_img_res = {
-                "input_img": image_array,
+                "input_img": raw_img,
                 "dt_polys": dt_polys,
                 "img_id": img_id,
                 "text_type": self.text_type,
@@ -115,12 +104,7 @@ class OCRPipeline(BasePipeline):
             single_img_res["rec_text"] = []
             single_img_res["rec_score"] = []
             if len(dt_polys) > 0:
-                all_subs_of_img = list(self._crop_by_polys(image_array, dt_polys))
-
-                ########## [TODO] Update in the future
-                for sub_img in all_subs_of_img:
-                    sub_img["input"] = sub_img["img"]
-                ##########
+                all_subs_of_img = list(self._crop_by_polys(raw_img, dt_polys))
 
                 for rec_res in self.text_rec_model(all_subs_of_img):
                     single_img_res["rec_text"].append(rec_res["rec_text"])
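
With the batch sampler and image reader now owned by the pipeline, callers keep passing a path, a list of paths, or an ndarray straight to `predict()` and iterate over `OCRResult` objects. A hedged end-to-end sketch (the pipeline name "OCR", the sample filename, and the `save_to_img` helper are assumptions based on existing PaddleX conventions rather than guarantees of this diff):

    from paddlex.inference import create_pipeline

    pipeline = create_pipeline(pipeline="OCR")            # assumed registered pipeline name
    for res in pipeline.predict("general_ocr_002.png"):   # illustrative input path
        print(res["rec_text"], res["rec_score"])
        res.save_to_img("./output/")                      # assumed result-mixin helper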

+ 28 - 11
paddlex/inference/utils/benchmark.py

@@ -20,19 +20,23 @@ from pathlib import Path
 import numpy as np
 from prettytable import PrettyTable
 
-from ...utils.flags import INFER_BENCHMARK_OUTPUT
+from ...utils.flags import INFER_BENCHMARK, INFER_BENCHMARK_OUTPUT
+from ...utils.misc import Singleton
 from ...utils import logging
 
 
-class Benchmark:
-    def __init__(self, components):
-        self._components = components
+class Benchmark(metaclass=Singleton):
+    def __init__(self):
+        self._components = {}
         self._warmup_start = None
         self._warmup_elapse = None
         self._warmup_num = None
         self._e2e_tic = None
         self._e2e_elapse = None
 
+    def attach(self, component):
+        self._components[component.name] = component
+
     def start(self):
         self._warmup_start = time.time()
         self._reset()
@@ -51,13 +55,18 @@ class Benchmark:
         if cmps is None:
             return
         for name, cmp in cmps.items():
-            if cmp.sub_cmps is not None:
-                yield from self.iterate_cmp(cmp.sub_cmps)
+            if hasattr(cmp, "benchmark"):
+                yield from self.iterate_cmp(cmp.benchmark)
             yield name, cmp
 
     def gather(self, e2e_num):
         # lazy import for avoiding circular import
-        from ..components.paddle_predictor import BasePaddlePredictor
+        from ...utils.flags import NEW_PREDICTOR
+
+        if NEW_PREDICTOR:
+            from ..new_models.base import BasePaddlePredictor
+        else:
+            from ..models.common_components.paddle_predictor import BasePaddlePredictor
 
         detail = []
         summary = {"preprocess": 0, "inference": 0, "postprocess": 0}
@@ -65,7 +74,7 @@ class Benchmark:
         for name, cmp in self._components.items():
             if isinstance(cmp, BasePaddlePredictor):
                 # TODO(gaotingquan): show by hierarchy. Now dont show xxxPredictor benchmark info to ensure mutual exclusivity between components.
-                for name, sub_cmp in cmp.sub_cmps.items():
+                for name, sub_cmp in cmp.benchmark.items():
                     times = sub_cmp.timer.logs
                     counts = len(times)
                     avg = np.mean(times) * 1000
@@ -74,6 +83,8 @@ class Benchmark:
                     summary["inference"] += total
                 op_tag = "postprocess"
             else:
+                # TODO(gaotingquan): support sub_cmps for others
+                # if hasattr(cmp, "benchmark"):
                 times = cmp.timer.logs
                 counts = len(times)
                 avg = np.mean(times) * 1000
@@ -158,20 +169,23 @@ class Benchmark:
             save_dir = Path(INFER_BENCHMARK_OUTPUT)
             save_dir.mkdir(parents=True, exist_ok=True)
             csv_data = [detail_head, *detail]
-            # csv_data.extend(detail)
             with open(Path(save_dir) / "detail.csv", "w", newline="") as file:
                 writer = csv.writer(file)
                 writer.writerows(csv_data)
 
             csv_data = [summary_head, *summary]
-            # csv_data.extend(summary)
             with open(Path(save_dir) / "summary.csv", "w", newline="") as file:
                 writer = csv.writer(file)
                 writer.writerows(csv_data)
 
 
 class Timer:
-    def __init__(self):
+    def __init__(self, component):
+        from ..new_models.base import BaseComponent
+
+        assert isinstance(component, BaseComponent)
+        benchmark.attach(component)
+        component.apply = self.watch_func(component.apply)
         self._tic = None
         self._elapses = []
 
@@ -212,3 +226,6 @@ class Timer:
     @property
     def logs(self):
         return self._elapses
+
+
+benchmark = Benchmark() if INFER_BENCHMARK else None
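
The module-level `benchmark = Benchmark() if INFER_BENCHMARK else None` works as a process-wide registry only because `Singleton` makes every `Benchmark()` call return the same object, which is what lets each `Timer` attach its component without the instance being passed around. A minimal sketch of that metaclass contract (the real `paddlex.utils.misc.Singleton` may differ in detail):

    # Minimal Singleton metaclass sketch; assumed to match the behaviour relied on above.
    class Singleton(type):
        _instances = {}

        def __call__(cls, *args, **kwargs):
            if cls not in cls._instances:
                # First call constructs the instance; later calls reuse it.
                cls._instances[cls] = super().__call__(*args, **kwargs)
            return cls._instances[cls]


    class ToyBenchmark(metaclass=Singleton):
        def __init__(self):
            self.components = {}


    a, b = ToyBenchmark(), ToyBenchmark()
    a.components["Resize"] = object()
    assert a is b and "Resize" in b.components  # one shared registry per process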

+ 2 - 0
paddlex/utils/flags.py

@@ -27,6 +27,7 @@ __all__ = [
     "INFER_BENCHMARK_DATA_SIZE",
     "FLAGS_json_format_model",
     "USE_NEW_INFERENCE",
+    "NEW_PREDICTOR",
 ]
 
 
@@ -48,6 +49,7 @@ CHECK_OPTS = get_flag_from_env_var("PADDLE_PDX_CHECK_OPTS", False)
 EAGER_INITIALIZATION = get_flag_from_env_var("PADDLE_PDX_EAGER_INIT", True)
 FLAGS_json_format_model = get_flag_from_env_var("FLAGS_json_format_model", None)
 USE_NEW_INFERENCE = get_flag_from_env_var("USE_NEW_INFERENCE", False)
+NEW_PREDICTOR = get_flag_from_env_var("PADDLE_PDX_NEW_PREDICTOR", False)
 
 # Inference Benchmark
 INFER_BENCHMARK = get_flag_from_env_var("PADDLE_PDX_INFER_BENCHMARK", None)
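
Both switches are read from environment variables when `paddlex.utils.flags` is first imported, so they can be toggled per run without code changes. A small sketch (treating "True" as an accepted truthy spelling is an assumption about `get_flag_from_env_var`):

    import os

    # Opt in to the refactored predictors and pipelines before importing paddlex.
    os.environ["PADDLE_PDX_NEW_PREDICTOR"] = "True"   # -> NEW_PREDICTOR
    os.environ["USE_NEW_INFERENCE"] = "True"          # -> USE_NEW_INFERENCE

    import paddlex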

+ 1 - 2
paddlex/utils/logging.py

@@ -54,8 +54,7 @@ def debug(msg, *args, **kwargs):
         else:
             caller_info = f"{caller_func_name}"
         msg = f"【{caller_info}】{msg}"
-
-    _logger.debug(msg, *args, **kwargs)
+        _logger.debug(msg, *args, **kwargs)
 
 
 def info(msg, *args, **kwargs):