gaotingquan 1 year ago
commit 17a385b3cd

+ 0 - 1
paddlex/inference/models/base/base_predictor.py

@@ -20,7 +20,6 @@ from abc import abstractmethod
 from ...components.base import BaseComponent
 from ...utils.pp_option import PaddlePredictorOption
 from ...utils.process_hook import generatorable_method
-from ..utils.predict_set import DeviceSetMixin, PPOptionSetMixin, BatchSizeSetMixin


 class BasePredictor(BaseComponent):

+ 8 - 21
paddlex/inference/models/base/basic_predictor.py

@@ -20,15 +20,11 @@ from ....utils import logging
 from ...components.base import BaseComponent, ComponentsEngine
 from ...utils.pp_option import PaddlePredictorOption
 from ...utils.process_hook import generatorable_method
-from ..utils.predict_set import DeviceSetMixin, PPOptionSetMixin, BatchSizeSetMixin
 from .base_predictor import BasePredictor


 class BasicPredictor(
     BasePredictor,
-    DeviceSetMixin,
-    PPOptionSetMixin,
-    BatchSizeSetMixin,
     metaclass=AutoRegisterABCMetaClass,
 ):

@@ -40,7 +36,7 @@ class BasicPredictor(
             pp_option = PaddlePredictorOption(model_name=self.model_name)
         if device:
             pp_option.device = device
-        self._pp_option = pp_option
+        self.pp_option = pp_option

         self.components = {}
         self._build_components()
@@ -73,27 +69,18 @@ class BasicPredictor(
             ), f"The key ({key}) has been used: {self.components}!"
             self.components[key] = cmp

-    def set_predictor(self, **kwargs):
-        for k in kwargs:
-            if self._has_setter(k):
-                setattr(self, k, kwargs[k])
-            else:
-                raise Exception(
-                    f"The arg({k}) is not supported to specify in predict() func! Only supports: {self._get_settable_attributes()}"
-                )
+    def set_predictor(self, batch_size=None, device=None, pp_option=None):
+        if batch_size:
+            self.components["ReadCmp"].batch_size = batch_size
+        if device:
+            self.pp_option.device = device
+        if pp_option:
+            self.pp_option = pp_option

     def _has_setter(self, attr):
         prop = getattr(self.__class__, attr, None)
         return isinstance(prop, property) and prop.fset is not None

-    @classmethod
-    def _get_settable_attributes(cls):
-        return [
-            name
-            for name, obj in inspect.getmembers(cls, lambda o: isinstance(o, property))
-            if obj.fset is not None
-        ]
-
     @abstractmethod
     def _build_components(self):
         raise NotImplementedError
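Note: with this change, runtime options go through explicit keyword arguments on `set_predictor()` rather than property setters discovered by reflection. A minimal usage sketch, assuming a predictor instance from the `create_predictor` factory used elsewhere in this commit (model name and input path are illustrative):

    predictor = create_predictor(model="PP-OCRv4_mobile_rec")  # hypothetical model name
    predictor.set_predictor(batch_size=2, device="gpu:0")
    for res in predictor("./example.png"):
        print(res)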

+ 0 - 13
paddlex/inference/models/utils/__init__.py

@@ -1,13 +0,0 @@
-# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.

+ 0 - 43
paddlex/inference/models/utils/predict_set.py

@@ -1,43 +0,0 @@
-# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class BatchSizeSetMixin:
-    @property
-    def batch_size(self):
-        return self.components["ReadCmp"].batch_size
-
-    @batch_size.setter
-    def batch_size(self, batch_size):
-        self.components["ReadCmp"].batch_size = batch_size
-
-
-class DeviceSetMixin:
-    @property
-    def device(self):
-        return self._pp_option.device
-
-    @device.setter
-    def device(self, device):
-        self._pp_option.device = device
-
-
-class PPOptionSetMixin:
-    @property
-    def pp_option(self):
-        return self._pp_option
-
-    @pp_option.setter
-    def pp_option(self, pp_option):
-        self._pp_option = pp_option

+ 4 - 6
paddlex/inference/pipelines/__init__.py

@@ -71,20 +71,18 @@ def create_pipeline_from_config(
         pipeline_setting.pop("hpi_params", None)
     elif "hpi_params" in pipeline_setting:
         predictor_kwargs["hpi_params"] = pipeline_setting.pop("hpi_params")
-    if device is not None:
-        predictor_kwargs["device"] = device
-        pipeline_setting.pop("device", None)
-    elif "device" in pipeline_setting:
-        predictor_kwargs["device"] = pipeline_setting.pop("device")
+
     if pp_option is not None:
         predictor_kwargs["pp_option"] = pp_option
         pipeline_setting.pop("pp_option", None)
     elif "pp_option" in pipeline_setting:
         predictor_kwargs["pp_option"] = pipeline_setting.pop("pp_option")

+    device = device if device else pipeline_setting.pop("device", None)
+
    pipeline_setting.update(kwargs)
    pipeline = BasePipeline.get(pipeline_name)(
-        predictor_kwargs=predictor_kwargs, *args, **pipeline_setting
+        device=device, predictor_kwargs=predictor_kwargs, *args, **pipeline_setting
    )

    return pipeline
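Net effect: an explicit `device` argument now wins over any `device` key in the pipeline config, and it is handed to the pipeline constructor itself instead of being folded into `predictor_kwargs`. A hedged sketch, assuming the top-level `create_pipeline` entry point forwards its arguments to `create_pipeline_from_config` (pipeline name and input path are illustrative):

    from paddlex import create_pipeline  # assumed public factory

    pipeline = create_pipeline(pipeline="OCR", device="gpu:0")  # overrides the config's device
    for res in pipeline.predict("./example.png"):
        print(res)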

+ 11 - 3
paddlex/inference/pipelines/base.py

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from abc import ABC
+from abc import ABC, abstractmethod
 from typing import Any, Dict, Optional

 from ...utils.subclass_register import AutoRegisterABCMetaClass
@@ -28,6 +28,7 @@ class BasePipeline(ABC, metaclass=AutoRegisterABCMetaClass):
         super().__init__()
         self._predictor_kwargs = {} if predictor_kwargs is None else predictor_kwargs

+    @abstractmethod
     def set_predictor():
         raise NotImplementedError(
             "The method `set_predictor` has not been implemented yet."
@@ -37,5 +38,12 @@ class BasePipeline(ABC, metaclass=AutoRegisterABCMetaClass):
     def __call__(self, *args, **kwargs):
         yield from self.predict(*args, **kwargs)

-    def _create_model(self, *args, **kwargs):
-        return create_predictor(*args, **kwargs, **self._predictor_kwargs)
+    def _create(self, model=None, pipeline=None, *args, **kwargs):
+        if model:
+            return create_predictor(
+                model=model, *args, **kwargs, **self._predictor_kwargs
+            )
+        elif pipeline:
+            return pipeline(*args, **kwargs, predictor_kwargs=self._predictor_kwargs)
+        else:
+            raise Exception()
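The renamed `_create` helper now builds either a single-model predictor or a nested sub-pipeline, threading the shared `predictor_kwargs` through both paths. A minimal sketch of how a subclass might call it, mirroring the table-recognition pipeline later in this commit (class and model names are illustrative):

    class MyPipeline(BasePipeline):
        def _build_predictor(self, det_model, rec_model):
            # single-model predictor, routed through create_predictor(...)
            self.det_predictor = self._create(model=det_model)
            # nested sub-pipeline, reusing the same predictor_kwargs
            self.ocr_pipeline = self._create(
                pipeline=OCRPipeline,
                text_det_model=det_model,
                text_rec_model=rec_model,
            )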

+ 17 - 13
paddlex/inference/pipelines/formula_recognition.py

@@ -30,28 +30,36 @@ class FormulaRecognitionPipeline(BasePipeline):
         formula_rec_model,
         layout_batch_size=1,
         formula_rec_batch_size=1,
+        device=None,
         predictor_kwargs=None,
     ):
         super().__init__(predictor_kwargs=predictor_kwargs)
         self._build_predictor(layout_model, formula_rec_model)
-        self.set_predictor(layout_batch_size, formula_rec_batch_size)
+        self.set_predictor(
+            layout_batch_size=layout_batch_size,
+            formula_rec_batch_size=formula_rec_batch_size,
+            device=device,
+        )

     def _build_predictor(self, layout_model, formula_rec_model):
-        self.layout_predictor = self._create_model(layout_model)
-        self.formula_predictor = self._create_model(formula_rec_model)
+        self.layout_predictor = self._create(model=layout_model)
+        self.formula_predictor = self._create(model=formula_rec_model)
         self._crop_by_boxes = CropByBoxes()

-    def set_predictor(self, layout_batch_size=None, formula_rec_batch_size=None):
+    def set_predictor(
+        self, layout_batch_size=None, formula_rec_batch_size=None, device=None
+    ):
         if layout_batch_size:
             self.layout_predictor.set_predictor(batch_size=layout_batch_size)
         if formula_rec_batch_size:
             self.formula_predictor.set_predictor(batch_size=formula_rec_batch_size)
+        if device:
+            self.layout_predictor.set_predictor(device=device)
+            self.formula_predictor.set_predictor(device=device)

     def predict(self, x, **kwargs):
-        device = kwargs.get("device", None)
-        for layout_pred in self.layout_predictor(
-            x, batch_size=kwargs.get("layout_batch_size", 1), device=device
-        ):
+        self.set_predictor(**kwargs)
+        for layout_pred in self.layout_predictor(x):
             single_img_res = {
                 "input_path": "",
                 "layout_result": {},
@@ -78,11 +86,7 @@ class FormulaRecognitionPipeline(BasePipeline):
                         all_subs_of_formula_img.append(sub["img"])
                         single_img_res["dt_polys"].append(poly)
                 if len(all_subs_of_formula_img) > 0:
-                    for formula_res in self.formula_predictor(
-                        all_subs_of_formula_img,
-                        batch_size=kwargs.get("formula_rec_batch_size", 1),
-                        device=device,
-                    ):
+                    for formula_res in self.formula_predictor(all_subs_of_formula_img):
                         single_img_res["rec_formula"].append(
                             str(formula_res["rec_text"])
                         )

+ 17 - 13
paddlex/inference/pipelines/ocr.py

@@ -29,15 +29,20 @@ class OCRPipeline(BasePipeline):
         text_rec_model,
         text_det_batch_size=1,
         text_rec_batch_size=1,
+        device=None,
         predictor_kwargs=None,
     ):
         super().__init__(predictor_kwargs=predictor_kwargs)
         self._build_predictor(text_det_model, text_rec_model)
-        self.set_predictor(text_det_batch_size, text_rec_batch_size)
+        self.set_predictor(
+            text_det_batch_size=text_det_batch_size,
+            text_rec_batch_size=text_rec_batch_size,
+            device=device,
+        )

     def _build_predictor(self, text_det_model, text_rec_model):
-        self.text_det_model = self._create_model(text_det_model)
-        self.text_rec_model = self._create_model(text_rec_model)
+        self.text_det_model = self._create(model=text_det_model)
+        self.text_rec_model = self._create(model=text_rec_model)
         self.is_curve = self.text_det_model.model_name in [
             "PP-OCRv4_mobile_seal_det",
             "PP-OCRv4_server_seal_det",
@@ -47,19 +52,22 @@ class OCRPipeline(BasePipeline):
             det_box_type="poly" if self.is_curve else "quad"
         )

-    def set_predictor(self, text_det_batch_size=None, text_rec_batch_size=None):
+    def set_predictor(
+        self, text_det_batch_size=None, text_rec_batch_size=None, device=None
+    ):
         if text_det_batch_size and text_det_batch_size > 1:
             logging.warning(
                 f"text det model only support batch_size=1 now,the setting of text_det_batch_size={text_det_batch_size} will not using! "
             )
         if text_rec_batch_size:
             self.text_rec_model.set_predictor(batch_size=text_rec_batch_size)
+        if device:
+            self.text_rec_model.set_predictor(device=device)
+            self.text_det_model.set_predictor(device=device)

     def predict(self, input, **kwargs):
-        device = kwargs.get("device", None)
-        for det_res in self.text_det_model(
-            input, batch_size=kwargs.get("det_batch_size", 1), device=device
-        ):
+        self.set_predictor(**kwargs)
+        for det_res in self.text_det_model(input):
             single_img_res = (
                 det_res if self.is_curve else next(self._sort_boxes(det_res))
             )
@@ -67,11 +75,7 @@ class OCRPipeline(BasePipeline):
             single_img_res["rec_score"] = []
             if len(single_img_res["dt_polys"]) > 0:
                 all_subs_of_img = list(self._crop_by_polys(single_img_res))
-                for rec_res in self.text_rec_model(
-                    all_subs_of_img,
-                    batch_size=kwargs.get("rec_batch_size", 1),
-                    device=device,
-                ):
+                for rec_res in self.text_rec_model(all_subs_of_img):
                     single_img_res["rec_text"].append(rec_res["rec_text"])
                     single_img_res["rec_score"].append(rec_res["rec_score"])
             yield OCRResult(single_img_res)
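With this refactor the OCR pipeline reads per-call overrides through `set_predictor()` instead of ad-hoc `predict()` kwargs, and the old `det_batch_size`/`rec_batch_size` names give way to `text_det_batch_size`/`text_rec_batch_size`. A hedged sketch of direct use (model names and input path are illustrative):

    pipeline = OCRPipeline(
        text_det_model="PP-OCRv4_mobile_det",
        text_rec_model="PP-OCRv4_mobile_rec",
        device="gpu:0",
    )
    for res in pipeline.predict("./example.png", text_rec_batch_size=8):
        print(res["rec_text"])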

+ 103 - 98
paddlex/inference/pipelines/ppchatocrv3/ppchatocrv3.py

@@ -24,7 +24,7 @@ from ....utils import logging
 from ...results import *
 from ...components.llm import ErnieBot
 from ...utils.io import ImageReader, PDFReader
-from ..table_recognition import TableRecPipeline
+from ..table_recognition import _TableRecPipeline
 from ...components.llm import create_llm_api, ErnieBot
 from ....utils.file_interface import read_yaml_file
 from ..table_recognition.utils import convert_4point2rect, get_ori_coordinate_for_table
@@ -32,7 +32,7 @@ from ..table_recognition.utils import convert_4point2rect, get_ori_coordinate_fo
 PROMPT_FILE = os.path.join(os.path.dirname(__file__), "ch_prompt.yaml")


-class PPChatOCRPipeline(TableRecPipeline):
+class PPChatOCRPipeline(_TableRecPipeline):
     """PP-ChatOCRv3 Pileline"""

     entities = "PP-ChatOCRv3-doc"
@@ -54,51 +54,38 @@ class PPChatOCRPipeline(TableRecPipeline):
         text_det_batch_size=1,
         text_rec_batch_size=1,
         table_batch_size=1,
-        uvdoc_batch_size=1,
-        curve_batch_size=1,
-        oricls_batch_size=1,
+        doc_image_ori_cls_batch_size=1,
+        doc_image_unwarp_batch_size=1,
+        seal_text_det_batch_size=1,
         recovery=True,
         device=None,
         predictor_kwargs=None,
     ):
-        self.layout_model = layout_model
-        self.text_det_model = text_det_model
-        self.text_rec_model = text_rec_model
-        self.table_model = table_model
-        self.doc_image_ori_cls_model = doc_image_ori_cls_model
-        self.doc_image_unwarp_model = doc_image_unwarp_model
-        self.seal_text_det_model = seal_text_det_model
-        self.llm_name = llm_name
-        self.llm_params = llm_params
-        self.task_prompt_yaml = task_prompt_yaml
-        self.user_prompt_yaml = user_prompt_yaml
-        self.layout_batch_size = layout_batch_size
-        self.text_det_batch_size = text_det_batch_size
-        self.text_rec_batch_size = text_rec_batch_size
-        self.table_batch_size = table_batch_size
-        self.uvdoc_batch_size = uvdoc_batch_size
-        self.curve_batch_size = curve_batch_size
-        self.oricls_batch_size = oricls_batch_size
-        self.recovery = recovery
-        self.device = device
-        self.predictor_kwargs = predictor_kwargs
         super().__init__(
+            predictor_kwargs=predictor_kwargs,
+        )
+        self._build_predictor(
             layout_model=layout_model,
             text_det_model=text_det_model,
             text_rec_model=text_rec_model,
             table_model=table_model,
+            doc_image_ori_cls_model=doc_image_ori_cls_model,
+            doc_image_unwarp_model=doc_image_unwarp_model,
+            seal_text_det_model=seal_text_det_model,
+            llm_name=llm_name,
+            llm_params=llm_params,
+        )
+        self.set_predictor(
             layout_batch_size=layout_batch_size,
             text_det_batch_size=text_det_batch_size,
             text_rec_batch_size=text_rec_batch_size,
             table_batch_size=table_batch_size,
-            predictor_kwargs=predictor_kwargs,
+            doc_image_ori_cls_batch_size=doc_image_ori_cls_batch_size,
+            doc_image_unwarp_batch_size=doc_image_unwarp_batch_size,
+            seal_text_det_batch_size=seal_text_det_batch_size,
+            device=device,
         )
-        self._build_predictor()
-        self.llm_api = create_llm_api(
-            llm_name,
-            llm_params,
-        )
-        self.cropper = CropByBoxes()
+
        # get base prompt from yaml info
        if task_prompt_yaml:
            self.task_prompt_dict = read_yaml_file(task_prompt_yaml)
@@ -110,44 +97,50 @@ class PPChatOCRPipeline(TableRecPipeline):
             self.user_prompt_dict = read_yaml_file(user_prompt_yaml)
         else:
             self.user_prompt_dict = None
+
         self.recovery = recovery
-        self.img_reader = ReadImage(format="RGB")
         self.visual_info = None
         self.vector = None
         self.visual_flag = False

-    def _build_predictor(self):
-        super()._build_predictor()
-        if self.seal_text_det_model:
-            self.curve_pipeline = OCRPipeline(
-                text_det_model=self.seal_text_det_model,
-                text_rec_model=self.text_rec_model,
-                text_det_batch_size=self.text_det_batch_size,
-                text_rec_batch_size=self.text_rec_batch_size,
-                predictor_kwargs=self.predictor_kwargs,
+    def _build_predictor(
+        self,
+        layout_model,
+        text_det_model,
+        text_rec_model,
+        table_model,
+        llm_name,
+        llm_params,
+        seal_text_det_model=None,
+        doc_image_ori_cls_model=None,
+        doc_image_unwarp_model=None,
+    ):
+        super()._build_predictor(
+            layout_model, text_det_model, text_rec_model, table_model
+        )
+        if seal_text_det_model:
+            self.curve_pipeline = self._create(
+                pipeline=OCRPipeline,
+                text_det_model=seal_text_det_model,
+                text_rec_model=text_rec_model,
             )
         else:
             self.curve_pipeline = None
-        if self.doc_image_ori_cls_model:
-            self.oricls_predictor = self._create_model(self.doc_image_ori_cls_model)
+        if doc_image_ori_cls_model:
+            self.oricls_predictor = self._create(doc_image_ori_cls_model)
         else:
             self.oricls_predictor = None
-        if self.doc_image_unwarp_model:
-            self.uvdoc_predictor = self._create_model(self.doc_image_unwarp_model)
+        if doc_image_unwarp_model:
+            self.uvdoc_predictor = self._create(doc_image_unwarp_model)
         else:
             self.uvdoc_predictor = None
-        if self.curve_pipeline and self.curve_batch_size:
-            self.curve_pipeline.text_det_model.set_predictor(
-                batch_size=self.curve_batch_size, device=self.device
-            )
-        if self.oricls_predictor and self.oricls_batch_size:
-            self.oricls_predictor.set_predictor(
-                batch_size=self.oricls_batch_size, device=self.device
-            )
-        if self.uvdoc_predictor and self.uvdoc_batch_size:
-            self.uvdoc_predictor.set_predictor(
-                batch_size=self.uvdoc_batch_size, device=self.device
-            )
+
+        self.img_reader = ReadImage(format="RGB")
+        self.llm_api = create_llm_api(
+            llm_name,
+            llm_params,
+        )
+        self.cropper = CropByBoxes()

     def set_predictor(
         self,
@@ -155,9 +148,9 @@ class PPChatOCRPipeline(TableRecPipeline):
         text_det_batch_size=None,
         text_rec_batch_size=None,
         table_batch_size=None,
-        curve_batch_size=None,
-        oricls_batch_size=None,
-        uvdoc_batch_size=None,
+        doc_image_ori_cls_batch_size=None,
+        doc_image_unwarp_batch_size=None,
+        seal_text_det_batch_size=None,
         device=None,
     ):
         if text_det_batch_size and text_det_batch_size > 1:
@@ -172,23 +165,46 @@ class PPChatOCRPipeline(TableRecPipeline):
             )
         if table_batch_size:
             self.table_predictor.set_predictor(batch_size=table_batch_size)
-        if self.curve_pipeline and curve_batch_size:
+        if self.curve_pipeline and seal_text_det_batch_size:
             self.curve_pipeline.text_det_model.set_predictor(
-                batch_size=curve_batch_size, device=device
-            )
-        if self.oricls_predictor and oricls_batch_size:
-            self.oricls_predictor.set_predictor(
-                batch_size=oricls_batch_size, device=device
+                batch_size=seal_text_det_batch_size
             )
-        if self.uvdoc_predictor and uvdoc_batch_size:
-            self.uvdoc_predictor.set_predictor(
-                batch_size=uvdoc_batch_size, device=device
-            )
-
-    def predict(self, input, **kwargs):
+        if self.oricls_predictor and doc_image_ori_cls_batch_size:
+            self.oricls_predictor.set_predictor(batch_size=doc_image_ori_cls_batch_size)
+        if self.uvdoc_predictor and doc_image_unwarp_batch_size:
+            self.uvdoc_predictor.set_predictor(batch_size=doc_image_unwarp_batch_size)
+
+        if device:
+            if self.curve_pipeline:
+                self.curve_pipeline.set_predictor(device=device)
+            if self.oricls_predictor:
+                self.oricls_predictor.set_predictor(device=device)
+            if self.uvdoc_predictor:
+                self.uvdoc_predictor.set_predictor(device=device)
+            self.layout_batch_size.set_predictor(device=device)
+            self.ocr_pipeline.set_predictor(device=device)
+
+    def predict(
+        self,
+        input,
+        use_doc_image_ori_cls_model=True,
+        use_doc_image_unwarp_model=True,
+        use_seal_text_det_model=True,
+        recovery=True,
+        **kwargs,
+    ):
+        self.set_predictor(**kwargs)
         visual_info = {"ocr_text": [], "table_html": [], "table_text": []}
         # get all visual result
-        visual_result = list(self.get_visual_result(input, **kwargs))
+        visual_result = list(
+            self.get_visual_result(
+                input,
+                use_doc_image_ori_cls_model=use_doc_image_ori_cls_model,
+                use_doc_image_unwarp_model=use_doc_image_unwarp_model,
+                use_seal_text_det_model=use_seal_text_det_model,
+                recovery=recovery,
+            )
+        )
         # decode visual result to get table_html, table_text, ocr_text
         ocr_text, table_text, table_html = self.decode_visual_result(visual_result)

@@ -202,32 +218,21 @@ class PPChatOCRPipeline(TableRecPipeline):

         return visual_result, visual_info

-    def get_visual_result(self, inputs, **kwargs):
-        layout_batch_size = kwargs.get("layout_batch_size")
-        text_det_batch_size = kwargs.get("text_det_batch_size")
-        text_rec_batch_size = kwargs.get("text_rec_batch_size")
-        table_batch_size = kwargs.get("table_batch_size")
-        curve_batch_size = kwargs.get("curve_batch_size")
-        oricls_batch_size = kwargs.get("oricls_batch_size")
-        uvdoc_batch_size = kwargs.get("uvdoc_batch_size")
-        device = kwargs.get("device")
-        self.set_predictor(
-            layout_batch_size,
-            text_det_batch_size,
-            text_rec_batch_size,
-            table_batch_size,
-            curve_batch_size,
-            oricls_batch_size,
-            uvdoc_batch_size,
-            device,
-        )
+    def get_visual_result(
+        self,
+        inputs,
+        use_doc_image_ori_cls_model=True,
+        use_doc_image_unwarp_model=True,
+        use_seal_text_det_model=True,
+        recovery=True,
+    ):
         # get oricls and uvdoc results
         img_info_list = list(self.img_reader(inputs))[0]
         oricls_results = []
-        if self.oricls_predictor and kwargs.get("use_doc_image_ori_cls_model", True):
+        if self.oricls_predictor and use_doc_image_ori_cls_model:
             oricls_results = get_oriclas_results(img_info_list, self.oricls_predictor)
         uvdoc_results = []
-        if self.uvdoc_predictor and kwargs.get("use_doc_image_unwarp_model", True):
+        if self.uvdoc_predictor and use_doc_image_unwarp_model:
             uvdoc_results = get_uvdoc_results(img_info_list, self.uvdoc_predictor)
         img_list = [img_info["img"] for img_info in img_info_list]
         for idx, (img_info, layout_pred) in enumerate(
@@ -269,7 +274,7 @@ class PPChatOCRPipeline(TableRecPipeline):
                     elif sub["label"].lower() == "seal":
                         curve_subs.append(sub)
                     else:
-                        if self.recovery and kwargs.get("recovery", True):
+                        if self.recovery and recovery:
                             # TODO: Why use the entire image?
                             wht_im = (
                                 np.ones(single_img.shape, dtype=single_img.dtype) * 255
@@ -303,7 +308,7 @@ class PPChatOCRPipeline(TableRecPipeline):
                         single_img[ymin:ymax, xmin:xmax, :] = 255

             curve_pipeline = self.ocr_pipeline
-            if self.curve_pipeline and kwargs.get("use_seal_text_det_model", True):
+            if self.curve_pipeline and use_seal_text_det_model:
                 curve_pipeline = self.curve_pipeline

             all_curve_res = get_ocr_res(curve_pipeline, curve_subs)
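Taken together, the PP-ChatOCRv3 visual stage now exposes the optional sub-models as explicit `use_*` switches on `predict()`, and the batch-size arguments follow the new model names (`doc_image_ori_cls_batch_size`, `doc_image_unwarp_batch_size`, `seal_text_det_batch_size`). A hedged call sketch (argument values and input path are illustrative):

    visual_result, visual_info = pipeline.predict(
        "./contract.pdf",
        use_doc_image_unwarp_model=False,
        use_seal_text_det_model=True,
        seal_text_det_batch_size=1,
        device="gpu:0",
    )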

+ 31 - 30
paddlex/inference/pipelines/seal_recognition.py

@@ -47,41 +47,48 @@ class SealOCRPipeline(BasePipeline):
         layout_batch_size=1,
         text_det_batch_size=1,
         text_rec_batch_size=1,
+        device=None,
         predictor_kwargs=None,
     ):
-        self.layout_model = layout_model
-        self.text_det_model = text_det_model
-        self.text_rec_model = text_rec_model
-        self.layout_batch_size = layout_batch_size
-        self.text_det_batch_size = text_det_batch_size
-        self.text_rec_batch_size = text_rec_batch_size
-        self.predictor_kwargs = predictor_kwargs
         super().__init__(predictor_kwargs=predictor_kwargs)
-        self._build_predictor()
+        self._build_predictor(
+            layout_model=layout_model,
+            text_det_model=text_det_model,
+            text_rec_model=text_rec_model,
+            layout_batch_size=layout_batch_size,
+            text_det_batch_size=text_det_batch_size,
+            text_rec_batch_size=text_rec_batch_size,
+        )
+        self.set_predictor(
+            layout_batch_size=layout_batch_size,
+            text_det_batch_size=text_det_batch_size,
+            text_rec_batch_size=text_rec_batch_size,
+            device=device,
+        )

     def _build_predictor(
         self,
+        layout_model,
+        text_det_model,
+        text_rec_model,
+        layout_batch_size,
+        text_det_batch_size,
+        text_rec_batch_size,
     ):
-        self.layout_predictor = self._create_model(model=self.layout_model)
-        self.ocr_pipeline = OCRPipeline(
-            text_det_model=self.text_det_model,
-            text_rec_model=self.text_rec_model,
-            text_det_batch_size=self.text_det_batch_size,
-            text_rec_batch_size=self.text_rec_batch_size,
-            predictor_kwargs=self.predictor_kwargs,
+        self.layout_predictor = self._create(model=layout_model)
+        self.ocr_pipeline = self._create(
+            pipeline=OCRPipeline,
+            text_det_model=text_det_model,
+            text_rec_model=text_rec_model,
         )
         self._crop_by_boxes = CropByBoxes()
-        self.layout_predictor.set_predictor(batch_size=self.layout_batch_size)
-        self.ocr_pipeline.text_rec_model.set_predictor(
-            batch_size=self.text_rec_batch_size
-        )

     def set_predictor(
         self,
         layout_batch_size=None,
         text_det_batch_size=None,
         text_rec_batch_size=None,
-        # device=None,
+        device=None,
     ):
         if text_det_batch_size and text_det_batch_size > 1:
             logging.warning(
@@ -93,18 +100,12 @@ class SealOCRPipeline(BasePipeline):
             self.ocr_pipeline.text_rec_model.set_predictor(
                 batch_size=text_rec_batch_size
             )
+        if device:
+            self.layout_predictor.set_predictor(device=device)
+            self.ocr_pipeline.set_predictor(device=device)

     def predict(self, x, **kwargs):
-        layout_batch_size = kwargs.get("layout_batch_size")
-        text_det_batch_size = kwargs.get("text_det_batch_size")
-        text_rec_batch_size = kwargs.get("text_rec_batch_size")
-        # device = kwargs.get("device")
-        self.set_predictor(
-            layout_batch_size,
-            text_det_batch_size,
-            text_rec_batch_size,
-            # device,
-        )
+        self.set_predictor(**kwargs)
         for layout_pred in self.layout_predictor(x):
             single_img_res = {
                 "input_path": "",

+ 10 - 36
paddlex/inference/pipelines/single_model_pipeline.py

@@ -17,86 +17,60 @@ from .base import BasePipeline


 class _SingleModelPipeline(BasePipeline):

-    def __init__(self, model, batch_size=1, predictor_kwargs=None):
+    def __init__(self, model, batch_size=1, device=None, predictor_kwargs=None):
         super().__init__(predictor_kwargs=predictor_kwargs)
         self._build_predictor(model)
-        self.set_predictor(batch_size)
+        self.set_predictor(batch_size=batch_size, device=device)

     def _build_predictor(self, model):
-        self.model = self._create_model(model)
+        self.model = self._create(model)

-    def set_predictor(self, batch_size):
-        self.model.set_predictor(batch_size=batch_size)
+    def set_predictor(self, batch_size=None, device=None):
+        if batch_size:
+            self.model.set_predictor(batch_size=batch_size)
+        if device:
+            self.model.set_predictor(device=device)

     def predict(self, input, **kwargs):
-        yield from self.model(input, **kwargs)
+        self.set_predictor(**kwargs)
+        yield from self.model(input)


 class ImageClassification(_SingleModelPipeline):
     entities = "image_classification"

-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-

 class ObjectDetection(_SingleModelPipeline):
     entities = "object_detection"

-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-

 class InstanceSegmentation(_SingleModelPipeline):
     entities = "instance_segmentation"

-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-

 class SemanticSegmentation(_SingleModelPipeline):
     entities = "semantic_segmentation"

-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-

 class TSFc(_SingleModelPipeline):
     entities = "ts_fc"

-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-

 class TSAd(_SingleModelPipeline):
     entities = "ts_ad"

-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-

 class TSCls(_SingleModelPipeline):
     entities = "ts_cls"

-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-

 class MultiLableImageClas(_SingleModelPipeline):
     entities = "multi_label_image_classification"

-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-

 class SmallObjDet(_SingleModelPipeline):
     entities = "small_object_detection"

-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-

 class AnomalyDetection(_SingleModelPipeline):
     entities = "anomaly_detection"
     entities = "anomaly_detection"
-
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)

+ 1 - 1
paddlex/inference/pipelines/table_recognition/__init__.py

@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
-from .table_recognition import TableRecPipeline
+from .table_recognition import _TableRecPipeline, TableRecPipeline

+ 48 - 36
paddlex/inference/pipelines/table_recognition/table_recognition.py

@@ -21,54 +21,31 @@ from ...components import CropByBoxes
 from ...results import OCRResult, TableResult, StructureTableResult


-class TableRecPipeline(BasePipeline):
+class _TableRecPipeline(BasePipeline):
     """Table Recognition Pipeline"""

-    entities = "table_recognition"
-
     def __init__(
         self,
-        layout_model,
-        text_det_model,
-        text_rec_model,
-        table_model,
-        layout_batch_size=1,
-        text_det_batch_size=1,
-        text_rec_batch_size=1,
-        table_batch_size=1,
         predictor_kwargs=None,
     ):
-        self.layout_model = layout_model
-        self.text_det_model = text_det_model
-        self.text_rec_model = text_rec_model
-        self.table_model = table_model
-        self.layout_batch_size = layout_batch_size
-        self.text_det_batch_size = text_det_batch_size
-        self.text_rec_batch_size = text_rec_batch_size
-        self.table_batch_size = table_batch_size
-        self.predictor_kwargs = predictor_kwargs
         super().__init__(predictor_kwargs=predictor_kwargs)
-        self._build_predictor()

     def _build_predictor(
         self,
+        layout_model,
+        text_det_model,
+        text_rec_model,
+        table_model,
     ):
-        self.layout_predictor = self._create_model(model=self.layout_model)
-        self.ocr_pipeline = OCRPipeline(
-            text_det_model=self.text_det_model,
-            text_rec_model=self.text_rec_model,
-            text_det_batch_size=self.text_det_batch_size,
-            text_rec_batch_size=self.text_rec_batch_size,
-            predictor_kwargs=self.predictor_kwargs,
+        self.layout_predictor = self._create(model=layout_model)
+        self.ocr_pipeline = self._create(
+            pipeline=OCRPipeline,
+            text_det_model=text_det_model,
+            text_rec_model=text_rec_model,
         )
-        self.table_predictor = self._create_model(model=self.table_model)
+        self.table_predictor = self._create(model=table_model)
         self._crop_by_boxes = CropByBoxes()
         self._match = TableMatch(filter_ocr_result=False)
-        self.layout_predictor.set_predictor(batch_size=self.layout_batch_size)
-        self.ocr_pipeline.text_rec_model.set_predictor(
-            batch_size=self.text_rec_batch_size
-        )
-        self.table_predictor.set_predictor(batch_size=self.table_batch_size)

     def set_predictor(
         self,
@@ -76,6 +53,7 @@ class TableRecPipeline(BasePipeline):
         text_det_batch_size=None,
         text_rec_batch_size=None,
         table_batch_size=None,
+        device=None,
     ):
         if text_det_batch_size and text_det_batch_size > 1:
             logging.warning(
@@ -89,10 +67,15 @@ class TableRecPipeline(BasePipeline):
             )
         if table_batch_size:
             self.table_predictor.set_predictor(batch_size=table_batch_size)
+        if device:
+            self.layout_predictor.set_predictor(device=device)
+            self.ocr_pipeline.text_rec_model.set_predictor(device=device)
+            self.table_predictor.set_predictor(device=device)

-    def predict(self, x):
+    def predict(self, input, **kwargs):
+        self.set_predictor(**kwargs)
         for layout_pred, ocr_pred in zip(
-            self.layout_predictor(x), self.ocr_pipeline(x)
+            self.layout_predictor(input), self.ocr_pipeline(input)
         ):
             single_img_res = {
                 "input_path": "",
@@ -176,3 +159,32 @@ class TableRecPipeline(BasePipeline):
             ocr_res_list.append(ocr_pred)
             table_index += 1
         return table_res_list, ocr_res_list
+
+
+class TableRecPipeline(_TableRecPipeline):
+    """Table Recognition Pipeline"""
+
+    entities = "table_recognition"
+
+    def __init__(
+        self,
+        layout_model,
+        text_det_model,
+        text_rec_model,
+        table_model,
+        layout_batch_size=1,
+        text_det_batch_size=1,
+        text_rec_batch_size=1,
+        table_batch_size=1,
+        device=None,
+        predictor_kwargs=None,
+    ):
+        super().__init__(predictor_kwargs=predictor_kwargs)
+        self._build_predictor(layout_model, text_det_model, text_rec_model, table_model)
+        self.set_predictor(
+            layout_batch_size=layout_batch_size,
+            text_det_batch_size=text_det_batch_size,
+            text_rec_batch_size=text_rec_batch_size,
+            table_batch_size=table_batch_size,
+            device=device,
+        )