add doc-bee serving (#3832)

* add pipeline for docbee

* Support creating OpenAI chat completion as a primary operation

* support pp_docbee serving

* add pp_docbee serving

* refine code

---------

Co-authored-by: zhangzelun <zhangzelun@baidu.com>
Co-authored-by: Bobholamovic <bob1998425@hotmail.com>
学卿 7 months ago
commit 67c9f58025
43 changed files with 452 additions and 131 deletions
  1. paddlex/configs/modules/doc_vlm/PP-DocBee-2B.yaml (+14 -0)
  2. paddlex/configs/modules/doc_vlm/PP-DocBee-7B.yaml (+14 -0)
  3. paddlex/configs/pipelines/doc_understanding.yaml (+9 -0)
  4. paddlex/inference/common/result/base_result.py (+2 -1)
  5. paddlex/inference/pipelines/__init__.py (+1 -0)
  6. paddlex/inference/pipelines/doc_understanding/__init__.py (+15 -0)
  7. paddlex/inference/pipelines/doc_understanding/pipeline.py (+71 -0)
  8. paddlex/inference/serving/basic_serving/_app.py (+10 -7)
  9. paddlex/inference/serving/basic_serving/_pipeline_apps/anomaly_detection.py (+3 -3)
  10. paddlex/inference/serving/basic_serving/_pipeline_apps/doc_preprocessor.py (+3 -3)
  11. paddlex/inference/serving/basic_serving/_pipeline_apps/doc_understanding.py (+109 -0)
  12. paddlex/inference/serving/basic_serving/_pipeline_apps/face_recognition.py (+9 -9)
  13. paddlex/inference/serving/basic_serving/_pipeline_apps/formula_recognition.py (+3 -3)
  14. paddlex/inference/serving/basic_serving/_pipeline_apps/human_keypoint_detection.py (+3 -3)
  15. paddlex/inference/serving/basic_serving/_pipeline_apps/image_classification.py (+3 -3)
  16. paddlex/inference/serving/basic_serving/_pipeline_apps/image_multilabel_classification.py (+3 -3)
  17. paddlex/inference/serving/basic_serving/_pipeline_apps/instance_segmentation.py (+3 -3)
  18. paddlex/inference/serving/basic_serving/_pipeline_apps/layout_parsing.py (+3 -3)
  19. paddlex/inference/serving/basic_serving/_pipeline_apps/m_3d_bev_detection.py (+3 -3)
  20. paddlex/inference/serving/basic_serving/_pipeline_apps/multilingual_speech_recognition.py (+3 -3)
  21. paddlex/inference/serving/basic_serving/_pipeline_apps/object_detection.py (+3 -3)
  22. paddlex/inference/serving/basic_serving/_pipeline_apps/ocr.py (+3 -3)
  23. paddlex/inference/serving/basic_serving/_pipeline_apps/open_vocabulary_detection.py (+3 -3)
  24. paddlex/inference/serving/basic_serving/_pipeline_apps/open_vocabulary_segmentation.py (+3 -3)
  25. paddlex/inference/serving/basic_serving/_pipeline_apps/pedestrian_attribute_recognition.py (+3 -3)
  26. paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv3_doc.py (+7 -7)
  27. paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv4_doc.py (+9 -9)
  28. paddlex/inference/serving/basic_serving/_pipeline_apps/pp_shituv2.py (+9 -9)
  29. paddlex/inference/serving/basic_serving/_pipeline_apps/pp_structurev3.py (+3 -3)
  30. paddlex/inference/serving/basic_serving/_pipeline_apps/rotated_object_detection.py (+3 -3)
  31. paddlex/inference/serving/basic_serving/_pipeline_apps/seal_recognition.py (+3 -3)
  32. paddlex/inference/serving/basic_serving/_pipeline_apps/semantic_segmentation.py (+3 -3)
  33. paddlex/inference/serving/basic_serving/_pipeline_apps/small_object_detection.py (+3 -3)
  34. paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition.py (+3 -3)
  35. paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition_v2.py (+3 -3)
  36. paddlex/inference/serving/basic_serving/_pipeline_apps/ts_anomaly_detection.py (+3 -3)
  37. paddlex/inference/serving/basic_serving/_pipeline_apps/ts_classification.py (+3 -3)
  38. paddlex/inference/serving/basic_serving/_pipeline_apps/ts_forecast.py (+3 -3)
  39. paddlex/inference/serving/basic_serving/_pipeline_apps/vehicle_attribute_recognition.py (+3 -3)
  40. paddlex/inference/serving/basic_serving/_pipeline_apps/video_classification.py (+3 -3)
  41. paddlex/inference/serving/basic_serving/_pipeline_apps/video_detection.py (+3 -3)
  42. paddlex/inference/serving/infra/models.py (+12 -5)
  43. paddlex/inference/serving/schemas/doc_understanding.py (+77 -0)

+ 14 - 0
paddlex/configs/modules/doc_vlm/PP-DocBee-2B.yaml

@@ -0,0 +1,14 @@
+Global:
+  model: PP-DocBee-2B
+  mode: predict # only support predict
+  device: gpu:0
+  output: "output"
+
+Predict:
+  batch_size: 1
+  model_dir: "/path/to/PP-DocBee-2B"
+  input:
+    image: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/medal_table.png"
+    query: "识别这份表格的内容"
+  kernel_option:
+    run_mode: paddle
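
For reference, the Predict block above maps onto the module-level Python API roughly as follows. This is a minimal sketch, assuming the standard PaddleX create_model entry point and the usual res.print() result helper; the model name and input dict are taken from the config above.

from paddlex import create_model

# Sketch: "PP-DocBee-2B" and the input keys come from the YAML above.
model = create_model("PP-DocBee-2B")
output = model.predict(
    {
        "image": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/medal_table.png",
        "query": "识别这份表格的内容",
    },
    batch_size=1,
)
for res in output:
    res.print()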

+ 14 - 0
paddlex/configs/modules/doc_vlm/PP-DocBee-7B.yaml

@@ -0,0 +1,14 @@
+Global:
+  model: PP-DocBee-7B
+  mode: predict # only support predict
+  device: gpu:0
+  output: "output"
+
+Predict:
+  batch_size: 1
+  model_dir: "/path/to/PP-DocBee-7B"
+  input:
+    image: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/medal_table.png"
+    query: "识别这份表格的内容"
+  kernel_option:
+    run_mode: paddle

+ 9 - 0
paddlex/configs/pipelines/doc_understanding.yaml

@@ -0,0 +1,9 @@
+
+pipeline_name: doc_understanding
+
+SubModules:
+  DocUnderstanding:
+    module_name: doc_vlm
+    model_name: PP-DocBee-2B
+    model_dir: null
+    batch_size: 1
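
The pipeline config can then be exercised end to end; a minimal sketch, assuming the standard create_pipeline entry point (the input dict mirrors what DocUnderstandingPipeline.predict expects, see pipeline.py below):

from paddlex import create_pipeline

pipeline = create_pipeline(pipeline="doc_understanding")
output = pipeline.predict(
    {
        "image": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/medal_table.png",
        "query": "识别这份表格的内容",
    }
)
for res in output:
    res.print()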

+ 2 - 1
paddlex/inference/common/result/base_result.py

@@ -56,7 +56,7 @@ class BaseResult(dict, JsonMixin, StrMixin):
                 func()
 
     def _get_input_fn(self):
-        if (fp := self["input_path"]) is None:
+        if self.get("input_path", None) is None:
             if self._rand_fn:
                 return self._rand_fn
 
@@ -68,4 +68,5 @@ class BaseResult(dict, JsonMixin, StrMixin):
             )
             self._rand_fn = Path(fp).name
             return self._rand_fn
+        fp = self["input_path"]
         return Path(fp).name
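
The point of this change: subscripting a dict raises KeyError when the key is missing, so the old walrus pattern only handled an explicit None value, never an absent "input_path" (which doc-understanding results presumably omit). A standalone illustration with a plain dict, not PaddleX code:

d = {}

# Old pattern: fails outright when the key was never set.
try:
    if (fp := d["input_path"]) is None:
        print("no input path")
except KeyError:
    print("KeyError: the subscript form requires the key to exist")

# New pattern: a missing key and an explicit None both fall through
# to the random-name branch.
if d.get("input_path", None) is None:
    print("falling back to the generated file name")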

+ 1 - 0
paddlex/inference/pipelines/__init__.py

@@ -27,6 +27,7 @@ from .attribute_recognition import (
 from .base import BasePipeline
 from .components import BaseChat, BaseGeneratePrompt, BaseRetriever
 from .doc_preprocessor import DocPreprocessorPipeline
+from .doc_understanding import DocUnderstandingPipeline
 from .face_recognition import FaceRecPipeline
 from .formula_recognition import FormulaRecognitionPipeline
 from .image_classification import ImageClassificationPipeline

+ 15 - 0
paddlex/inference/pipelines/doc_understanding/__init__.py

@@ -0,0 +1,15 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .pipeline import DocUnderstandingPipeline

+ 71 - 0
paddlex/inference/pipelines/doc_understanding/pipeline.py

@@ -0,0 +1,71 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict, Optional, Union
+
+from ....utils.deps import pipeline_requires_extra
+from ...models.doc_vlm.result import DocVLMResult
+from ...utils.hpi import HPIConfig
+from ...utils.pp_option import PaddlePredictorOption
+from ..base import BasePipeline
+
+
+@pipeline_requires_extra("multimodal")
+class DocUnderstandingPipeline(BasePipeline):
+    """Doc Understanding Pipeline"""
+
+    entities = "doc_understanding"
+
+    def __init__(
+        self,
+        config: Dict,
+        device: str = None,
+        pp_option: PaddlePredictorOption = None,
+        use_hpip: bool = False,
+        hpi_config: Optional[Union[Dict[str, Any], HPIConfig]] = None,
+    ) -> None:
+        """
+        Initializes the class with given configurations and options.
+
+        Args:
+            config (Dict): Configuration dictionary containing model and other parameters.
+            device (str): The device to run the prediction on. Default is None.
+            pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
+            use_hpip (bool, optional): Whether to use the high-performance
+                inference plugin (HPIP) by default. Defaults to False.
+            hpi_config (Optional[Union[Dict[str, Any], HPIConfig]], optional):
+                The default high-performance inference configuration dictionary.
+                Defaults to None.
+        """
+        super().__init__(
+            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_config=hpi_config
+        )
+
+        doc_understanding_model_config = config.get("SubModules", {}).get(
+            "DocUnderstanding",
+            {"model_config_error": "config error for doc_understanding_model!"},
+        )
+        self.doc_understanding_model = self.create_model(doc_understanding_model_config)
+
+    def predict(self, input: Dict, **kwargs) -> DocVLMResult:
+        """Predicts doc understanding results for the given input.
+
+        Args:
+            input (dict): The input image and query.
+            **kwargs: Additional keyword arguments that can be passed to the function.
+
+        Returns:
+            DocVLMResult: The predicted doc understanding results.
+        """
+        yield from self.doc_understanding_model(input, **kwargs)
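
Note that predict is a generator (it yields from the underlying doc_vlm model), so callers iterate over results rather than indexing. A toy stand-in for the consumption pattern, with no real model behind it:

# Toy generator mirroring the predict() contract: results are produced
# lazily, one per input, and consumed by iteration.
def predict(batch):
    for item in batch:
        yield {"result": f"answer about {item['image']}: {item['query']}"}

for res in predict([{"image": "a.png", "query": "what is in this image?"}]):
    print(res["result"])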

+ 10 - 7
paddlex/inference/serving/basic_serving/_app.py

@@ -34,7 +34,7 @@ from ....utils import logging
 from ....utils.deps import class_requires_deps, function_requires_deps, is_dep_available
 from ...pipelines import BasePipeline
 from ..infra.config import AppConfig
-from ..infra.models import NoResultResponse
+from ..infra.models import AIStudioNoResultResponse
 from ..infra.utils import call_async, generate_log_id
 
 if is_dep_available("aiohttp"):
@@ -156,8 +156,8 @@
     app.state.context = ctx
 
     @app.get("/health", operation_id="checkHealth")
-    async def _check_health() -> NoResultResponse:
-        return NoResultResponse(
+    async def _check_health() -> AIStudioNoResultResponse:
+        return AIStudioNoResultResponse(
             logId=generate_log_id(), errorCode=0, errorMsg="Healthy"
         )
 
@@ -166,7 +166,7 @@
         request: fastapi.Request, exc: RequestValidationError
     ) -> JSONResponse:
         json_compatible_data = jsonable_encoder(
-            NoResultResponse(
+            AIStudioNoResultResponse(
                 logId=generate_log_id(),
                 errorCode=422,
                 errorMsg=json.dumps(exc.errors()),
@@ -179,7 +179,7 @@
         request: fastapi.Request, exc: HTTPException
     ) -> JSONResponse:
         json_compatible_data = jsonable_encoder(
-            NoResultResponse(
+            AIStudioNoResultResponse(
                 logId=generate_log_id(), errorCode=exc.status_code, errorMsg=exc.detail
             )
         )
@@ -193,7 +193,7 @@
         # necessary to log the exception info here?
         logging.exception("Unhandled exception")
         json_compatible_data = jsonable_encoder(
-            NoResultResponse(
+            AIStudioNoResultResponse(
                 logId=generate_log_id(),
                 errorCode=500,
                 errorMsg="Internal server error",
@@ -212,7 +212,10 @@
     return app.post(
         path,
         operation_id=operation_id,
-        responses={422: {"model": NoResultResponse}, 500: {"model": NoResultResponse}},
+        responses={
+            422: {"model": AIStudioNoResultResponse},
+            500: {"model": AIStudioNoResultResponse},
+        },
         response_model_exclude_none=True,
         **kwargs,
     )
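
The rename keeps the same three-field error envelope, so clients only see the schema name change in the OpenAPI document. An illustrative pydantic mirror of the payload shape (field names copied from the handlers above; the real class lives in paddlex.inference.serving.infra.models, this stand-in only shows the JSON):

from pydantic import BaseModel

class AIStudioNoResultResponse(BaseModel):
    # Illustrative stand-in, not the real import.
    logId: str
    errorCode: int
    errorMsg: str

print(
    AIStudioNoResultResponse(
        logId="0123abcd", errorCode=422, errorMsg="validation failed"
    ).model_dump_json()
)
# {"logId":"0123abcd","errorCode":422,"errorMsg":"validation failed"}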

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/anomaly_detection.py

@@ -17,7 +17,7 @@ from typing import Any
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.anomaly_detection import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 
@@ -36,7 +36,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -57,7 +57,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(
                 labelMap=label_map, size=size, image=output_image_base64

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/doc_preprocessor.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.doc_preprocessor import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 from ._common import common
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
 
         log_id = serving_utils.generate_log_id()
@@ -89,7 +89,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
                 )
             )
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=log_id,
             result=InferResult(
                 docPreprocessingResults=doc_pp_results,

+ 109 - 0
paddlex/inference/serving/basic_serving/_pipeline_apps/doc_understanding.py

@@ -0,0 +1,109 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+from typing import Any, List
+
+from .....utils.deps import function_requires_deps, is_dep_available
+from ...infra import utils as serving_utils
+from ...infra.config import AppConfig
+from ...schemas.doc_understanding import (
+    INFER_ENDPOINT,
+    ImageContent,
+    ImageUrl,
+    InferRequest,
+    Message,
+    RoleType,
+    TextContent,
+)
+from .._app import create_app, primary_operation
+
+if is_dep_available("fastapi"):
+    from fastapi import FastAPI
+if is_dep_available("openai"):
+    from openai.types.chat import ChatCompletion
+    from openai.types.chat.chat_completion import Choice as ChatCompletionChoice
+    from openai.types.chat.chat_completion_message import ChatCompletionMessage
+
+
+@function_requires_deps("fastapi", "openai")
+def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
+    app, ctx = create_app(
+        pipeline=pipeline, app_config=app_config, app_aiohttp_session=True
+    )
+
+    @primary_operation(
+        app,
+        INFER_ENDPOINT,
+        "infer",
+    )
+    async def _infer(request: InferRequest) -> "ChatCompletion":
+        pipeline = ctx.pipeline
+
+        def _process_messages(messages: List[Message]):
+            system_message = ""
+            user_message = ""
+            image_url = ""
+
+            for msg in messages:
+                if msg.role == RoleType.SYSTEM:
+                    if isinstance(msg.content, list):
+                        for content in msg.content:
+                            if isinstance(content, TextContent):
+                                system_message = content.text
+                                break
+                    else:
+                        system_message = msg.content
+
+                elif msg.role == RoleType.USER:
+                    if isinstance(msg.content, list):
+                        for content in msg.content:
+                            if isinstance(content, str):
+                                user_message = content
+                            else:
+                                if isinstance(content, TextContent):
+                                    user_message = content.text
+                                elif isinstance(content, ImageContent):
+                                    image_url = content.image_url
+                                    if isinstance(image_url, ImageUrl):
+                                        image_url = image_url.url
+                    else:
+                        user_message = msg.content
+            return system_message, user_message, image_url
+
+        system_message, user_message, image_url = _process_messages(request.messages)
+        result = (
+            await pipeline.infer(
+                {"image": image_url, "query": user_message},
+            )
+        )[0]
+
+        return ChatCompletion(
+            id=serving_utils.generate_log_id(),
+            model=request.model,
+            choices=[
+                ChatCompletionChoice(
+                    index=0,
+                    finish_reason="stop",
+                    message=ChatCompletionMessage(
+                        role="assistant",
+                        content=result["result"],
+                    ),
+                )
+            ],
+            created=int(time.time()),
+            object="chat.completion",
+        )
+
+    return app
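
Because the handler returns an openai ChatCompletion, the endpoint speaks the OpenAI chat-completions wire format and can be exercised with the official openai client. A hypothetical call; the base URL, port, and path here are assumptions, since the actual route is INFER_ENDPOINT from schemas/doc_understanding.py, which is not shown above:

from openai import OpenAI

# Assumed deployment details: point base_url at wherever the PaddleX
# serving app listens, adjusted for the real INFER_ENDPOINT path.
client = OpenAI(base_url="http://localhost:8080/v1", api_key="unused")
completion = client.chat.completions.create(
    model="pp-docbee",  # echoed back in the response; not used for routing here
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "识别这份表格的内容"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/medal_table.png"
                    },
                },
            ],
        }
    ],
)
print(completion.choices[0].message.content)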

+ 9 - 9
paddlex/inference/serving/basic_serving/_pipeline_apps/face_recognition.py

@@ -20,7 +20,7 @@ from .....utils.deps import function_requires_deps, is_dep_available
 from ....pipelines.components import IndexData
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas import face_recognition as schema
 from .._app import create_app, primary_operation
 from ._common import image_recognition as ir_common
@@ -49,7 +49,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _build_index(
         request: schema.BuildIndexRequest,
-    ) -> ResultResponse[schema.BuildIndexResult]:
+    ) -> AIStudioResultResponse[schema.BuildIndexResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -77,7 +77,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         index_data_bytes = index_data.to_bytes()
         await serving_utils.call_async(index_storage.set, index_key, index_data_bytes)
 
-        return ResultResponse[schema.BuildIndexResult](
+        return AIStudioResultResponse[schema.BuildIndexResult](
             logId=serving_utils.generate_log_id(),
             result=schema.BuildIndexResult(
                 indexKey=index_key, imageCount=len(index_data.id_map)
@@ -91,7 +91,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _add_images_to_index(
         request: schema.AddImagesToIndexRequest,
-    ) -> ResultResponse[schema.AddImagesToIndexResult]:
+    ) -> AIStudioResultResponse[schema.AddImagesToIndexResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -119,7 +119,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             index_storage.set, request.indexKey, index_data_bytes
         )
 
-        return ResultResponse[schema.AddImagesToIndexResult](
+        return AIStudioResultResponse[schema.AddImagesToIndexResult](
             logId=serving_utils.generate_log_id(),
             result=schema.AddImagesToIndexResult(imageCount=len(index_data.id_map)),
         )
@@ -131,7 +131,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _remove_images_from_index(
         request: schema.RemoveImagesFromIndexRequest,
-    ) -> ResultResponse[schema.RemoveImagesFromIndexResult]:
+    ) -> AIStudioResultResponse[schema.RemoveImagesFromIndexResult]:
         pipeline = ctx.pipeline
 
         index_storage = ctx.extra["index_storage"]
@@ -149,7 +149,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             index_storage.set, request.indexKey, index_data_bytes
         )
 
-        return ResultResponse[schema.RemoveImagesFromIndexResult](
+        return AIStudioResultResponse[schema.RemoveImagesFromIndexResult](
             logId=serving_utils.generate_log_id(),
             result=schema.RemoveImagesFromIndexResult(
                 imageCount=len(index_data.id_map)
@@ -163,7 +163,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _infer(
         request: schema.InferRequest,
-    ) -> ResultResponse[schema.InferResult]:
+    ) -> AIStudioResultResponse[schema.InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -218,7 +218,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None
 
-        return ResultResponse[schema.InferResult](
+        return AIStudioResultResponse[schema.InferResult](
             logId=serving_utils.generate_log_id(),
             result=schema.InferResult(faces=objs, image=output_image_base64),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/formula_recognition.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.formula_recognition import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 from ._common import common
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
 
         log_id = serving_utils.generate_log_id()
@@ -89,7 +89,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
                 )
             )
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=log_id,
             result=InferResult(
                 formulaRecResults=formula_rec_results,

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/human_keypoint_detection.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.human_keypoint_detection import (
     INFER_ENDPOINT,
     InferRequest,
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -73,7 +73,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(persons=objs, image=output_image_base64),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/image_classification.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.image_classification import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 
@@ -36,7 +36,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -61,7 +61,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(categories=categories, image=output_image_base64),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/image_multilabel_classification.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.image_multilabel_classification import (
     INFER_ENDPOINT,
     InferRequest,
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -65,7 +65,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(categories=categories, image=output_image_base64),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/instance_segmentation.py

@@ -19,7 +19,7 @@ import numpy as np
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.instance_segmentation import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 
@@ -48,7 +48,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -79,7 +79,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(instances=instances, image=output_image_base64),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/layout_parsing.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.layout_parsing import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 from ._common import common
@@ -42,7 +42,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _infer(
         request: InferRequest,
-    ) -> ResultResponse[InferResult]:
+    ) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
 
         log_id = serving_utils.generate_log_id()
@@ -107,7 +107,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
                 )
             )
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=log_id,
             result=InferResult(
                 layoutParsingResults=layout_parsing_results,

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/m_3d_bev_detection.py

@@ -18,7 +18,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.m_3d_bev_detection import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 
@@ -37,7 +37,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -71,7 +71,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
                 )
             )
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(detectedObjects=objects),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/multilingual_speech_recognition.py

@@ -18,7 +18,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.multilingual_speech_recognition import (
     INFER_ENDPOINT,
     InferRequest,
@@ -41,7 +41,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -80,7 +80,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             )
             segments.append(segment)
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(
                 text=result["result"]["text"],

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/object_detection.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.object_detection import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 
@@ -36,7 +36,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -69,7 +69,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(detectedObjects=objects, image=output_image_base64),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/ocr.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.ocr import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 from ._common import common
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
 
         log_id = serving_utils.generate_log_id()
@@ -91,7 +91,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
                 )
             )
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=log_id,
             result=InferResult(
                 ocrResults=ocr_results,

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/open_vocabulary_detection.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.open_vocabulary_detection import (
     INFER_ENDPOINT,
     InferRequest,
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -73,7 +73,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(detectedObjects=objects, image=output_image_base64),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/open_vocabulary_segmentation.py

@@ -19,7 +19,7 @@ import numpy as np
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.open_vocabulary_segmentation import (
     INFER_ENDPOINT,
     InferRequest,
@@ -52,7 +52,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -81,7 +81,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(
                 masks=rle_masks, maskInfos=mask_infos, image=output_image_base64

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/pedestrian_attribute_recognition.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.pedestrian_attribute_recognition import (
     INFER_ENDPOINT,
     InferRequest,
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session
 
@@ -76,7 +76,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None
 
-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(pedestrians=objs, image=output_image_base64),
         )

+ 7 - 7
paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv3_doc.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas import pp_chatocrv3_doc as schema
 from .._app import create_app, primary_operation
 from ._common import common
@@ -42,7 +42,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _analyze_images(
         request: schema.AnalyzeImagesRequest,
-    ) -> ResultResponse[schema.AnalyzeImagesResult]:
+    ) -> AIStudioResultResponse[schema.AnalyzeImagesResult]:
         pipeline = ctx.pipeline
 
         log_id = serving_utils.generate_log_id()
@@ -108,7 +108,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             )
             visual_info.append(item["visual_info"])
 
-        return ResultResponse[schema.AnalyzeImagesResult](
+        return AIStudioResultResponse[schema.AnalyzeImagesResult](
             logId=log_id,
             result=schema.AnalyzeImagesResult(
                 layoutParsingResults=layout_parsing_results,
@@ -124,7 +124,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _build_vector_store(
         request: schema.BuildVectorStoreRequest,
-    ) -> ResultResponse[schema.BuildVectorStoreResult]:
+    ) -> AIStudioResultResponse[schema.BuildVectorStoreResult]:
         pipeline = ctx.pipeline
 
         kwargs: Dict[str, Any] = {
@@ -142,7 +142,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             **kwargs,
         )
 
-        return ResultResponse[schema.BuildVectorStoreResult](
+        return AIStudioResultResponse[schema.BuildVectorStoreResult](
             logId=serving_utils.generate_log_id(),
             result=schema.BuildVectorStoreResult(vectorInfo=vector_info),
         )
@@ -154,7 +154,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _chat(
         request: schema.ChatRequest,
-    ) -> ResultResponse[schema.ChatResult]:
+    ) -> AIStudioResultResponse[schema.ChatResult]:
         pipeline = ctx.pipeline
 
         kwargs: Dict[str, Any] = dict(
@@ -184,7 +184,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             **kwargs,
        )
 
-        return ResultResponse[schema.ChatResult](
+        return AIStudioResultResponse[schema.ChatResult](
             logId=serving_utils.generate_log_id(),
             result=schema.ChatResult(
                 chatResult=result["chat_res"],

+ 9 - 9
paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv4_doc.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas import pp_chatocrv4_doc as schema
 from .._app import create_app, primary_operation
 from ._common import common
@@ -42,7 +42,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _analyze_images(
         request: schema.AnalyzeImagesRequest,
-    ) -> ResultResponse[schema.AnalyzeImagesResult]:
+    ) -> AIStudioResultResponse[schema.AnalyzeImagesResult]:
         pipeline = ctx.pipeline

         log_id = serving_utils.generate_log_id()
@@ -108,7 +108,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             )
             visual_info.append(item["visual_info"])

-        return ResultResponse[schema.AnalyzeImagesResult](
+        return AIStudioResultResponse[schema.AnalyzeImagesResult](
             logId=log_id,
             result=schema.AnalyzeImagesResult(
                 layoutParsingResults=layout_parsing_results,
@@ -124,7 +124,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _build_vector_store(
         request: schema.BuildVectorStoreRequest,
-    ) -> ResultResponse[schema.BuildVectorStoreResult]:
+    ) -> AIStudioResultResponse[schema.BuildVectorStoreResult]:
         pipeline = ctx.pipeline

         kwargs: Dict[str, Any] = {
@@ -142,7 +142,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             **kwargs,
         )

-        return ResultResponse[schema.BuildVectorStoreResult](
+        return AIStudioResultResponse[schema.BuildVectorStoreResult](
             logId=serving_utils.generate_log_id(),
             result=schema.BuildVectorStoreResult(vectorInfo=vector_info),
         )
@@ -154,7 +154,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _invoke_mllm(
         request: schema.InvokeMLLMRequest,
-    ) -> ResultResponse[schema.InvokeMLLMResult]:
+    ) -> AIStudioResultResponse[schema.InvokeMLLMResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -170,7 +170,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             mllm_chat_bot_config=request.mllmChatBotConfig,
         )

-        return ResultResponse[schema.InvokeMLLMResult](
+        return AIStudioResultResponse[schema.InvokeMLLMResult](
             logId=serving_utils.generate_log_id(),
             result=schema.InvokeMLLMResult(mllmPredictInfo=mllm_predict_info),
         )
@@ -182,7 +182,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _chat(
         request: schema.ChatRequest,
-    ) -> ResultResponse[schema.ChatResult]:
+    ) -> AIStudioResultResponse[schema.ChatResult]:
         pipeline = ctx.pipeline

         kwargs: Dict[str, Any] = dict(
@@ -214,7 +214,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             **kwargs,
         )

-        return ResultResponse[schema.ChatResult](
+        return AIStudioResultResponse[schema.ChatResult](
             logId=serving_utils.generate_log_id(),
             result=schema.ChatResult(
                 chatResult=result["chat_res"],

+ 9 - 9
paddlex/inference/serving/basic_serving/_pipeline_apps/pp_shituv2.py

@@ -20,7 +20,7 @@ from .....utils.deps import function_requires_deps, is_dep_available
 from ....pipelines.components import IndexData
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas import pp_shituv2 as schema
 from .._app import create_app, primary_operation
 from ._common import image_recognition as ir_common
@@ -44,7 +44,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _build_index(
         request: schema.BuildIndexRequest,
-    ) -> ResultResponse[schema.BuildIndexResult]:
+    ) -> AIStudioResultResponse[schema.BuildIndexResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -72,7 +72,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         index_data_bytes = index_data.to_bytes()
         await serving_utils.call_async(index_storage.set, index_key, index_data_bytes)

-        return ResultResponse[schema.BuildIndexResult](
+        return AIStudioResultResponse[schema.BuildIndexResult](
             logId=serving_utils.generate_log_id(),
             result=schema.BuildIndexResult(
                 indexKey=index_key, imageCount=len(index_data.id_map)
@@ -86,7 +86,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _add_images_to_index(
         request: schema.AddImagesToIndexRequest,
-    ) -> ResultResponse[schema.AddImagesToIndexResult]:
+    ) -> AIStudioResultResponse[schema.AddImagesToIndexResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -114,7 +114,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             index_storage.set, request.indexKey, index_data_bytes
         )

-        return ResultResponse[schema.AddImagesToIndexResult](
+        return AIStudioResultResponse[schema.AddImagesToIndexResult](
             logId=serving_utils.generate_log_id(),
             result=schema.AddImagesToIndexResult(imageCount=len(index_data.id_map)),
         )
@@ -126,7 +126,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _remove_images_from_index(
         request: schema.RemoveImagesFromIndexRequest,
-    ) -> ResultResponse[schema.RemoveImagesFromIndexResult]:
+    ) -> AIStudioResultResponse[schema.RemoveImagesFromIndexResult]:
         pipeline = ctx.pipeline

         index_storage = ctx.extra["index_storage"]
@@ -144,7 +144,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
             index_storage.set, request.indexKey, index_data_bytes
         )

-        return ResultResponse[schema.RemoveImagesFromIndexResult](
+        return AIStudioResultResponse[schema.RemoveImagesFromIndexResult](
             logId=serving_utils.generate_log_id(),
             result=schema.RemoveImagesFromIndexResult(
                 imageCount=len(index_data.id_map)
@@ -158,7 +158,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _infer(
         request: schema.InferRequest,
-    ) -> ResultResponse[schema.InferResult]:
+    ) -> AIStudioResultResponse[schema.InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -213,7 +213,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None

-        return ResultResponse[schema.InferResult](
+        return AIStudioResultResponse[schema.InferResult](
             logId=serving_utils.generate_log_id(),
             result=schema.InferResult(detectedObjects=objs, image=output_image_base64),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/pp_structurev3.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.pp_structurev3 import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 from ._common import common
@@ -42,7 +42,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
     )
     async def _infer(
         request: InferRequest,
-    ) -> ResultResponse[InferResult]:
+    ) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline

         log_id = serving_utils.generate_log_id()
@@ -128,7 +128,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
                 )
             )

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=log_id,
             result=InferResult(
                 layoutParsingResults=layout_parsing_results,

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/rotated_object_detection.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.rotated_object_detection import (
     INFER_ENDPOINT,
     InferRequest,
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -73,7 +73,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(detectedObjects=objects, image=output_image_base64),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/seal_recognition.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.seal_recognition import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 from ._common import common
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline

         log_id = serving_utils.generate_log_id()
@@ -95,7 +95,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
                 )
             )

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=log_id,
             result=InferResult(
                 sealRecResults=seal_rec_results,

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/semantic_segmentation.py

@@ -17,7 +17,7 @@ from typing import Any
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.semantic_segmentation import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation

@@ -36,7 +36,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -57,7 +57,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(
                 labelMap=label_map, size=size, image=output_image_base64

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/small_object_detection.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.small_object_detection import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation

@@ -36,7 +36,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -64,7 +64,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(detectedObjects=objects, image=output_image_base64),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.table_recognition import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 from ._common import common
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline

         log_id = serving_utils.generate_log_id()
@@ -97,7 +97,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
                 )
             )

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=log_id,
             result=InferResult(
                 tableRecResults=table_rec_results,

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition_v2.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.table_recognition_v2 import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation
 from ._common import common
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline

         log_id = serving_utils.generate_log_id()
@@ -99,7 +99,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
                 )
             )

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=log_id,
             result=InferResult(
                 tableRecResults=table_rec_results,

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/ts_anomaly_detection.py

@@ -17,7 +17,7 @@ from typing import Any
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.ts_anomaly_detection import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation

@@ -36,7 +36,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -57,7 +57,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image = None

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(csv=output_csv, image=output_image),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/ts_classification.py

@@ -17,7 +17,7 @@ from typing import Any
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.ts_classification import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation

@@ -36,7 +36,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -56,7 +56,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image = None

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(label=label, score=score, image=output_image),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/ts_forecast.py

@@ -17,7 +17,7 @@ from typing import Any
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.ts_forecast import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation

@@ -36,7 +36,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -57,7 +57,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image = None

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(csv=output_csv, image=output_image),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/vehicle_attribute_recognition.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.vehicle_attribute_recognition import (
     INFER_ENDPOINT,
     InferRequest,
@@ -40,7 +40,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -76,7 +76,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         else:
             output_image_base64 = None

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(vehicles=objs, image=output_image_base64),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/video_classification.py

@@ -18,7 +18,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.video_classification import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation

@@ -37,7 +37,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -68,7 +68,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         for id_, name, score in zip(result["class_ids"], cat_names, result["scores"]):
             categories.append(dict(id=id_, name=name, score=score))

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(categories=categories),
         )

+ 3 - 3
paddlex/inference/serving/basic_serving/_pipeline_apps/video_detection.py

@@ -18,7 +18,7 @@ from typing import Any, Dict, List
 from .....utils.deps import function_requires_deps, is_dep_available
 from ...infra import utils as serving_utils
 from ...infra.config import AppConfig
-from ...infra.models import ResultResponse
+from ...infra.models import AIStudioResultResponse
 from ...schemas.video_detection import INFER_ENDPOINT, InferRequest, InferResult
 from .._app import create_app, primary_operation

@@ -37,7 +37,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
         INFER_ENDPOINT,
         "infer",
     )
-    async def _infer(request: InferRequest) -> ResultResponse[InferResult]:
+    async def _infer(request: InferRequest) -> AIStudioResultResponse[InferResult]:
         pipeline = ctx.pipeline
         aiohttp_session = ctx.aiohttp_session

@@ -84,7 +84,7 @@ def create_pipeline_app(pipeline: Any, app_config: AppConfig) -> "FastAPI":
                 )
             )

-        return ResultResponse[InferResult](
+        return AIStudioResultResponse[InferResult](
             logId=serving_utils.generate_log_id(),
             result=InferResult(frames=frames),
         )

+ 12 - 5
paddlex/inference/serving/infra/models.py

@@ -17,10 +17,15 @@ from typing import Dict, Generic, List, Tuple, TypeVar, Union
 from pydantic import BaseModel, Discriminator
 from typing_extensions import Annotated, Literal, TypeAlias

+from ....utils.deps import is_dep_available
+
+if is_dep_available("openai"):
+    from openai.types.chat import ChatCompletion
+
 __all__ = [
-    "NoResultResponse",
+    "AIStudioNoResultResponse",
     "ResultT",
-    "ResultResponse",
+    "AIStudioResultResponse",
     "Response",
     "ImageInfo",
     "PDFPageInfo",
@@ -30,7 +35,7 @@ __all__ = [
 ]


-class NoResultResponse(BaseModel):
+class AIStudioNoResultResponse(BaseModel):
     logId: str
     errorCode: int
     errorMsg: str
@@ -39,14 +44,16 @@ class NoResultResponse(BaseModel):
 ResultT = TypeVar("ResultT", bound=BaseModel)


-class ResultResponse(BaseModel, Generic[ResultT]):
+class AIStudioResultResponse(BaseModel, Generic[ResultT]):
     logId: str
     result: ResultT
     errorCode: Literal[0] = 0
     errorMsg: Literal["Success"] = "Success"


-Response: TypeAlias = Union[ResultResponse, NoResultResponse]
+Response: TypeAlias = Union[
+    AIStudioResultResponse, AIStudioNoResultResponse, "ChatCompletion"
+]


 class ImageInfo(BaseModel):
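
A quick usage sketch of the renamed envelope models after this change (the `Echo` payload is hypothetical; `ChatCompletion` joins the `Response` alias only when the optional `openai` dependency is installed):

```python
# Minimal sketch for the renamed response models; Echo is hypothetical.
from pydantic import BaseModel

from paddlex.inference.serving.infra.models import (
    AIStudioNoResultResponse,
    AIStudioResultResponse,
)


class Echo(BaseModel):
    text: str


# Success responses fix errorCode=0 / errorMsg="Success" via Literal defaults.
ok = AIStudioResultResponse[Echo](logId="abc123", result=Echo(text="hi"))
assert ok.errorCode == 0 and ok.errorMsg == "Success"

# Failure responses carry an explicit code and message instead of a result.
err = AIStudioNoResultResponse(logId="abc123", errorCode=1, errorMsg="failed")
```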

+ 77 - 0
paddlex/inference/serving/schemas/doc_understanding.py

@@ -0,0 +1,77 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+from typing import Final, List, Literal, Optional, Union
+
+from pydantic import BaseModel, HttpUrl
+
+from ....utils.deps import is_dep_available
+from ..infra.models import PrimaryOperations
+
+if is_dep_available("openai"):
+    from openai.types.chat import ChatCompletion
+
+__all__ = [
+    "INFER_ENDPOINT",
+    "InferRequest",
+    "PRIMARY_OPERATIONS",
+]
+
+INFER_ENDPOINT: Final[str] = "/chat/completions"
+
+
+class ContentType(str, Enum):
+    TEXT = "text"
+    IMAGE_URL = "image_url"
+
+
+class RoleType(str, Enum):
+    USER = "user"
+    ASSISTANT = "assistant"
+    SYSTEM = "system"
+
+
+class ImageUrl(BaseModel):
+    url: Union[HttpUrl, str]
+    detail: Optional[Literal["low", "high", "auto"]] = "auto"
+
+
+class TextContent(BaseModel):
+    type: Literal[ContentType.TEXT] = ContentType.TEXT
+    text: str
+
+
+class ImageContent(BaseModel):
+    type: Literal[ContentType.IMAGE_URL] = ContentType.IMAGE_URL
+    image_url: Union[HttpUrl, ImageUrl]
+
+
+class Message(BaseModel):
+    role: str
+    content: Union[str, List[Union[TextContent, ImageContent]]]
+
+
+class InferRequest(BaseModel):
+    model: str
+    messages: List[Message]
+    max_tokens: Optional[int] = 1024
+    temperature: Optional[float] = 0.1
+    top_p: Optional[float] = 0.95
+    stream: Optional[bool] = False
+
+
+PRIMARY_OPERATIONS: Final[PrimaryOperations] = {
+    "infer": (INFER_ENDPOINT, InferRequest, ChatCompletion),
+}