
Polish function names and docs (#3865)

Lin Manhui 7 months ago
commit 30f55ad140

+ 1 - 1
docs/pipeline_deploy/high_performance_inference.en.md

@@ -566,7 +566,7 @@ The high-performance inference plugin achieves inference acceleration by intelli
 
 All pipelines and modules that use static graph models support enabling the high-performance inference plugin; however, in certain scenarios, some models might not be able to achieve accelerated inference. For detailed reasons, please refer to Question 1.
 
-**3. Why does the installation of the high-performance inference plugin fail with a log message stating: “Currently, the CUDA version must be 11.x for GPU devices.”?**
+**3. Why does the installation of the high-performance inference plugin fail with a log message stating: “You are not using PaddlePaddle compiled with CUDA 11. Currently, CUDA versions other than 11.x are not supported by the high-performance inference plugin.”?**
 
 For the GPU version of the high-performance inference plugin, the official PaddleX currently only provides precompiled packages for CUDA 11.8 + cuDNN 8.9. The support for CUDA 12 is in progress.
 

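If you hit the reworded error above, you can confirm what the installer sees by asking your PaddlePaddle build for the CUDA/cuDNN versions it was compiled with. A minimal diagnostic sketch (not part of this commit; `paddle.version.cuda()` and `paddle.version.cudnn()` are the same calls the helpers touched below wrap):

```python
# Diagnostic: print the CUDA/cuDNN versions PaddlePaddle was compiled with.
import paddle.version

cuda = paddle.version.cuda()    # e.g. "11.8"; the string "False" on CPU-only builds
cudnn = paddle.version.cudnn()  # e.g. "8.9.1"
print(f"PaddlePaddle compiled with CUDA {cuda}, cuDNN {cudnn}")

if cuda == "False" or not cuda.startswith("11."):
    print("The GPU high-performance inference plugin currently requires a CUDA 11.x build.")
```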
+ 1 - 1
docs/pipeline_deploy/high_performance_inference.md

@@ -567,7 +567,7 @@ python -m pip install ../../python/dist/ultra_infer*.whl
 
 All pipelines and modules that use static graph models support enabling the high-performance inference plugin, but some models may not achieve accelerated inference in certain cases; see Question 1 for the specific reasons.
 
-**3. Why does installing the high-performance inference plugin fail, with the log showing: “Currently, the CUDA version must be 11.x for GPU devices.”?**
+**3. Why does installing the high-performance inference plugin fail, with the log showing: “You are not using PaddlePaddle compiled with CUDA 11. Currently, CUDA versions other than 11.x are not supported by the high-performance inference plugin.”?**
 
 For the GPU version of the high-performance inference plugin, PaddleX currently only provides precompiled packages for CUDA 11.8 + cuDNN 8.9. CUDA 12 support is in progress.
 

+ 6 - 2
paddlex/inference/models/base/predictor/base_predictor.py

@@ -119,12 +119,16 @@ class BasePredictor(
         self._use_hpip = use_hpip
         if not use_hpip:
             if hpi_config is not None:
-                logging.warning("`hpi_config` will be ignored when not using HPIP.")
+                logging.warning(
+                    "`hpi_config` will be ignored when not using the high-performance inference plugin."
+                )
             self._pp_option = self._prepare_pp_option(pp_option, device)
         else:
             require_hpip()
             if pp_option is not None:
-                logging.warning("`pp_option` will be ignored when using HPIP.")
+                logging.warning(
+                    "`pp_option` will be ignored when using the high-performance inference plugin."
+                )
             self._hpi_config = self._prepare_hpi_config(hpi_config, device)
 
         logging.debug(f"{self.__class__.__name__}: {self.model_dir}")

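The two reworded warnings are mutually exclusive: `pp_option` configures standard Paddle inference and is ignored when the plugin is on, while `hpi_config` is ignored when it is off. A minimal sketch of triggering the first warning (hedged: the model name is a placeholder, and `create_model`'s keyword names are taken from this diff and the PaddleX docs):

```python
from paddlex import create_model

# HPIP is off, so the passed `hpi_config` is ignored and the reworded
# "`hpi_config` will be ignored..." warning is logged.
model = create_model(
    model_name="PP-LCNet_x1_0",  # placeholder; any installed model works
    use_hpip=False,
    hpi_config={"backend": "onnxruntime"},  # ignored on this code path
)
```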
+ 9 - 3
paddlex/inference/utils/hpi.py

@@ -24,7 +24,11 @@ from pydantic import BaseModel, Field
 from typing_extensions import Annotated, TypeAlias
 
 from ...utils.deps import function_requires_deps, is_paddle2onnx_plugin_available
-from ...utils.env import get_cuda_version, get_cudnn_version, get_paddle_version
+from ...utils.env import (
+    get_paddle_cuda_version,
+    get_paddle_cudnn_version,
+    get_paddle_version,
+)
 from ...utils.flags import USE_PIR_TRT
 from .model_paths import ModelPaths
 
@@ -156,9 +160,11 @@ def suggest_inference_backend_and_config(
         else:
             return None, f"{repr(arch)} is not a supported architecture."
     elif hpi_config.device_type == "gpu":
-        cuda_version = get_cuda_version()
+        # TODO: Is it better to also check the runtime versions of CUDA and
+        # cuDNN, and the versions of CUDA and cuDNN used to build `ultra-infer`?
+        cuda_version = get_paddle_cuda_version()
         cuda_version = "".join(map(str, cuda_version))
-        cudnn_version = get_cudnn_version()
+        cudnn_version = get_paddle_cudnn_version()
         cudnn_version = "".join(map(str, cudnn_version[:-1]))
         key = f"gpu_cuda{cuda_version}_cudnn{cudnn_version}"
     else:

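For reference, the renamed helpers return version tuples, and the function joins them into a backend-registry key. A standalone sketch of that key construction, with hypothetical version values:

```python
def build_gpu_key(cuda_version, cudnn_version):
    # Mirrors the key construction above: (11, 8) -> "118", (8, 9, 1) -> "89"
    # (the cuDNN patch component is dropped via [:-1]).
    cuda = "".join(map(str, cuda_version))
    cudnn = "".join(map(str, cudnn_version[:-1]))
    return f"gpu_cuda{cuda}_cudnn{cudnn}"

# Hypothetical versions matching the precompiled CUDA 11.8 + cuDNN 8.9 packages:
assert build_gpu_key((11, 8), (8, 9, 1)) == "gpu_cuda118_cudnn89"
```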
+ 7 - 6
paddlex/paddlex_cli.py

@@ -32,7 +32,7 @@ from .utils.deps import (
     get_serving_dep_specs,
     require_paddle2onnx_plugin,
 )
-from .utils.env import get_cuda_version
+from .utils.env import get_paddle_cuda_version
 from .utils.install import install_packages
 from .utils.interactive_get_pipeline import interactive_get_pipeline
 from .utils.pipeline_arguments import PIPELINE_ARGUMENTS
@@ -237,9 +237,8 @@ def install(args):
         SUPPORTED_DEVICE_TYPES = ["cpu", "gpu", "npu"]
         if device_type not in SUPPORTED_DEVICE_TYPES:
             logging.error(
-                "HPI installation failed!\n"
-                "Supported device_type: %s. Your input device_type: %s.\n"
-                "Please ensure the device_type is correct.",
+                "Failed to install the high-performance plugin.\n"
+                "Supported device types: %s. Your input device type: %s.",
                 SUPPORTED_DEVICE_TYPES,
                 device_type,
             )
@@ -248,8 +247,10 @@
         if device_type == "cpu":
             package = "ultra-infer-python"
         elif device_type == "gpu":
-            if get_cuda_version()[0] != 11:
-                sys.exit("Currently, the CUDA version must be 11.x for GPU devices.")
+            if get_paddle_cuda_version()[0] != 11:
+                sys.exit(
+                    "You are not using PaddlePaddle compiled with CUDA 11. Currently, CUDA versions other than 11.x are not supported by the high-performance inference plugin."
+                )
             package = "ultra-infer-gpu-python"
         elif device_type == "npu":
             package = "ultra-infer-npu-python"

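The gate above only runs on the `gpu` path, so `get_paddle_cuda_version()` can assume a CUDA build. A self-contained sketch of the same check (hedged: it inlines the helper rather than importing PaddleX, and will raise on CPU-only builds, where `paddle.version.cuda()` returns the string "False"):

```python
import sys

def get_paddle_cuda_version():
    # Same logic as paddlex/utils/env.py: parse "11.8" into (11, 8).
    import paddle.version

    return tuple(map(int, paddle.version.cuda().split(".")))

if get_paddle_cuda_version()[0] != 11:
    sys.exit(
        "You are not using PaddlePaddle compiled with CUDA 11. Currently, CUDA "
        "versions other than 11.x are not supported by the high-performance "
        "inference plugin."
    )
package = "ultra-infer-gpu-python"
print(f"Would install: {package}")
```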
+ 5 - 6
paddlex/utils/env.py

@@ -37,19 +37,18 @@ def get_paddle_version():
         return major_v, minor_v, patch_v, None
 
 
-def get_cuda_version():
-    # FIXME: We should not rely on the PaddlePaddle library to detemine CUDA
-    # versions.
+def get_paddle_cuda_version():
     import paddle.version
 
     cuda_version = paddle.version.cuda()
     return tuple(map(int, cuda_version.split(".")))
 
 
-def get_cudnn_version():
-    # FIXME: We should not rely on the PaddlePaddle library to detemine cuDNN
-    # versions.
+def get_paddle_cudnn_version():
     import paddle.version
 
     cudnn_version = paddle.version.cudnn()
     return tuple(map(int, cudnn_version.split(".")))
+
+
+# Should we also support getting the runtime versions of CUDA and cuDNN?
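The new names make explicit that these helpers report build-time versions, which is what the closing question contrasts with runtime versions. Below, usage of the renamed helpers, plus one possible (hedged, illustrative) answer to that question via `libcudart`'s `cudaRuntimeGetVersion`; none of this is part of the commit:

```python
import ctypes

from paddlex.utils.env import get_paddle_cuda_version, get_paddle_cudnn_version

print(get_paddle_cuda_version())   # build-time CUDA, e.g. (11, 8)
print(get_paddle_cudnn_version())  # build-time cuDNN, e.g. (8, 9, 1)

def get_runtime_cuda_version():
    """Return the installed CUDA runtime version as (major, minor)."""
    libcudart = ctypes.CDLL("libcudart.so")  # assumes a Linux CUDA install
    version = ctypes.c_int()
    status = libcudart.cudaRuntimeGetVersion(ctypes.byref(version))
    if status != 0:
        raise RuntimeError(f"cudaRuntimeGetVersion failed with status {status}")
    # cudaRuntimeGetVersion encodes 11.8 as 11080.
    return version.value // 1000, (version.value % 1000) // 10
```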