
Merge pull request #3974 from opendatalab/add_lmdeploy_backend

Add lmdeploy backend
Xiaomeng Zhao 2 weeks ago
commit b26338d0ef

+ 38 - 1
mineru/backend/vlm/vlm_analyze.py

@@ -40,6 +40,7 @@ class ModelSingleton:
             model = None
             processor = None
             vllm_llm = None
+            lmdeploy_engine = None
             vllm_async_llm = None
             batch_size = kwargs.get("batch_size", 0)  # for transformers backend only
             max_concurrency = kwargs.get("max_concurrency", 100)  # for http-client backend only
@@ -48,7 +49,7 @@ class ModelSingleton:
             for param in ["batch_size", "max_concurrency", "http_timeout"]:
                 if param in kwargs:
                     del kwargs[param]
-            if backend in ['transformers', 'vllm-engine', "vllm-async-engine", "mlx-engine"] and not model_path:
+            if backend in ['transformers', 'vllm-engine', "vllm-async-engine", "mlx-engine", "lmdeploy-engine"] and not model_path:
                 model_path = auto_download_and_get_model_root_path("/","vlm")
                 if backend == "transformers":
                     try:
@@ -118,10 +119,46 @@ class ModelSingleton:
                             kwargs["logits_processors"] = [MinerULogitsProcessor]
                             kwargs["logits_processors"] = [MinerULogitsProcessor]
                         # 使用kwargs为 vllm初始化参数
                         # 使用kwargs为 vllm初始化参数
                         vllm_async_llm = AsyncLLM.from_engine_args(AsyncEngineArgs(**kwargs))
                         vllm_async_llm = AsyncLLM.from_engine_args(AsyncEngineArgs(**kwargs))
+                    elif backend == "lmdeploy-engine":
+                        try:
+                            from lmdeploy import PytorchEngineConfig, TurbomindEngineConfig
+                            from lmdeploy.serve.vl_async_engine import VLAsyncEngine
+                        except ImportError:
+                            raise ImportError("Please install lmdeploy to use the lmdeploy-engine backend.")
+                        if "cache_max_entry_count" not in kwargs:
+                            kwargs["cache_max_entry_count"] = 0.5
+
+                        device = kwargs.get("device", "").lower()
+                        # force the pytorch backend for these specific devices
+                        if device in ["ascend", "maca", "camb"]:
+                            lm_backend = "pytorch"
+                            backend_config = PytorchEngineConfig(**kwargs)
+                        else:
+                            # otherwise honor the lm_backend parameter, defaulting to turbomind
+                            lm_backend = kwargs.get("lm_backend", "turbomind")
+                            if lm_backend == "pytorch":
+                                backend_config = PytorchEngineConfig(**kwargs)
+                            else:
+                                lm_backend = "turbomind"  # fall back to turbomind for anything other than pytorch
+                                backend_config = TurbomindEngineConfig(**kwargs)
+
+                        log_level = 'ERROR'
+                        from lmdeploy.utils import get_logger
+                        logger = get_logger('lmdeploy')
+                        logger.setLevel(log_level)
+                        if os.getenv('TM_LOG_LEVEL') is None:
+                            os.environ['TM_LOG_LEVEL'] = log_level
+
+                        lmdeploy_engine = VLAsyncEngine(
+                            model_path,
+                            backend=lm_backend,
+                            backend_config=backend_config,
+                        )
             self._models[key] = MinerUClient(
                 backend=backend,
                 model=model,
                 processor=processor,
+                lmdeploy_engine=lmdeploy_engine,
                 vllm_llm=vllm_llm,
                 vllm_async_llm=vllm_async_llm,
                 server_url=server_url,
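
For context (outside the diff): a minimal sketch of how the new backend can be requested through ModelSingleton, mirroring the call added to gradio_app.py further down; any extra keyword arguments are forwarded to the lmdeploy engine config by the branch above.

    # Sketch only; assumes the lmdeploy extra is installed (see the pyproject.toml hunk below).
    from mineru.backend.vlm.vlm_analyze import ModelSingleton

    # backend name, model_path (auto-resolved when None), server_url (unused by this backend)
    predictor = ModelSingleton().get_model("lmdeploy-engine", None, None)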

+ 2 - 1
mineru/cli/client.py

@@ -13,7 +13,7 @@ from ..version import __version__
 from .common import do_parse, read_fn, pdf_suffixes, image_suffixes
 
 
-backends = ['pipeline', 'vlm-transformers', 'vlm-vllm-engine', 'vlm-http-client']
+backends = ['pipeline', 'vlm-transformers', 'vlm-vllm-engine', 'vlm-lmdeploy-engine', 'vlm-http-client']
 if is_mac_os_version_supported():
     backends.append("vlm-mlx-engine")
 
@@ -64,6 +64,7 @@ if is_mac_os_version_supported():
       vlm-transformers: More general, but slower.
       vlm-mlx-engine: Faster than transformers.
       vlm-vllm-engine: Faster(engine).
+      vlm-lmdeploy-engine: Faster(engine).
       vlm-http-client: Faster(client).
     Without method specified, pipeline will be used by default.""",
     default='pipeline',
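
For illustration (outside the diff): one way to exercise the new backend value from the command line; only the name "vlm-lmdeploy-engine" comes from this hunk, while the -p/-o/-b flag names are assumptions about the existing mineru CLI.

    # Hypothetical invocation; the flag names are assumed, not shown in this hunk.
    import subprocess

    subprocess.run(
        ["mineru", "-p", "demo.pdf", "-o", "./output", "-b", "vlm-lmdeploy-engine"],
        check=True,
    )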

+ 27 - 3
mineru/cli/gradio_app.py

@@ -274,7 +274,7 @@ def to_pdf(file_path):
 
 # update interface function
 def update_interface(backend_choice):
-    if backend_choice in ["vlm-transformers", "vlm-vllm-async-engine", "vlm-mlx-engine"]:
+    if backend_choice in ["vlm-transformers", "vlm-vllm-async-engine", "vlm-lmdeploy-engine", "vlm-mlx-engine"]:
         return gr.update(visible=False), gr.update(visible=False)
     elif backend_choice in ["vlm-http-client"]:
         return gr.update(visible=True), gr.update(visible=False)
@@ -302,6 +302,13 @@ def update_interface(backend_choice):
     default=False,
 )
 @click.option(
+    '--enable-lmdeploy-engine',
+    'lmdeploy_engine_enable',
+    type=bool,
+    help="Enable LMDeploy engine backend for faster processing.",
+    default=False,
+)
+@click.option(
     '--enable-api',
     'api_enable',
     type=bool,
@@ -338,7 +345,7 @@ def update_interface(backend_choice):
     default='all',
 )
 def main(ctx,
-        example_enable, vllm_engine_enable, api_enable, max_convert_pages,
+        example_enable, vllm_engine_enable, lmdeploy_engine_enable, api_enable, max_convert_pages,
         server_name, server_port, latex_delimiters_type, **kwargs
 ):
 
@@ -367,6 +374,20 @@ def main(ctx,
             print("vLLM engine init successfully.")
             print("vLLM engine init successfully.")
         except Exception as e:
         except Exception as e:
             logger.exception(e)
             logger.exception(e)
+    elif lmdeploy_engine_enable:
+        try:
+            print("Start init LMDeploy engine...")
+            from mineru.backend.vlm.vlm_analyze import ModelSingleton
+            model_singleton = ModelSingleton()
+            predictor = model_singleton.get_model(
+                "lmdeploy-engine",
+                None,
+                None,
+                **kwargs
+            )
+            print("LMDeploy engine init successfully.")
+        except Exception as e:
+            logger.exception(e)
     suffixes = [f".{suffix}" for suffix in pdf_suffixes + image_suffixes]
     with gr.Blocks() as demo:
         gr.HTML(header)
@@ -380,6 +401,9 @@ def main(ctx,
                     if vllm_engine_enable:
                         drop_list = ["pipeline", "vlm-vllm-async-engine"]
                         preferred_option = "vlm-vllm-async-engine"
+                    elif lmdeploy_engine_enable:
+                        drop_list = ["pipeline", "vlm-lmdeploy-engine"]
+                        preferred_option = "vlm-lmdeploy-engine"
                     else:
                         drop_list = ["pipeline", "vlm-transformers", "vlm-http-client"]
                         if is_mac_os_version_supported():
@@ -453,4 +477,4 @@ def main(ctx,
 
 
 if __name__ == '__main__':
-    main()
+    main()
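
For illustration (outside the diff): the new flag can be exercised without the console script by letting click parse a synthetic argv; mineru-gradio is the entry point mapped in the pyproject.toml hunk at the end of this diff.

    # Sketch only: roughly equivalent to running mineru-gradio --enable-lmdeploy-engine true
    import sys
    from mineru.cli.gradio_app import main

    sys.argv = ["mineru-gradio", "--enable-lmdeploy-engine", "true"]
    main()  # click reads sys.argv and enters the LMDeploy init branch added above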

+ 10 - 0
mineru/cli/vlm_server.py

@@ -0,0 +1,10 @@
+
+
+def vllm_server():
+    from mineru.model.vlm.vllm_server import main
+    main()
+
+
+def lmdeploy_server():
+    from mineru.model.vlm.lmdeploy_server import main
+    main()
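
These two wrappers are what the renamed console scripts in the pyproject.toml hunk below point at; as a sketch (outside the diff), calling them from Python has the same effect as running the scripts, since the underlying main() functions read their arguments from sys.argv.

    # Sketch only; same effect as running the mineru-lmdeploy-server script.
    from mineru.cli.vlm_server import lmdeploy_server

    if __name__ == "__main__":
        lmdeploy_server()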

+ 0 - 4
mineru/cli/vlm_vllm_server.py

@@ -1,4 +0,0 @@
-from mineru.model.vlm_vllm_model.server import main
-
-if __name__ == "__main__":
-    main()

+ 0 - 0
mineru/model/vlm_vllm_model/__init__.py → mineru/model/vlm/__init__.py


+ 59 - 0
mineru/model/vlm/lmdeploy_server.py

@@ -0,0 +1,59 @@
+import os
+import sys
+
+from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
+
+
+def main():
+    args = sys.argv[1:]
+
+    has_port_arg = False
+    has_gpu_memory_utilization_arg = False
+    has_log_level_arg = False
+    has_device_arg = False
+    device_type = "cuda"
+
+    # check for existing arguments
+    for i, arg in enumerate(args):
+        if arg == "--server-port" or arg.startswith("--server-port="):
+            has_port_arg = True
+        if arg == "--cache-max-entry-count" or arg.startswith("--cache-max-entry-count="):
+            has_gpu_memory_utilization_arg = True
+        if arg == "--log-level" or arg.startswith("--log-level="):
+            has_log_level_arg = True
+        if arg == "--device":
+            has_device_arg = True
+            if i + 1 < len(args):
+                device_type = args[i + 1]
+        elif arg.startswith("--device="):
+            has_device_arg = True
+            device_type = arg.split("=", 1)[1]
+
+    # add default arguments
+    if not has_port_arg:
+        args.extend(["--server-port", "30000"])
+    if not has_gpu_memory_utilization_arg:
+        args.extend(["--cache-max-entry-count", "0.5"])
+    if not has_log_level_arg:
+        args.extend(["--log-level", "ERROR"])
+    if has_device_arg:
+        if device_type.lower() in ["ascend", "maca", "camb"]:
+            args.extend(["--backend", "pytorch"])
+
+    model_path = auto_download_and_get_model_root_path("/", "vlm")
+
+    # rebuild argv, passing the model path as a positional argument
+    sys.argv = [sys.argv[0]] + ["serve", "api_server", model_path] + args
+
+    if os.getenv('OMP_NUM_THREADS') is None:
+        os.environ["OMP_NUM_THREADS"] = "1"
+
+    # start the lmdeploy server
+    print(f"start lmdeploy server: {sys.argv}")
+
+    # launch the lmdeploy server via os.system
+    os.system("lmdeploy " + " ".join(sys.argv[1:]))
+
+
+if __name__ == "__main__":
+    main()
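
As a quick illustration of the defaulting logic above (outside the diff): with no extra CLI arguments, main() shells out to an "lmdeploy serve api_server ..." command built from the downloaded model path and the three default options. The helper below reproduces that command string under those default assumptions.

    # Sketch only: rebuilds the command main() passes to os.system when no
    # extra arguments are supplied; the model path shown is illustrative.
    def default_lmdeploy_command(model_path: str) -> str:
        args = [
            "serve", "api_server", model_path,
            "--server-port", "30000",
            "--cache-max-entry-count", "0.5",
            "--log-level", "ERROR",
        ]
        return "lmdeploy " + " ".join(args)

    print(default_lmdeploy_command("/path/to/vlm/model"))
    # -> lmdeploy serve api_server /path/to/vlm/model --server-port 30000 --cache-max-entry-count 0.5 --log-level ERROR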

+ 0 - 0
mineru/model/vlm_vllm_model/server.py → mineru/model/vlm/vllm_server.py


+ 6 - 1
pyproject.toml

@@ -58,6 +58,10 @@ vlm = [
 vllm = [
     "vllm>=0.10.1.1,<0.12",
 ]
+lmdeploy = [
+    "lmdeploy>=0.10.2,<0.11",
+    "qwen_vl_utils>=0.0.14,<0.1",
+]
 mlx = [
     "mlx-vlm>=0.3.3,<0.4",
 ]
@@ -105,7 +109,8 @@ issues = "https://github.com/opendatalab/MinerU/issues"
 
 [project.scripts]
 mineru = "mineru.cli.client:main"
-mineru-vllm-server = "mineru.cli.vlm_vllm_server:main"
+mineru-vllm-server = "mineru.cli.vlm_server:vllm_server"
+mineru-lmdeploy-server = "mineru.cli.vlm_server:lmdeploy_server"
 mineru-models-download = "mineru.cli.models_download:download_models"
 mineru-api = "mineru.cli.fast_api:main"
 mineru-gradio = "mineru.cli.gradio_app:main"
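
For completeness (outside the diff): after installing the package with the new optional extra (for example pip install -e ".[lmdeploy]", extra name taken from the hunk above), the renamed entry points can be inspected with importlib.metadata; the group keyword requires Python 3.10+.

    # Sketch only: lists the two server entry points defined above.
    from importlib.metadata import entry_points

    for ep in entry_points(group="console_scripts"):
        if ep.name in ("mineru-vllm-server", "mineru-lmdeploy-server"):
            print(ep.name, "->", ep.value)  # expected: mineru.cli.vlm_server targets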