
Merge pull request #3946 from jinminxi104/add_lmdeploy_backend

add lmdeploy-backend
Xiaomeng Zhao 3 weeks ago
commit 5cc13b919a
4 changed files with 60 additions and 5 deletions
  1. mineru/backend/vlm/vlm_analyze.py (+25, -1)
  2. mineru/cli/client.py (+2, -1)
  3. mineru/cli/common.py (+6, -0)
  4. mineru/cli/gradio_app.py (+27, -3)

+ 25 - 1
mineru/backend/vlm/vlm_analyze.py

@@ -40,6 +40,7 @@ class ModelSingleton:
             model = None
             processor = None
             vllm_llm = None
+            lmdeploy_engine = None
             vllm_async_llm = None
             batch_size = kwargs.get("batch_size", 0)  # for transformers backend only
             max_concurrency = kwargs.get("max_concurrency", 100)  # for http-client backend only
@@ -48,7 +49,7 @@ class ModelSingleton:
             for param in ["batch_size", "max_concurrency", "http_timeout"]:
                 if param in kwargs:
                     del kwargs[param]
-            if backend in ['transformers', 'vllm-engine', "vllm-async-engine", "mlx-engine"] and not model_path:
+            if backend in ['transformers', 'vllm-engine', "vllm-async-engine", "mlx-engine", "lmdeploy-engine", "lmdeploy-async-engine"] and not model_path:
                 model_path = auto_download_and_get_model_root_path("/","vlm")
                 if backend == "transformers":
                     try:
@@ -85,6 +86,28 @@ class ModelSingleton:
                     except ImportError:
                         raise ImportError("Please install mlx-vlm to use the mlx-engine backend.")
                     model, processor = mlx_load(model_path)
+                elif backend == "lmdeploy-engine":
+                    try:
+                        from lmdeploy.serve.vl_async_engine import VLAsyncEngine
+                        from lmdeploy import PytorchEngineConfig, GenerationConfig
+                    except ImportError:
+                        raise ImportError("Please install lmdeploy to use the lmdeploy-engine backend.")
+                    lmdeploy_engine = VLAsyncEngine(model_path, backend='pytorch',
+                                                    backend_config=PytorchEngineConfig(
+                                                        tp=1, block_size=128, cache_max_entry_count=0.8,
+                                                        max_batch_size=256, device_type="ascend", session_len=16382))
+
+
+                elif backend == "lmdeploy-async-engine":
+                    try:
+                        from lmdeploy.serve.vl_async_engine import VLAsyncEngine
+                        from lmdeploy import PytorchEngineConfig, GenerationConfig
+                    except ImportError:
+                        raise ImportError("Please install lmdeploy to use the lmdeploy-async-engine backend.")
+                    lmdeploy_engine = VLAsyncEngine(model_path, backend='pytorch',
+                                                    backend_config=PytorchEngineConfig(
+                                                        tp=1, block_size=128, cache_max_entry_count=0.8,
+                                                        max_batch_size=256, device_type="ascend", session_len=16384))
                 else:
                     if os.getenv('OMP_NUM_THREADS') is None:
                         os.environ["OMP_NUM_THREADS"] = "1"
@@ -122,6 +145,7 @@ class ModelSingleton:
                 backend=backend,
                 model=model,
                 processor=processor,
+                lmdeploy_engine=lmdeploy_engine,
                 vllm_llm=vllm_llm,
                 vllm_async_llm=vllm_async_llm,
                 server_url=server_url,

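For context, a minimal sketch of how the new engine can be obtained through `ModelSingleton`, mirroring the call this commit adds to `gradio_app.py` further down; it assumes `lmdeploy` is installed in an Ascend-capable environment and that passing `None` for the model path takes the auto-download branch shown above.

```python
# Sketch only (not part of this commit): fetch the LMDeploy-backed predictor.
# Assumes lmdeploy is installed and the hardware matches device_type="ascend"
# as configured in the diff above.
from mineru.backend.vlm.vlm_analyze import ModelSingleton

model_singleton = ModelSingleton()
predictor = model_singleton.get_model(
    "lmdeploy-async-engine",  # "lmdeploy-engine" for the sync variant
    None,                     # model path: auto-downloaded when not given
    None,                     # server URL: not used by local engines
)
```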
+ 2 - 1
mineru/cli/client.py

@@ -13,7 +13,7 @@ from ..version import __version__
 from .common import do_parse, read_fn, pdf_suffixes, image_suffixes
 
 
-backends = ['pipeline', 'vlm-transformers', 'vlm-vllm-engine', 'vlm-http-client']
+backends = ['pipeline', 'vlm-transformers', 'vlm-vllm-engine', 'vlm-lmdeploy-engine', 'vlm-http-client']
 if is_mac_os_version_supported():
     backends.append("vlm-mlx-engine")
 
@@ -64,6 +64,7 @@ if is_mac_os_version_supported():
       vlm-transformers: More general, but slower.
       vlm-mlx-engine: Faster than transformers.
       vlm-vllm-engine: Faster(engine).
+      vlm-lmdeploy-engine: Faster(engine).
       vlm-http-client: Faster(client).
     Without method specified, pipeline will be used by default.""",
     default='pipeline',

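A quick, hedged way to confirm the new backend is exposed by the CLI module (the `backends` list is module-level per the diff above); the command-line flag that consumes this value is defined elsewhere in `client.py` and is not part of this diff.

```python
# Sanity check: the LMDeploy engine should now appear among the CLI backends.
from mineru.cli.client import backends

assert "vlm-lmdeploy-engine" in backends
print(backends)  # e.g. ['pipeline', 'vlm-transformers', 'vlm-vllm-engine', 'vlm-lmdeploy-engine', 'vlm-http-client', ...]
```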
+ 6 - 0
mineru/cli/common.py

@@ -327,6 +327,9 @@ def do_parse(
         if backend == "vllm-async-engine":
             raise Exception("vlm-vllm-async-engine backend is not supported in sync mode, please use vlm-vllm-engine backend")
 
+        if backend == "lmdeploy-async-engine":
+            raise Exception("vlm-lmdeploy-async-engine backend is not supported in sync mode, please use vlm-lmdeploy-engine backend")
+
         os.environ['MINERU_VLM_FORMULA_ENABLE'] = str(formula_enable)
         os.environ['MINERU_VLM_TABLE_ENABLE'] = str(table_enable)
 
@@ -378,6 +381,9 @@ async def aio_do_parse(
         if backend == "vllm-engine":
             raise Exception("vlm-vllm-engine backend is not supported in async mode, please use vlm-vllm-async-engine backend")
 
+        if backend == "lmdeploy-engine":
+            raise Exception("vlm-lmdeploy-engine backend is not supported in async mode, please use vlm-lmdeploy-async-engine backend")
+
         os.environ['MINERU_VLM_FORMULA_ENABLE'] = str(formula_enable)
         os.environ['MINERU_VLM_TABLE_ENABLE'] = str(table_enable)
 

+ 27 - 3
mineru/cli/gradio_app.py

@@ -274,7 +274,7 @@ def to_pdf(file_path):
 
 # Update interface function
 def update_interface(backend_choice):
-    if backend_choice in ["vlm-transformers", "vlm-vllm-async-engine", "vlm-mlx-engine"]:
+    if backend_choice in ["vlm-transformers", "vlm-vllm-async-engine", "vlm-lmdeploy-async-engine", "vlm-mlx-engine"]:
         return gr.update(visible=False), gr.update(visible=False)
     elif backend_choice in ["vlm-http-client"]:
         return gr.update(visible=True), gr.update(visible=False)
@@ -302,6 +302,13 @@ def update_interface(backend_choice):
     default=False,
 )
 @click.option(
+    '--enable-lmdeploy-engine',
+    'lmdeploy_engine_enable',
+    type=bool,
+    help="Enable LMDeploy engine backend for faster processing.",
+    default=False,
+)
+@click.option(
     '--enable-api',
     'api_enable',
     type=bool,
@@ -338,7 +345,7 @@ def update_interface(backend_choice):
     default='all',
 )
 def main(ctx,
-        example_enable, vllm_engine_enable, api_enable, max_convert_pages,
+        example_enable, vllm_engine_enable, lmdeploy_engine_enable, api_enable, max_convert_pages,
         server_name, server_port, latex_delimiters_type, **kwargs
 ):
 
@@ -367,6 +374,20 @@ def main(ctx,
             print("vLLM engine init successfully.")
         except Exception as e:
             logger.exception(e)
+    elif lmdeploy_engine_enable:
+        try:
+            print("Start init LMDeploy engine...")
+            from mineru.backend.vlm.vlm_analyze import ModelSingleton
+            model_singleton = ModelSingleton()
+            predictor = model_singleton.get_model(
+                "lmdeploy-async-engine",
+                None,
+                None,
+                **kwargs
+            )
+            print("LMDeploy engine init successfully.")
+        except Exception as e:
+            logger.exception(e)
     suffixes = [f".{suffix}" for suffix in pdf_suffixes + image_suffixes]
     with gr.Blocks() as demo:
         gr.HTML(header)
@@ -380,6 +401,9 @@ def main(ctx,
                     if vllm_engine_enable:
                         drop_list = ["pipeline", "vlm-vllm-async-engine"]
                         preferred_option = "vlm-vllm-async-engine"
+                    elif lmdeploy_engine_enable:
+                        drop_list = ["pipeline", "vlm-lmdeploy-async-engine"]
+                        preferred_option = "vlm-lmdeploy-async-engine"
                     else:
                         drop_list = ["pipeline", "vlm-transformers", "vlm-http-client"]
                         if is_mac_os_version_supported():
@@ -453,4 +477,4 @@ def main(ctx,
 
 
 if __name__ == '__main__':
-    main()
+    main()
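
Finally, a hedged sketch of launching the Gradio demo with the new flag: `main` in `gradio_app.py` is a click command (see the `@click.option` decorators above), so it can be called with an argument list; whether the packaged console entry point exposes the same flag under the same name is an assumption here.

```python
# Sketch only: start the Gradio app with the LMDeploy async engine enabled.
# Requires an environment where the lmdeploy Ascend backend can initialize.
from mineru.cli.gradio_app import main

main(["--enable-lmdeploy-engine", "true"])
```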