
feat: add openai-server command for flexible inference engine selection in vlm_server

myhloli committed 1 week ago · commit 506179f0c8

2 changed files with 34 additions and 0 deletions:
  1. mineru/cli/vlm_server.py  +33 -0
  2. pyproject.toml  +1 -0

mineru/cli/vlm_server.py  +33 -0

@@ -1,3 +1,7 @@
+import click
+import sys
+
+from loguru import logger
 
 
 def vllm_server():
@@ -8,3 +12,32 @@ def vllm_server():
 def lmdeploy_server():
     from mineru.model.vlm.lmdeploy_server import main
     main()
+
+
+@click.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True))
+@click.option(
+    '-e',
+    '--engine',
+    'inference_engine',
+    type=click.Choice(['auto', 'vllm', 'lmdeploy']),
+    default='auto',
+    help='Select the inference engine used to accelerate VLM inference, default is "auto".',
+)
+@click.pass_context
+def openai_server(ctx, inference_engine):
+    sys.argv = [sys.argv[0]] + ctx.args
+    if inference_engine == 'auto':
+        try:
+            import vllm
+            inference_engine = 'vllm'
+            logger.info("Using vLLM as the inference engine for VLM server.")
+        except ImportError:
+            inference_engine = 'lmdeploy'
+            logger.info("vLLM not found, falling back to LMDeploy as the inference engine for VLM server.")
+    if inference_engine == 'vllm':
+        vllm_server()
+    elif inference_engine == 'lmdeploy':
+        lmdeploy_server()
+
+if __name__ == "__main__":
+    openai_server()
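
The passthrough hinges on two Click settings: ignore_unknown_options=True stops Click from rejecting flags it does not recognize, and allow_extra_args=True collects them into ctx.args, which openai_server then re-injects into sys.argv so the selected engine's own argument parser sees them unchanged; the 'auto' branch simply probes for an importable vllm package and falls back to LMDeploy otherwise. A minimal, runnable sketch of the same passthrough pattern, with a hypothetical downstream_main standing in for the real vLLM/LMDeploy entry points:

    import argparse
    import sys

    import click


    def downstream_main():
        # Stand-in for the argparse-based main() that the engine servers expose;
        # like them, it parses whatever happens to be in sys.argv.
        parser = argparse.ArgumentParser()
        parser.add_argument("--port", type=int, default=8000)  # illustrative flag only
        args = parser.parse_args()
        print(f"serving on port {args.port}")


    @click.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True))
    @click.pass_context
    def wrapper(ctx):
        # Click leaves every token it did not consume in ctx.args;
        # rewriting sys.argv hands those tokens to the downstream parser as-is.
        sys.argv = [sys.argv[0]] + ctx.args
        downstream_main()


    if __name__ == "__main__":
        wrapper()  # e.g. `python sketch.py --port 9000` prints "serving on port 9000"

One consequence of this design: mistyped flags are not caught by the wrapper and only surface as errors from the engine's own parser, which is the usual trade-off for a transparent passthrough CLI.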

pyproject.toml  +1 -0

@@ -111,6 +111,7 @@ issues = "https://github.com/opendatalab/MinerU/issues"
 mineru = "mineru.cli:client.main"
 mineru-vllm-server = "mineru.cli.vlm_server:vllm_server"
 mineru-lmdeploy-server = "mineru.cli.vlm_server:lmdeploy_server"
+mineru-openai-server = "mineru.cli.vlm_server:openai_server"
 mineru-models-download = "mineru.cli.models_download:download_models"
 mineru-api = "mineru.cli.fast_api:main"
 mineru-gradio = "mineru.cli.gradio_app:main"
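
The new [project.scripts] line registers a console script named mineru-openai-server that resolves to the openai_server function above, so after the package is reinstalled (e.g. with pip install -e .) an invocation like mineru-openai-server -e vllm --port 8000 would pin the engine and forward the remaining flags to it; the forwarded flags here are illustrative, since which ones are valid depends on the engine chosen. A quick way to confirm the script is registered, using only the standard library (the entry_points(group=...) keyword assumes Python 3.10+):

    from importlib.metadata import entry_points

    # Print every console script under the mineru prefix; after reinstalling,
    # mineru-openai-server -> mineru.cli.vlm_server:openai_server should appear.
    for ep in entry_points(group="console_scripts"):
        if ep.name.startswith("mineru"):
            print(f"{ep.name} -> {ep.value}")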