# vlm_server.py

import sys

import click
from loguru import logger


def vllm_server():
    # Lazy import so vLLM is only required when this engine is selected.
    from mineru.model.vlm.vllm_server import main
    main()


def lmdeploy_server():
    # Lazy import so LMDeploy is only required when this engine is selected.
    from mineru.model.vlm.lmdeploy_server import main
    main()


@click.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True))
@click.option(
    '-e',
    '--engine',
    'inference_engine',
    type=click.Choice(['auto', 'vllm', 'lmdeploy']),
    default='auto',
    help='Select the inference engine used to accelerate VLM inference, default is "auto".',
)
@click.pass_context
def openai_server(ctx, inference_engine):
    # Forward any options click did not consume to the engine's own CLI parser.
    sys.argv = [sys.argv[0]] + ctx.args
    if inference_engine == 'auto':
        # Prefer vLLM when it is installed; otherwise fall back to LMDeploy.
        try:
            import vllm  # noqa: F401
            inference_engine = 'vllm'
            logger.info("Using vLLM as the inference engine for VLM server.")
        except ImportError:
            inference_engine = 'lmdeploy'
            logger.info("vLLM not found, falling back to LMDeploy as the inference engine for VLM server.")
    if inference_engine == 'vllm':
        vllm_server()
    elif inference_engine == 'lmdeploy':
        lmdeploy_server()


if __name__ == "__main__":
    openai_server()
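

# Usage sketch (illustrative; the engine flag shown below is an assumption
# about the underlying engine's CLI, not guaranteed by this file). Because the
# command is declared with ignore_unknown_options=True and allow_extra_args=True,
# options click does not recognize end up in ctx.args and are forwarded to the
# selected engine's own argument parser via sys.argv, e.g.:
#
#   python vlm_server.py --engine vllm --port 8000
#
# runs the vLLM entry point as if it had been invoked with "--port 8000".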