# Copyright (c) Opendatalab. All rights reserved.
import os
import time

from loguru import logger
from packaging import version

from mineru_vl_utils import MinerUClient
from mineru.utils.pdf_image_tools import load_images_from_pdf

from .utils import enable_custom_logits_processors, set_default_gpu_memory_utilization, set_default_batch_size
from .model_output_to_middle_json import result_to_middle_json
from ...data.data_reader_writer import DataWriter
from ...utils.check_sys_env import is_mac_os_version_supported
from ...utils.config_reader import get_device
from ...utils.enum_class import ImageType
from ...utils.models_download_utils import auto_download_and_get_model_root_path


class ModelSingleton:
    """Process-wide cache of MinerUClient instances, keyed by (backend, model_path, server_url)."""

    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(
        self,
        backend: str,
        model_path: str | None,
        server_url: str | None,
        **kwargs,
    ) -> MinerUClient:
        key = (backend, model_path, server_url)
        if key not in self._models:
            start_time = time.time()
            model = None
            processor = None
            vllm_llm = None
            lmdeploy_engine = None
            vllm_async_llm = None
            batch_size = kwargs.get("batch_size", 0)  # for transformers backend only
            max_concurrency = kwargs.get("max_concurrency", 100)  # for http-client backend only
            http_timeout = kwargs.get("http_timeout", 600)  # for http-client backend only
            # Remove these parameters from kwargs so they are not forwarded to
            # backend initializers that do not accept them.
            for param in ["batch_size", "max_concurrency", "http_timeout"]:
                if param in kwargs:
                    del kwargs[param]
            if backend in [
                "transformers", "vllm-engine", "vllm-async-engine",
                "mlx-engine", "lmdeploy-engine", "lmdeploy-async-engine",
            ] and not model_path:
                model_path = auto_download_and_get_model_root_path("/", "vlm")
            if backend == "transformers":
                try:
                    from transformers import (
                        AutoProcessor,
                        Qwen2VLForConditionalGeneration,
                    )
                    from transformers import __version__ as transformers_version
                except ImportError:
                    raise ImportError("Please install transformers to use the transformers backend.")
                # transformers >= 4.56.0 renamed the `torch_dtype` argument to `dtype`.
                if version.parse(transformers_version) >= version.parse("4.56.0"):
                    dtype_key = "dtype"
                else:
                    dtype_key = "torch_dtype"
                device = get_device()
                model = Qwen2VLForConditionalGeneration.from_pretrained(
                    model_path,
                    device_map={"": device},
                    **{dtype_key: "auto"},  # type: ignore
                )
                processor = AutoProcessor.from_pretrained(
                    model_path,
                    use_fast=True,
                )
                if batch_size == 0:
                    batch_size = set_default_batch_size()
            elif backend == "mlx-engine":
                if not is_mac_os_version_supported():
                    raise EnvironmentError("mlx-engine backend is only supported on macOS 13.5+ with Apple Silicon.")
                try:
                    from mlx_vlm import load as mlx_load
                except ImportError:
                    raise ImportError("Please install mlx-vlm to use the mlx-engine backend.")
                model, processor = mlx_load(model_path)
            elif backend == "lmdeploy-engine":
                try:
                    from lmdeploy.serve.vl_async_engine import VLAsyncEngine
                    from lmdeploy import PytorchEngineConfig
                except ImportError:
                    raise ImportError("Please install lmdeploy to use the lmdeploy-engine backend.")
                lmdeploy_engine = VLAsyncEngine(
                    model_path,
                    backend="pytorch",
                    backend_config=PytorchEngineConfig(
                        tp=1,
                        block_size=128,
                        cache_max_entry_count=0.8,
                        max_batch_size=256,
                        device_type="ascend",
                        session_len=16384,
                    ),
                )
            elif backend == "lmdeploy-async-engine":
                try:
                    from lmdeploy.serve.vl_async_engine import VLAsyncEngine
                    from lmdeploy import PytorchEngineConfig
                except ImportError:
                    raise ImportError("Please install lmdeploy to use the lmdeploy-async-engine backend.")
                lmdeploy_engine = VLAsyncEngine(
                    model_path,
                    backend="pytorch",
                    backend_config=PytorchEngineConfig(
                        tp=1,
                        block_size=128,
                        cache_max_entry_count=0.8,
                        max_batch_size=256,
                        device_type="ascend",
                        session_len=16384,
                    ),
                )
            else:
                if os.getenv("OMP_NUM_THREADS") is None:
                    os.environ["OMP_NUM_THREADS"] = "1"
                if backend == "vllm-engine":
                    try:
                        import vllm
                        from mineru_vl_utils import MinerULogitsProcessor
                    except ImportError:
                        raise ImportError("Please install vllm to use the vllm-engine backend.")
                    if "gpu_memory_utilization" not in kwargs:
                        kwargs["gpu_memory_utilization"] = set_default_gpu_memory_utilization()
                    if "model" not in kwargs:
                        kwargs["model"] = model_path
                    if enable_custom_logits_processors() and ("logits_processors" not in kwargs):
                        kwargs["logits_processors"] = [MinerULogitsProcessor]
                    # Initialize vllm with the remaining kwargs.
                    vllm_llm = vllm.LLM(**kwargs)
                elif backend == "vllm-async-engine":
                    try:
                        from vllm.engine.arg_utils import AsyncEngineArgs
                        from vllm.v1.engine.async_llm import AsyncLLM
                        from mineru_vl_utils import MinerULogitsProcessor
                    except ImportError:
                        raise ImportError("Please install vllm to use the vllm-async-engine backend.")
                    if "gpu_memory_utilization" not in kwargs:
                        kwargs["gpu_memory_utilization"] = set_default_gpu_memory_utilization()
                    if "model" not in kwargs:
                        kwargs["model"] = model_path
                    if enable_custom_logits_processors() and ("logits_processors" not in kwargs):
                        kwargs["logits_processors"] = [MinerULogitsProcessor]
                    # Initialize the async vllm engine with the remaining kwargs.
                    vllm_async_llm = AsyncLLM.from_engine_args(AsyncEngineArgs(**kwargs))
            self._models[key] = MinerUClient(
                backend=backend,
                model=model,
                processor=processor,
                lmdeploy_engine=lmdeploy_engine,
                vllm_llm=vllm_llm,
                vllm_async_llm=vllm_async_llm,
                server_url=server_url,
                batch_size=batch_size,
                max_concurrency=max_concurrency,
                http_timeout=http_timeout,
            )
            elapsed = round(time.time() - start_time, 2)
            logger.info(f"get {backend} predictor cost: {elapsed}s")
        return self._models[key]
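
# Usage sketch (hypothetical values): the singleton caches one MinerUClient per
# (backend, model_path, server_url) key, so repeated calls with the same key
# return the already-initialized client instead of reloading the model.
#
#   client = ModelSingleton().get_model(
#       backend="transformers",
#       model_path=None,  # None triggers auto_download_and_get_model_root_path
#       server_url=None,
#       batch_size=4,     # consumed by get_model, not forwarded to from_pretrained
#   )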

def doc_analyze(
    pdf_bytes,
    image_writer: DataWriter | None,
    predictor: MinerUClient | None = None,
    backend="transformers",
    model_path: str | None = None,
    server_url: str | None = None,
    **kwargs,
):
    if predictor is None:
        predictor = ModelSingleton().get_model(backend, model_path, server_url, **kwargs)

    # load_images_start = time.time()
    images_list, pdf_doc = load_images_from_pdf(pdf_bytes, image_type=ImageType.PIL)
    images_pil_list = [image_dict["img_pil"] for image_dict in images_list]
    # load_images_time = round(time.time() - load_images_start, 2)
    # logger.info(f"load images cost: {load_images_time}, speed: {round(len(images_pil_list)/load_images_time, 3)} images/s")

    # infer_start = time.time()
    results = predictor.batch_two_step_extract(images=images_pil_list)
    # infer_time = round(time.time() - infer_start, 2)
    # logger.info(f"infer finished, cost: {infer_time}, speed: {round(len(results)/infer_time, 3)} page/s")

    middle_json = result_to_middle_json(results, images_list, pdf_doc, image_writer)
    return middle_json, results
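
# Synchronous usage sketch (paths are hypothetical; FileBasedDataWriter is one
# DataWriter implementation shipped in mineru's data_reader_writer package):
#
#   from mineru.data.data_reader_writer import FileBasedDataWriter
#
#   with open("doc.pdf", "rb") as f:
#       pdf_bytes = f.read()
#   image_writer = FileBasedDataWriter("./output/images")
#   middle_json, results = doc_analyze(pdf_bytes, image_writer, backend="transformers")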

async def aio_doc_analyze(
    pdf_bytes,
    image_writer: DataWriter | None,
    predictor: MinerUClient | None = None,
    backend="transformers",
    model_path: str | None = None,
    server_url: str | None = None,
    **kwargs,
):
    if predictor is None:
        predictor = ModelSingleton().get_model(backend, model_path, server_url, **kwargs)

    # load_images_start = time.time()
    images_list, pdf_doc = load_images_from_pdf(pdf_bytes, image_type=ImageType.PIL)
    images_pil_list = [image_dict["img_pil"] for image_dict in images_list]
    # load_images_time = round(time.time() - load_images_start, 2)
    # logger.debug(f"load images cost: {load_images_time}, speed: {round(len(images_pil_list)/load_images_time, 3)} images/s")

    # infer_start = time.time()
    results = await predictor.aio_batch_two_step_extract(images=images_pil_list)
    # infer_time = round(time.time() - infer_start, 2)
    # logger.info(f"infer finished, cost: {infer_time}, speed: {round(len(results)/infer_time, 3)} page/s")

    middle_json = result_to_middle_json(results, images_list, pdf_doc, image_writer)
    return middle_json, results
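
# Async usage sketch (hypothetical): aio_doc_analyze awaits the client's
# aio_batch_two_step_extract, so it must be driven by an event loop.
#
#   import asyncio
#
#   async def main():
#       middle_json, results = await aio_doc_analyze(
#           pdf_bytes, image_writer, backend="vllm-async-engine"
#       )
#
#   asyncio.run(main())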