vlm_analyze.py
# Copyright (c) Opendatalab. All rights reserved.
import os
import time

from loguru import logger

from .utils import enable_custom_logits_processors, set_default_gpu_memory_utilization, set_default_batch_size, \
    set_lmdeploy_backend
from .model_output_to_middle_json import result_to_middle_json
from ...data.data_reader_writer import DataWriter
from mineru.utils.pdf_image_tools import load_images_from_pdf
from ...utils.check_sys_env import is_mac_os_version_supported
from ...utils.config_reader import get_device
from ...utils.enum_class import ImageType
from ...utils.models_download_utils import auto_download_and_get_model_root_path
from mineru_vl_utils import MinerUClient
from packaging import version


class ModelSingleton:
    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance
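
    # get_model caches one MinerUClient per (backend, model_path, server_url) key,
    # so repeated calls with identical arguments reuse the already-initialized engine.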
  23. def get_model(
  24. self,
  25. backend: str,
  26. model_path: str | None,
  27. server_url: str | None,
  28. **kwargs,
  29. ) -> MinerUClient:
  30. key = (backend, model_path, server_url)
  31. if key not in self._models:
  32. start_time = time.time()
  33. model = None
  34. processor = None
  35. vllm_llm = None
  36. lmdeploy_engine = None
  37. vllm_async_llm = None
  38. batch_size = kwargs.get("batch_size", 0) # for transformers backend only
  39. max_concurrency = kwargs.get("max_concurrency", 100) # for http-client backend only
  40. http_timeout = kwargs.get("http_timeout", 600) # for http-client backend only
  41. server_headers = kwargs.get("server_headers", None) # for http-client backend only
  42. max_retries = kwargs.get("max_retries", 3) # for http-client backend only
  43. retry_backoff_factor = kwargs.get("retry_backoff_factor", 0.5) # for http-client backend only
            # Remove these client-level parameters from kwargs so they are not passed to unrelated engine initializers.
            for param in ["batch_size", "max_concurrency", "http_timeout", "server_headers", "max_retries", "retry_backoff_factor"]:
                if param in kwargs:
                    del kwargs[param]
            if backend not in ["http-client"] and not model_path:
                model_path = auto_download_and_get_model_root_path("/", "vlm")
            if backend == "transformers":
                try:
                    from transformers import (
                        AutoProcessor,
                        Qwen2VLForConditionalGeneration,
                    )
                    from transformers import __version__ as transformers_version
                except ImportError:
                    raise ImportError("Please install transformers to use the transformers backend.")
                if version.parse(transformers_version) >= version.parse("4.56.0"):
                    dtype_key = "dtype"
                else:
                    dtype_key = "torch_dtype"
                device = get_device()
                model = Qwen2VLForConditionalGeneration.from_pretrained(
                    model_path,
                    device_map={"": device},
                    **{dtype_key: "auto"},  # type: ignore
                )
                processor = AutoProcessor.from_pretrained(
                    model_path,
                    use_fast=True,
                )
                if batch_size == 0:
                    batch_size = set_default_batch_size()
            elif backend == "mlx-engine":
                mlx_supported = is_mac_os_version_supported()
                if not mlx_supported:
                    raise EnvironmentError("mlx-engine backend is only supported on macOS 13.5+ with Apple Silicon.")
                try:
                    from mlx_vlm import load as mlx_load
                except ImportError:
                    raise ImportError("Please install mlx-vlm to use the mlx-engine backend.")
                model, processor = mlx_load(model_path)
            else:
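                # Default OMP_NUM_THREADS to 1 for the engine-style backends below unless the user has set it.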
                if os.getenv('OMP_NUM_THREADS') is None:
                    os.environ["OMP_NUM_THREADS"] = "1"
                if backend == "vllm-engine":
                    try:
                        import vllm
                        from mineru_vl_utils import MinerULogitsProcessor
                    except ImportError:
                        raise ImportError("Please install vllm to use the vllm-engine backend.")
                    if "gpu_memory_utilization" not in kwargs:
                        kwargs["gpu_memory_utilization"] = set_default_gpu_memory_utilization()
                    if "model" not in kwargs:
                        kwargs["model"] = model_path
                    if enable_custom_logits_processors() and ("logits_processors" not in kwargs):
                        kwargs["logits_processors"] = [MinerULogitsProcessor]
                    # Use the remaining kwargs as vLLM initialization parameters.
                    vllm_llm = vllm.LLM(**kwargs)
                elif backend == "vllm-async-engine":
                    try:
                        from vllm.engine.arg_utils import AsyncEngineArgs
                        from vllm.v1.engine.async_llm import AsyncLLM
                        from mineru_vl_utils import MinerULogitsProcessor
                    except ImportError:
                        raise ImportError("Please install vllm to use the vllm-async-engine backend.")
                    if "gpu_memory_utilization" not in kwargs:
                        kwargs["gpu_memory_utilization"] = set_default_gpu_memory_utilization()
                    if "model" not in kwargs:
                        kwargs["model"] = model_path
                    if enable_custom_logits_processors() and ("logits_processors" not in kwargs):
                        kwargs["logits_processors"] = [MinerULogitsProcessor]
                    # Use the remaining kwargs as vLLM async-engine initialization parameters.
                    vllm_async_llm = AsyncLLM.from_engine_args(AsyncEngineArgs(**kwargs))
                elif backend == "lmdeploy-engine":
                    try:
                        from lmdeploy import PytorchEngineConfig, TurbomindEngineConfig
                        from lmdeploy.serve.vl_async_engine import VLAsyncEngine
                    except ImportError:
                        raise ImportError("Please install lmdeploy to use the lmdeploy-engine backend.")
                    if "cache_max_entry_count" not in kwargs:
                        kwargs["cache_max_entry_count"] = 0.5
                    if "lmdeploy_device" in kwargs:
                        device_type = kwargs.pop("lmdeploy_device")
                        if device_type not in ["cuda", "ascend", "maca", "camb"]:
                            raise ValueError(f"Unsupported lmdeploy device type: {device_type}")
                    else:
                        device_type = "cuda"
                    if "lmdeploy_backend" in kwargs:
                        lm_backend = kwargs.pop("lmdeploy_backend")
                        if lm_backend not in ["pytorch", "turbomind"]:
                            raise ValueError(f"Unsupported lmdeploy backend: {lm_backend}")
                    else:
                        lm_backend = set_lmdeploy_backend(device_type)
                    logger.info(f"lmdeploy device is: {device_type}, lmdeploy backend is: {lm_backend}")
                    if lm_backend == "pytorch":
                        kwargs["device_type"] = device_type
                        backend_config = PytorchEngineConfig(**kwargs)
                    elif lm_backend == "turbomind":
                        backend_config = TurbomindEngineConfig(**kwargs)
                    else:
                        raise ValueError(f"Unsupported lmdeploy backend: {lm_backend}")
                    log_level = 'ERROR'
                    from lmdeploy.utils import get_logger
                    lm_logger = get_logger('lmdeploy')
                    lm_logger.setLevel(log_level)
                    if os.getenv('TM_LOG_LEVEL') is None:
                        os.environ['TM_LOG_LEVEL'] = log_level
                    lmdeploy_engine = VLAsyncEngine(
                        model_path,
                        backend=lm_backend,
                        backend_config=backend_config,
                    )
            self._models[key] = MinerUClient(
                backend=backend,
                model=model,
                processor=processor,
                lmdeploy_engine=lmdeploy_engine,
                vllm_llm=vllm_llm,
                vllm_async_llm=vllm_async_llm,
                server_url=server_url,
                batch_size=batch_size,
                max_concurrency=max_concurrency,
                http_timeout=http_timeout,
                server_headers=server_headers,
                max_retries=max_retries,
                retry_backoff_factor=retry_backoff_factor,
            )
            elapsed = round(time.time() - start_time, 2)
            logger.info(f"get {backend} predictor cost: {elapsed}s")
        return self._models[key]
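

# doc_analyze renders each page of pdf_bytes to a PIL image, runs the batched
# two-step VLM extraction, and converts the raw results into MinerU's middle-JSON.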
def doc_analyze(
    pdf_bytes,
    image_writer: DataWriter | None,
    predictor: MinerUClient | None = None,
    backend="transformers",
    model_path: str | None = None,
    server_url: str | None = None,
    **kwargs,
):
    if predictor is None:
        predictor = ModelSingleton().get_model(backend, model_path, server_url, **kwargs)
    # load_images_start = time.time()
    images_list, pdf_doc = load_images_from_pdf(pdf_bytes, image_type=ImageType.PIL)
    images_pil_list = [image_dict["img_pil"] for image_dict in images_list]
    # load_images_time = round(time.time() - load_images_start, 2)
    # logger.info(f"load images cost: {load_images_time}, speed: {round(len(images_pil_list)/load_images_time, 3)} images/s")
    # infer_start = time.time()
    results = predictor.batch_two_step_extract(images=images_pil_list)
    # infer_time = round(time.time() - infer_start, 2)
    # logger.info(f"infer finished, cost: {infer_time}, speed: {round(len(results)/infer_time, 3)} page/s")
    middle_json = result_to_middle_json(results, images_list, pdf_doc, image_writer)
    return middle_json, results
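

# aio_doc_analyze is the asyncio counterpart of doc_analyze; it awaits the client's
# aio_batch_two_step_extract so it can run inside an existing event loop (e.g. a server).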
async def aio_doc_analyze(
    pdf_bytes,
    image_writer: DataWriter | None,
    predictor: MinerUClient | None = None,
    backend="transformers",
    model_path: str | None = None,
    server_url: str | None = None,
    **kwargs,
):
    if predictor is None:
        predictor = ModelSingleton().get_model(backend, model_path, server_url, **kwargs)
    # load_images_start = time.time()
    images_list, pdf_doc = load_images_from_pdf(pdf_bytes, image_type=ImageType.PIL)
    images_pil_list = [image_dict["img_pil"] for image_dict in images_list]
    # load_images_time = round(time.time() - load_images_start, 2)
    # logger.debug(f"load images cost: {load_images_time}, speed: {round(len(images_pil_list)/load_images_time, 3)} images/s")
    # infer_start = time.time()
    results = await predictor.aio_batch_two_step_extract(images=images_pil_list)
    # infer_time = round(time.time() - infer_start, 2)
    # logger.info(f"infer finished, cost: {infer_time}, speed: {round(len(results)/infer_time, 3)} page/s")
    middle_json = result_to_middle_json(results, images_list, pdf_doc, image_writer)
    return middle_json, results
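

# Minimal usage sketch (illustrative only): "demo.pdf" is a placeholder path, and
# image_writer=None assumes extracted images do not need to be persisted; pass a
# DataWriter implementation to save them. For async callers, await aio_doc_analyze
# with the same arguments instead.
if __name__ == "__main__":
    from pathlib import Path

    pdf_bytes = Path("demo.pdf").read_bytes()  # placeholder input PDF
    middle_json, results = doc_analyze(
        pdf_bytes,
        image_writer=None,
        backend="transformers",  # or "vllm-engine", "mlx-engine", "lmdeploy-engine", "http-client"
    )
    logger.info(f"extracted {len(results)} pages")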