# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import os
from typing import List

from ....modules.doc_vlm.model_list import MODELS
from ....utils.device import TemporaryDeviceChanger
from ....utils.env import get_device_type
from ...common.batch_sampler import DocVLMBatchSampler
from ..base import BasePredictor
from .result import DocVLMResult


class DocVLMPredictor(BasePredictor):

    entities = MODELS

    def __init__(self, *args, **kwargs):
        """Initializes DocVLMPredictor.

        Args:
            *args: Arbitrary positional arguments passed to the superclass.
            **kwargs: Arbitrary keyword arguments passed to the superclass.
        """
        super().__init__(*args, **kwargs)
        self.device = kwargs.get("device", None)
        self.infer, self.processor = self._build(**kwargs)

    def _build_batch_sampler(self):
        """Builds and returns a DocVLMBatchSampler instance.

        Returns:
            DocVLMBatchSampler: An instance of DocVLMBatchSampler.
        """
        return DocVLMBatchSampler()

    def _get_result_class(self):
        """Returns the result class, DocVLMResult.

        Returns:
            type: The DocVLMResult class.
        """
        return DocVLMResult

    def _build(self, **kwargs):
        """Builds the model and the corresponding processor based on the configuration.

        Returns:
            model: An instance of a Paddle model, either dynamic or static.
            processor: The corresponding processor for the model.
        """
        import paddle

        from .modeling import PPDocBeeInference

        # build model
        if "PP-DocBee" in self.model_name:
            if kwargs.get("use_hpip", False):
                raise ValueError(
                    "The PP-DocBee series does not support `use_hpip=True` for now."
                )
            # Prefer bfloat16 on NPU devices or on hardware that supports it;
            # otherwise fall back to float32.
            dtype = (
                "bfloat16"
                if ("npu" in get_device_type() or paddle.amp.is_bfloat16_supported())
                else "float32"
            )
            with TemporaryDeviceChanger(self.device):
                model = PPDocBeeInference.from_pretrained(self.model_dir, dtype=dtype)
        else:
            raise NotImplementedError(f"Model {self.model_name} is not supported.")
        # build processor
        processor = self.build_processor()
        return model, processor
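
    # Worked example of the dtype resolution above (a sketch; the outcome
    # depends on the runtime environment):
    #   get_device_type() contains "npu"                 -> dtype = "bfloat16"
    #   paddle.amp.is_bfloat16_supported() returns True  -> dtype = "bfloat16"
    #   otherwise                                        -> dtype = "float32"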

    def process(self, data: List[dict], **kwargs):
        """
        Processes a batch of data through preprocessing, inference, and postprocessing.

        Args:
            data (List[dict]): A batch of input data; must be a list containing a
                single dict (e.g. [{"image": "/path/to/image", "query": "some question"}]).
            **kwargs: Arbitrary keyword arguments passed to model.generate.

        Returns:
            dict: A dictionary containing the raw sample information and prediction
                results for every instance of the batch.
        """
        assert (
            isinstance(data, List) and len(data) == 1
        ), "data must be a list of length 1"
        assert isinstance(data[0], dict)

        data = data[0]
        src_data = copy.copy(data)

        # preprocess
        data = self.processor.preprocess(**data)
        data = self._switch_inputs_to_device(data)

        # do infer
        with TemporaryDeviceChanger(self.device):
            preds = self.infer.generate(data, **kwargs)

        # postprocess
        preds = self.processor.postprocess(preds)

        result_dict = self._format_result_dict(preds, src_data)
        return result_dict
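
    # A minimal input/output sketch for process() (paths and the answer are
    # placeholders):
    #
    #   batch = [{"image": "/path/to/demo.png", "query": "What is the title?"}]
    #   out = predictor.process(batch)
    #   # out == {"image": ["/path/to/demo.png"],
    #   #         "query": ["What is the title?"],
    #   #         "result": ["<model answer>"]}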

    def build_processor(self, **kwargs):
        """Builds the processor (image processor plus tokenizer) for the model."""
        from ..common.tokenizer import MIXQwen2Tokenizer
        from .processors import PPDocBeeProcessor, Qwen2VLImageProcessor

        if "PP-DocBee" in self.model_name:
            image_processor = Qwen2VLImageProcessor()
            tokenizer = MIXQwen2Tokenizer.from_pretrained(self.model_dir)
            return PPDocBeeProcessor(
                image_processor=image_processor, tokenizer=tokenizer
            )
        else:
            raise NotImplementedError
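
    # The returned processor is used by process() above as a preprocess/postprocess
    # pair (arguments shown are illustrative):
    #
    #   inputs = processor.preprocess(image="...", query="...")  # model-ready inputs
    #   preds = model.generate(inputs)
    #   text = processor.postprocess(preds)                      # decoded answers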

    def _format_result_dict(self, model_preds, src_data):
        """Zips source samples and model predictions into a single column-wise dict."""
        if not isinstance(model_preds, list):
            model_preds = [model_preds]
        if not isinstance(src_data, list):
            src_data = [src_data]
        if len(model_preds) != len(src_data):
            raise ValueError(
                f"Model predicts {len(model_preds)} results while src data has {len(src_data)} samples."
            )

        rst_format_dict = {k: [] for k in src_data[0].keys()}
        rst_format_dict["result"] = []

        for data_sample, model_pred in zip(src_data, model_preds):
            for k in data_sample.keys():
                rst_format_dict[k].append(data_sample[k])
            rst_format_dict["result"].append(model_pred)

        return rst_format_dict
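
    # Shape example for _format_result_dict (hypothetical values):
    #
    #   self._format_result_dict(["ans1", "ans2"],
    #                            [{"query": "q1"}, {"query": "q2"}])
    #   # -> {"query": ["q1", "q2"], "result": ["ans1", "ans2"]}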

    def _infer_dynamic_forward_device(self, device):
        """Infers the forward device for a dynamic graph model."""
        import GPUtil

        from ....utils.device import parse_device

        if device is None:
            return None
        if "cpu" in device.lower():
            return "cpu"

        device_type, device_ids = parse_device(device)

        cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
        if cuda_visible_devices is None:
            env_gpu_num = len(GPUtil.getGPUs())
            cuda_visible_devices = ",".join([str(i) for i in range(env_gpu_num)])
        env_device_ids = cuda_visible_devices.split(",")

        for env_device_id in env_device_ids:
            if not env_device_id.isdigit():
                raise ValueError(
                    f"CUDA_VISIBLE_DEVICES ID must be an integer. Invalid device ID: {env_device_id}"
                )

        if max(device_ids) >= len(env_device_ids):
            raise ValueError(
                f"Requested GPU ids {device_ids} exceed the number of visible devices ({cuda_visible_devices})."
            )

        rst_global_gpu_ids = [env_device_ids[idx] for idx in device_ids]
        return device_type + ":" + ",".join(rst_global_gpu_ids)
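
    # Remapping example (hypothetical environment): with CUDA_VISIBLE_DEVICES="2,3",
    # the logical device "gpu:1" resolves to the global device id "gpu:3":
    #
    #   self._infer_dynamic_forward_device("gpu:1")  # -> "gpu:3"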

    def _switch_inputs_to_device(self, input_dict):
        """Switches tensor inputs to the specified device; non-tensor values pass through unchanged."""
        import paddle

        if self.device is None:
            return input_dict
        rst_dict = {
            k: (
                paddle.to_tensor(input_dict[k], place=self.device)
                if isinstance(input_dict[k], paddle.Tensor)
                else input_dict[k]
            )
            for k in input_dict
        }
        return rst_dict
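
# A minimal end-to-end usage sketch. All paths and the `max_new_tokens` generate
# kwarg are illustrative assumptions; the constructor signature comes from
# BasePredictor, which is defined elsewhere:
#
#   predictor = DocVLMPredictor(model_dir="/path/to/PP-DocBee-2B", device="gpu:0")
#   batch = [{"image": "/path/to/demo.png", "query": "What is the title of this document?"}]
#   output = predictor.process(batch, max_new_tokens=512)
#   print(output["result"][0])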