post_quantization.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.fluid.contrib.slim.quantization.quantization_pass import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization.quantization_pass import AddQuantDequantPass
from paddle.fluid.contrib.slim.quantization.quantization_pass import _op_real_in_out_name
from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization
import paddlex.utils.logging as logging
import paddle.fluid as fluid
import os


class PaddleXPostTrainingQuantization(PostTrainingQuantization):
    def __init__(self,
                 executor,
                 dataset,
                 program,
                 inputs,
                 outputs,
                 batch_size=10,
                 batch_nums=None,
                 scope=None,
                 algo="KL",
                 quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
                 is_full_quantize=False,
                 is_use_cache_file=False,
                 cache_dir="./temp_post_training"):
        '''
        The class utilizes the post-training quantization method to quantize
        the fp32 model. It uses calibration data to calculate the scale
        factors of quantized variables, and inserts fake quant/dequant ops to
        obtain the quantized model.

        Args:
            executor(fluid.Executor): The executor to load, run and save the
                quantized model.
            dataset(Python Iterator): The data reader.
            program(fluid.Program): The paddle program that holds the
                parameters of the model.
            inputs(dict): The inputs of the program.
            outputs(dict): The outputs of the program.
            batch_size(int, optional): The batch size of the DataLoader.
                Default is 10.
            batch_nums(int, optional): If batch_nums is not None, the number
                of calibration samples is batch_size * batch_nums. If
                batch_nums is None, use all data provided by the dataset as
                calibration data.
            scope(fluid.Scope, optional): The scope of the program, used to
                load and save variables. If scope is None, it is obtained via
                global_scope().
            algo(str, optional): If algo='KL', use the KL-divergence method to
                get a more precise scale factor. If algo='direct', use the
                abs_max method to get the scale factor. Default is 'KL'.
            quantizable_op_type(list[str], optional): The types of ops that
                will be quantized. Default is ["conv2d", "depthwise_conv2d",
                "mul"].
            is_full_quantize(bool, optional): If is_full_quantize is True,
                apply quantization to all supported quantizable op types. If
                is_full_quantize is False, only apply quantization to the op
                types given by quantizable_op_type.
            is_use_cache_file(bool, optional): If is_use_cache_file is False,
                all temp data is kept in memory. If is_use_cache_file is True,
                temp data is saved to disk. When the fp32 model is complex or
                the amount of calibration data is large, set is_use_cache_file
                to True. Default is False.
            cache_dir(str, optional): When is_use_cache_file is True, the
                directory for saving temp data. Default is
                ./temp_post_training.
        Returns:
            None
        '''
        self._executor = executor
        self._dataset = dataset
        self._batch_size = batch_size
        self._batch_nums = batch_nums
        self._scope = fluid.global_scope() if scope is None else scope
        self._algo = algo
        self._is_use_cache_file = is_use_cache_file
        self._cache_dir = cache_dir
        if self._is_use_cache_file and not os.path.exists(self._cache_dir):
            os.mkdir(self._cache_dir)

        supported_quantizable_op_type = \
            QuantizationTransformPass._supported_quantizable_op_type + \
            AddQuantDequantPass._supported_quantizable_op_type
        if is_full_quantize:
            self._quantizable_op_type = supported_quantizable_op_type
        else:
            self._quantizable_op_type = quantizable_op_type
            for op_type in self._quantizable_op_type:
                assert op_type in supported_quantizable_op_type + \
                    AddQuantDequantPass._activation_type, \
                    op_type + " is not supported for quantization."

        self._place = self._executor.place
        self._program = program
        self._feed_list = list(inputs.values())
        self._fetch_list = list(outputs.values())
        self._data_loader = None

        self._op_real_in_out_name = _op_real_in_out_name
        self._bit_length = 8
        self._quantized_weight_var_name = set()
        self._quantized_act_var_name = set()
        self._sampling_data = {}
        self._quantized_var_scale_factor = {}

    def quantize(self):
        '''
        Quantize the fp32 model. Uses calibration data to calculate the scale
        factors of quantized variables, and inserts fake quant/dequant ops to
        obtain the quantized model.

        Args:
            None
        Returns:
            The program of the quantized model.
        '''
        self._preprocess()

        batch_id = 0
        for data in self._data_loader():
            self._executor.run(
                program=self._program,
                feed=data,
                fetch_list=self._fetch_list,
                return_numpy=False)
            self._sample_data(batch_id)

            if batch_id % 5 == 0:
                logging.info("run batch: {}".format(batch_id))
            batch_id += 1
            if self._batch_nums and batch_id >= self._batch_nums:
                break
        logging.info("all run batch: {}".format(batch_id))

        logging.info("calculate scale factor ...")
        self._calculate_scale_factor()

        logging.info("update the program ...")
        self._update_program()

        self._save_output_scale()
        return self._program
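
    # How the scale factors are obtained (an illustrative note, not taken
    # from the implementation of _calculate_scale_factor): with algo='direct',
    # the scale of a sampled tensor is conceptually its maximum absolute
    # value, e.g. for a numpy array `sampled` of calibration activations:
    #
    #     scale = np.abs(sampled).max()
    #
    # With algo='KL', a clipping threshold is searched for instead, so that
    # the KL divergence between the fp32 distribution and the quantized
    # distribution is minimized.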

    def save_quantized_model(self, save_model_path):
        '''
        Save the quantized model to disk.

        Args:
            save_model_path(str): The path to save the quantized model.
        Returns:
            None
        '''
        feed_vars_names = [var.name for var in self._feed_list]
        fluid.io.save_inference_model(
            dirname=save_model_path,
            feeded_var_names=feed_vars_names,
            target_vars=self._fetch_list,
            executor=self._executor,
            params_filename='__params__',
            main_program=self._program)
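
    # The saved model can be reloaded for inference with the matching
    # params_filename, e.g. (a minimal sketch; `exe` is assumed to be a
    # fluid.Executor):
    #
    #     [program, feed_names, fetch_targets] = fluid.io.load_inference_model(
    #         dirname=save_model_path,
    #         executor=exe,
    #         params_filename='__params__')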

    def _preprocess(self):
        '''
        Load model and set data loader, collect the variable names for
        sampling, and set activation variables to be persistable.
        '''
        feed_vars = [fluid.framework._get_var(var.name, self._program) \
            for var in self._feed_list]
        self._data_loader = fluid.io.DataLoader.from_generator(
            feed_list=feed_vars, capacity=3 * self._batch_size, iterable=True)
        self._data_loader.set_sample_list_generator(
            self._dataset.generator(self._batch_size, drop_last=True),
            places=self._place)

        # collect the variable names for sampling
        persistable_var_names = []
        for var in self._program.list_vars():
            if var.persistable:
                persistable_var_names.append(var.name)

        for op in self._program.global_block().ops:
            op_type = op.type
            if op_type in self._quantizable_op_type:
                if op_type in ("conv2d", "depthwise_conv2d"):
                    self._quantized_act_var_name.add(op.input("Input")[0])
                    self._quantized_weight_var_name.add(op.input("Filter")[0])
                    self._quantized_act_var_name.add(op.output("Output")[0])
                elif op_type == "mul":
                    if self._is_input_all_not_persistable(
                            op, persistable_var_names):
                        op._set_attr("skip_quant", True)
                        logging.warning(
                            "Skip quantizing a mul op, since neither of its input variables is persistable."
                        )
                    else:
                        self._quantized_act_var_name.add(op.input("X")[0])
                        self._quantized_weight_var_name.add(op.input("Y")[0])
                        self._quantized_act_var_name.add(op.output("Out")[0])
                else:
                    # process other quantizable op types; all of their inputs
                    # must be non-persistable
                    if self._is_input_all_not_persistable(
                            op, persistable_var_names):
                        input_output_name_list = self._op_real_in_out_name[
                            op_type]
                        for input_name in input_output_name_list[0]:
                            for var_name in op.input(input_name):
                                self._quantized_act_var_name.add(var_name)
                        for output_name in input_output_name_list[1]:
                            for var_name in op.output(output_name):
                                self._quantized_act_var_name.add(var_name)

        # set activation variables to be persistable, so that we can obtain
        # their tensor data in _sample_data
        for var in self._program.list_vars():
            if var.name in self._quantized_act_var_name:
                var.persistable = True
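

# Example usage (a minimal sketch): `eval_dataset`, `program`, `inputs` and
# `outputs` are assumptions here; in PaddleX they typically come from a loaded
# fp32 model and its evaluation dataset.
#
#     exe = fluid.Executor(fluid.CPUPlace())
#     post_quant = PaddleXPostTrainingQuantization(
#         executor=exe,
#         dataset=eval_dataset,
#         program=program,
#         inputs=inputs,
#         outputs=outputs,
#         batch_size=32,
#         batch_nums=10,
#         algo="KL")
#     post_quant.quantize()
#     post_quant.save_quantized_model("./quant_model")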