callbacks.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import datetime
import six
import copy
import json

import paddle
import paddle.distributed as dist

from paddlex.ppdet.utils.checkpoint import save_model
from paddlex.ppdet.metrics import get_infer_results
from paddlex.ppdet.utils.logger import setup_logger

logger = setup_logger('ppdet.engine')

__all__ = [
    'Callback', 'ComposeCallback', 'LogPrinter', 'Checkpointer',
    'VisualDLWriter', 'SniperProposalsGenerator'
]


class Callback(object):
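    """Base class for training/evaluation hooks. Subclasses override the
    on_* methods, which the trainer invokes at step, epoch and train
    boundaries with a shared `status` dict."""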
    def __init__(self, model):
        self.model = model

    def on_step_begin(self, status):
        pass

    def on_step_end(self, status):
        pass

    def on_epoch_begin(self, status):
        pass

    def on_epoch_end(self, status):
        pass

    def on_train_begin(self, status):
        pass

    def on_train_end(self, status):
        pass


class ComposeCallback(object):
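    """Groups several Callback instances (None entries are dropped) and
    forwards every event to each of them in order."""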
    def __init__(self, callbacks):
        callbacks = [c for c in list(callbacks) if c is not None]
        for c in callbacks:
            assert isinstance(
                c, Callback), "callback should be subclass of Callback"
        self._callbacks = callbacks

    def on_step_begin(self, status):
        for c in self._callbacks:
            c.on_step_begin(status)

    def on_step_end(self, status):
        for c in self._callbacks:
            c.on_step_end(status)

    def on_epoch_begin(self, status):
        for c in self._callbacks:
            c.on_epoch_begin(status)

    def on_epoch_end(self, status):
        for c in self._callbacks:
            c.on_epoch_end(status)

    def on_train_begin(self, status):
        for c in self._callbacks:
            c.on_train_begin(status)

    def on_train_end(self, status):
        for c in self._callbacks:
            c.on_train_end(status)


class LogPrinter(Callback):
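    """Prints training progress (loss meters, learning rate, ETA, batch/data
    cost, images/s) every `log_iter` steps and evaluation progress every 100
    iterations; only the rank-0 process logs."""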
    def __init__(self, model):
        super(LogPrinter, self).__init__(model)

    def on_step_end(self, status):
        if dist.get_world_size() < 2 or dist.get_rank() == 0:
            mode = status['mode']
            if mode == 'train':
                epoch_id = status['epoch_id']
                step_id = status['step_id']
                steps_per_epoch = status['steps_per_epoch']
                training_staus = status['training_staus']
                batch_time = status['batch_time']
                data_time = status['data_time']

                epoches = self.model.cfg.epoch
                batch_size = self.model.cfg['{}Reader'.format(
                    mode.capitalize())]['batch_size']

                logs = training_staus.log()
                space_fmt = ':' + str(len(str(steps_per_epoch))) + 'd'
                if step_id % self.model.cfg.log_iter == 0:
                    eta_steps = (epoches - epoch_id) * steps_per_epoch - step_id
                    eta_sec = eta_steps * batch_time.global_avg
                    eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                    ips = float(batch_size) / batch_time.avg
                    fmt = ' '.join([
                        'Epoch: [{}]',
                        '[{' + space_fmt + '}/{}]',
                        'learning_rate: {lr:.6f}',
                        '{meters}',
                        'eta: {eta}',
                        'batch_cost: {btime}',
                        'data_cost: {dtime}',
                        'ips: {ips:.4f} images/s',
                    ])
                    fmt = fmt.format(
                        epoch_id,
                        step_id,
                        steps_per_epoch,
                        lr=status['learning_rate'],
                        meters=logs,
                        eta=eta_str,
                        btime=str(batch_time),
                        dtime=str(data_time),
                        ips=ips)
                    logger.info(fmt)
            if mode == 'eval':
                step_id = status['step_id']
                if step_id % 100 == 0:
                    logger.info("Eval iter: {}".format(step_id))
    def on_epoch_end(self, status):
        if dist.get_world_size() < 2 or dist.get_rank() == 0:
            mode = status['mode']
            if mode == 'eval':
                sample_num = status['sample_num']
                cost_time = status['cost_time']
                logger.info('Total sample number: {}, average FPS: {}'.format(
                    sample_num, sample_num / cost_time))


class Checkpointer(Callback):
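    """Saves model weights: a snapshot every `snapshot_epoch` epochs (named
    "model_final" on the last epoch) during training, and "best_model"
    whenever the evaluated AP improves. When the wrapped model exposes a
    `student_model` attribute (distillation), the student is saved instead."""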
    def __init__(self, model):
        super(Checkpointer, self).__init__(model)
        cfg = self.model.cfg
        self.best_ap = 0.
        self.save_dir = os.path.join(self.model.cfg.save_dir,
                                     self.model.cfg.filename)
        if hasattr(self.model.model, 'student_model'):
            self.weight = self.model.model.student_model
        else:
            self.weight = self.model.model
    def on_epoch_end(self, status):
        # Checkpointer only performed during training
        mode = status['mode']
        epoch_id = status['epoch_id']
        weight = None
        save_name = None
        if dist.get_world_size() < 2 or dist.get_rank() == 0:
            if mode == 'train':
                end_epoch = self.model.cfg.epoch
                if (epoch_id + 1) % self.model.cfg.snapshot_epoch == 0 \
                        or epoch_id == end_epoch - 1:
                    save_name = str(
                        epoch_id) if epoch_id != end_epoch - 1 else "model_final"
                    weight = self.weight
            elif mode == 'eval':
                if 'save_best_model' in status and status['save_best_model']:
                    for metric in self.model._metrics:
                        map_res = metric.get_results()
                        if 'bbox' in map_res:
                            key = 'bbox'
                        elif 'keypoint' in map_res:
                            key = 'keypoint'
                        else:
                            key = 'mask'
                        if key not in map_res:
                            logger.warning("Evaluation results empty, this may be due to "
                                           "training iterations being too few or not "
                                           "loading the correct weights.")
                            return
                        if map_res[key][0] > self.best_ap:
                            self.best_ap = map_res[key][0]
                            save_name = 'best_model'
                            weight = self.weight
                        logger.info("Best test {} ap is {:0.3f}.".format(
                            key, self.best_ap))
            if weight:
                save_model(weight, self.model.optimizer, self.save_dir,
                           save_name, epoch_id + 1)


class WiferFaceEval(Callback):
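    """Runs WIDER FACE evaluation by handing the model to each configured
    metric at the start of the (eval-only) epoch, then exits the process."""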
    def __init__(self, model):
        super(WiferFaceEval, self).__init__(model)

    def on_epoch_begin(self, status):
        assert self.model.mode == 'eval', \
            "WiferFaceEval can only be set during evaluation"
        for metric in self.model._metrics:
            metric.update(self.model.model)
        sys.exit()


class VisualDLWriter(Callback):
    """
    Use VisualDL to log scalars (losses, mAP) and images.
    """

    def __init__(self, model):
        super(VisualDLWriter, self).__init__(model)

        assert six.PY3, "VisualDL requires Python >= 3.5"
        try:
            from visualdl import LogWriter
        except Exception as e:
            logger.error('visualdl not found, please install visualdl. '
                         'For example: `pip install visualdl`.')
            raise e
        self.vdl_writer = LogWriter(
            model.cfg.get('vdl_log_dir', 'vdl_log_dir/scalar'))
        self.vdl_loss_step = 0
        self.vdl_mAP_step = 0
        self.vdl_image_step = 0
        self.vdl_image_frame = 0
    def on_step_end(self, status):
        mode = status['mode']
        if dist.get_world_size() < 2 or dist.get_rank() == 0:
            if mode == 'train':
                training_staus = status['training_staus']
                for loss_name, loss_value in training_staus.get().items():
                    self.vdl_writer.add_scalar(loss_name, loss_value,
                                               self.vdl_loss_step)
                self.vdl_loss_step += 1
            elif mode == 'test':
                ori_image = status['original_image']
                result_image = status['result_image']
                self.vdl_writer.add_image(
                    "original/frame_{}".format(self.vdl_image_frame),
                    ori_image, self.vdl_image_step)
                self.vdl_writer.add_image(
                    "result/frame_{}".format(self.vdl_image_frame),
                    result_image, self.vdl_image_step)
                self.vdl_image_step += 1
                # each frame can display ten pictures at most.
                if self.vdl_image_step % 10 == 0:
                    self.vdl_image_step = 0
                    self.vdl_image_frame += 1
    def on_epoch_end(self, status):
        mode = status['mode']
        if dist.get_world_size() < 2 or dist.get_rank() == 0:
            if mode == 'eval':
                for metric in self.model._metrics:
                    for key, map_value in metric.get_results().items():
                        self.vdl_writer.add_scalar("{}-mAP".format(key),
                                                   map_value[0],
                                                   self.vdl_mAP_step)
                self.vdl_mAP_step += 1


class SniperProposalsGenerator(Callback):
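    """After training ends, runs inference over a SNIPER-cropped copy of the
    dataset, aggregates the per-chip detections, and dumps the resulting bbox
    proposals to `cfg.proposals_path` as JSON."""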
    def __init__(self, model):
        super(SniperProposalsGenerator, self).__init__(model)
        ori_dataset = self.model.dataset
        self.dataset = self._create_new_dataset(ori_dataset)
        self.loader = self.model.loader
        self.cfg = self.model.cfg
        self.infer_model = self.model.model
    def _create_new_dataset(self, ori_dataset):
        dataset = copy.deepcopy(ori_dataset)
        # init anno_cropper
        dataset.init_anno_cropper()
        # generate infer roidbs
        ori_roidbs = dataset.get_ori_roidbs()
        roidbs = dataset.anno_cropper.crop_infer_anno_records(ori_roidbs)
        # set new roidbs
        dataset.set_roidbs(roidbs)
        return dataset
    def _eval_with_loader(self, loader):
        results = []
        with paddle.no_grad():
            self.infer_model.eval()
            for step_id, data in enumerate(loader):
                outs = self.infer_model(data)
                for key in ['im_shape', 'scale_factor', 'im_id']:
                    outs[key] = data[key]
                for key, value in outs.items():
                    if hasattr(value, 'numpy'):
                        outs[key] = value.numpy()
                results.append(outs)
        return results
    def on_train_end(self, status):
        self.loader.dataset = self.dataset
        results = self._eval_with_loader(self.loader)
        results = self.dataset.anno_cropper.aggregate_chips_detections(results)
        # sniper
        proposals = []
        clsid2catid = {v: k for k, v in self.dataset.catid2clsid.items()}
        for outs in results:
            batch_res = get_infer_results(outs, clsid2catid)
            start = 0
            for i, im_id in enumerate(outs['im_id']):
                bbox_num = outs['bbox_num']
                end = start + bbox_num[i]
                bbox_res = batch_res['bbox'][start:end] \
                    if 'bbox' in batch_res else None
                if bbox_res:
                    proposals += bbox_res
                # advance the slice offset so the next image in the batch
                # reads its own detections
                start = end
        logger.info("save proposals in {}".format(self.cfg.proposals_path))
        with open(self.cfg.proposals_path, 'w') as f:
            json.dump(proposals, f)
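

# Illustrative usage sketch (not part of the original module): `trainer` is a
# hypothetical object exposing the attributes these callbacks read, such as
# `cfg`, `model`, `optimizer`, `dataset`, `loader` and `_metrics`. A training
# loop would compose and drive the callbacks roughly like this:
#
#     callbacks = ComposeCallback([LogPrinter(trainer), Checkpointer(trainer)])
#     status = {'mode': 'train'}
#     callbacks.on_train_begin(status)
#     for epoch_id in range(trainer.cfg.epoch):
#         status['epoch_id'] = epoch_id
#         callbacks.on_epoch_begin(status)
#         for step_id, data in enumerate(trainer.loader):
#             status['step_id'] = step_id
#             callbacks.on_step_begin(status)
#             ...  # forward/backward, then record losses and timing in status
#             callbacks.on_step_end(status)
#         callbacks.on_epoch_end(status)
#     callbacks.on_train_end(status)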