trainer.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import copy
import time
from tqdm import tqdm

import numpy as np
import typing
from PIL import Image, ImageOps, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

import paddle
import paddle.nn as nn
import paddle.distributed as dist
from paddle.distributed import fleet
from paddle.static import InputSpec
from paddlex.ppdet.optimizer import ModelEMA

from paddlex.ppdet.core.workspace import create
from paddlex.ppdet.utils.checkpoint import load_weight, load_pretrain_weight
from paddlex.ppdet.utils.visualizer import visualize_results, save_result
from paddlex.ppdet.metrics import Metric, COCOMetric, VOCMetric, WiderFaceMetric, get_infer_results, KeyPointTopDownCOCOEval, KeyPointTopDownMPIIEval
from paddlex.ppdet.metrics import RBoxMetric, JDEDetMetric, SNIPERCOCOMetric
from paddlex.ppdet.data.source.sniper_coco import SniperCOCODataSet
from paddlex.ppdet.data.source.category import get_categories
import paddlex.ppdet.utils.stats as stats
from paddlex.ppdet.utils.fuse_utils import fuse_conv_bn
from paddlex.ppdet.utils import profiler

from .callbacks import Callback, ComposeCallback, LogPrinter, Checkpointer, WiferFaceEval, VisualDLWriter, SniperProposalsGenerator, WandbCallback
from .export_utils import _dump_infer_config, _prune_input_spec

from paddlex.ppdet.utils.logger import setup_logger
logger = setup_logger('ppdet.engine')

__all__ = ['Trainer']

MOT_ARCH = ['DeepSORT', 'JDE', 'FairMOT', 'ByteTrack']

class Trainer(object):
    def __init__(self, cfg, mode='train'):
        self.cfg = cfg
        assert mode.lower() in ['train', 'eval', 'test'], \
            "mode should be 'train', 'eval' or 'test'"
        self.mode = mode.lower()
        self.optimizer = None
        self.is_loaded_weights = False

        # build data loader
        capital_mode = self.mode.capitalize()
        if cfg.architecture in MOT_ARCH and self.mode in ['eval', 'test']:
            self.dataset = self.cfg['{}MOTDataset'.format(
                capital_mode)] = create('{}MOTDataset'.format(capital_mode))()
        else:
            self.dataset = self.cfg['{}Dataset'.format(capital_mode)] = create(
                '{}Dataset'.format(capital_mode))()

        if cfg.architecture == 'DeepSORT' and self.mode == 'train':
            logger.error('DeepSORT does not need training on an MOT dataset.')
            sys.exit(1)

        if cfg.architecture == 'FairMOT' and self.mode == 'eval':
            images = self.parse_mot_images(cfg)
            self.dataset.set_images(images)

        if self.mode == 'train':
            self.loader = create('{}Reader'.format(capital_mode))(
                self.dataset, cfg.worker_num)

        if cfg.architecture == 'JDE' and self.mode == 'train':
            cfg['JDEEmbeddingHead'][
                'num_identities'] = self.dataset.num_identities_dict[0]
            # JDE only supports single-class MOT for now.

        if cfg.architecture == 'FairMOT' and self.mode == 'train':
            cfg['FairMOTEmbeddingHead'][
                'num_identities_dict'] = self.dataset.num_identities_dict
            # FairMOT supports both single-class and multi-class MOT.

        # build model
        if 'model' not in self.cfg:
            self.model = create(cfg.architecture)
        else:
            self.model = self.cfg.model
            self.is_loaded_weights = True

        if cfg.architecture == 'YOLOX':
            for k, m in self.model.named_sublayers():
                if isinstance(m, nn.BatchNorm2D):
                    m._epsilon = 1e-3  # for amp(fp16)
                    m._momentum = 0.97  # 0.03 in pytorch

        # normalize params for deploy
        if 'slim' in cfg and cfg['slim_type'] == 'OFA':
            self.model.model.load_meanstd(cfg['TestReader'][
                'sample_transforms'])
        elif 'slim' in cfg and cfg['slim_type'] == 'Distill':
            self.model.student_model.load_meanstd(cfg['TestReader'][
                'sample_transforms'])
        elif 'slim' in cfg and cfg[
                'slim_type'] == 'DistillPrune' and self.mode == 'train':
            self.model.student_model.load_meanstd(cfg['TestReader'][
                'sample_transforms'])
        else:
            self.model.load_meanstd(cfg['TestReader']['sample_transforms'])

        self.use_ema = ('use_ema' in cfg and cfg['use_ema'])
        if self.use_ema:
            ema_decay = self.cfg.get('ema_decay', 0.9998)
            cycle_epoch = self.cfg.get('cycle_epoch', -1)
            ema_decay_type = self.cfg.get('ema_decay_type', 'threshold')
            self.ema = ModelEMA(
                self.model,
                decay=ema_decay,
                ema_decay_type=ema_decay_type,
                cycle_epoch=cycle_epoch)
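        # NOTE: train() swaps the EMA weights into the model right before
        # each snapshot/validation and restores the original weights
        # afterwards (see the is_snapshot handling in train() below).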
        # EvalDataset is built with a BatchSampler to evaluate on a single device
        # TODO: multi-device evaluation
        if self.mode == 'eval':
            if cfg.architecture == 'FairMOT':
                self.loader = create('EvalMOTReader')(self.dataset, 0)
            else:
                self._eval_batch_sampler = paddle.io.BatchSampler(
                    self.dataset, batch_size=self.cfg.EvalReader['batch_size'])
                reader_name = '{}Reader'.format(self.mode.capitalize())
                # If the metric is VOC, collate_batch must be set to False.
                if cfg.metric == 'VOC':
                    cfg[reader_name]['collate_batch'] = False
                self.loader = create(reader_name)(self.dataset, cfg.worker_num,
                                                  self._eval_batch_sampler)
        # TestDataset is built after the user sets images, so skip loader creation here

        # build optimizer in train mode
        if self.mode == 'train':
            steps_per_epoch = len(self.loader)
            self.lr = create('LearningRate')(steps_per_epoch)
            self.optimizer = create('OptimizerBuilder')(self.lr, self.model)

            # The unstructured pruner is only enabled in train mode.
            if self.cfg.get('unstructured_prune'):
                self.pruner = create('UnstructuredPruner')(self.model,
                                                           steps_per_epoch)

        self._nranks = dist.get_world_size()
        self._local_rank = dist.get_rank()

        self.status = {}

        self.start_epoch = 0
        self.end_epoch = 0 if 'epoch' not in cfg else cfg.epoch

        # initialize default callbacks
        self._init_callbacks()

        # initialize default metrics
        self._init_metrics()
        self._reset_metrics()
    def _init_callbacks(self):
        if self.mode == 'train':
            self._callbacks = [LogPrinter(self), Checkpointer(self)]
            if self.cfg.get('use_vdl', False):
                self._callbacks.append(VisualDLWriter(self))
            if self.cfg.get('save_proposals', False):
                self._callbacks.append(SniperProposalsGenerator(self))
            if self.cfg.get('use_wandb', False) or 'wandb' in self.cfg:
                self._callbacks.append(WandbCallback(self))
            self._compose_callback = ComposeCallback(self._callbacks)
        elif self.mode == 'eval':
            self._callbacks = [LogPrinter(self)]
            if self.cfg.metric == 'WiderFace':
                self._callbacks.append(WiferFaceEval(self))
            self._compose_callback = ComposeCallback(self._callbacks)
        elif self.mode == 'test' and self.cfg.get('use_vdl', False):
            self._callbacks = [VisualDLWriter(self)]
            self._compose_callback = ComposeCallback(self._callbacks)
        else:
            self._callbacks = []
            self._compose_callback = None
    def _init_metrics(self, validate=False):
        if self.mode == 'test' or (self.mode == 'train' and not validate):
            self._metrics = []
            return
        classwise = self.cfg['classwise'] if 'classwise' in self.cfg else False
        if self.cfg.metric == 'COCO' or self.cfg.metric == "SNIPERCOCO":
            # TODO: bias should be unified
            bias = 1 if self.cfg.get('bias', False) else 0
            output_eval = self.cfg['output_eval'] \
                if 'output_eval' in self.cfg else None
            save_prediction_only = self.cfg.get('save_prediction_only', False)

            # pass clsid2catid info to the metric instance to avoid loading
            # the annotation file multiple times
            clsid2catid = {v: k for k, v in self.dataset.catid2clsid.items()} \
                if self.mode == 'eval' else None

            # when validating during training, the annotation file should be
            # taken from the EvalReader instead of self.dataset (which is the
            # TrainReader)
            if self.mode == 'train' and validate:
                eval_dataset = self.cfg['EvalDataset']
                eval_dataset.check_or_download_dataset()
                anno_file = eval_dataset.get_anno()
                dataset = eval_dataset
            else:
                dataset = self.dataset
                anno_file = dataset.get_anno()

            IouType = self.cfg['IouType'] if 'IouType' in self.cfg else 'bbox'
            if self.cfg.metric == "COCO":
                self._metrics = [
                    COCOMetric(
                        anno_file=anno_file,
                        clsid2catid=clsid2catid,
                        classwise=classwise,
                        output_eval=output_eval,
                        bias=bias,
                        IouType=IouType,
                        save_prediction_only=save_prediction_only)
                ]
            elif self.cfg.metric == "SNIPERCOCO":  # sniper
                self._metrics = [
                    SNIPERCOCOMetric(
                        anno_file=anno_file,
                        dataset=dataset,
                        clsid2catid=clsid2catid,
                        classwise=classwise,
                        output_eval=output_eval,
                        bias=bias,
                        IouType=IouType,
                        save_prediction_only=save_prediction_only)
                ]
        elif self.cfg.metric == 'RBOX':
            # TODO: bias should be unified
            bias = self.cfg['bias'] if 'bias' in self.cfg else 0
            output_eval = self.cfg['output_eval'] \
                if 'output_eval' in self.cfg else None
            save_prediction_only = self.cfg.get('save_prediction_only', False)

            # pass clsid2catid info to the metric instance to avoid loading
            # the annotation file multiple times
            clsid2catid = {v: k for k, v in self.dataset.catid2clsid.items()} \
                if self.mode == 'eval' else None

            # when validating during training, the annotation file should be
            # taken from the EvalReader instead of self.dataset (which is the
            # TrainReader)
            anno_file = self.dataset.get_anno()
            if self.mode == 'train' and validate:
                eval_dataset = self.cfg['EvalDataset']
                eval_dataset.check_or_download_dataset()
                anno_file = eval_dataset.get_anno()

            self._metrics = [
                RBoxMetric(
                    anno_file=anno_file,
                    clsid2catid=clsid2catid,
                    classwise=classwise,
                    output_eval=output_eval,
                    bias=bias,
                    save_prediction_only=save_prediction_only)
            ]
        elif self.cfg.metric == 'VOC':
            self._metrics = [
                VOCMetric(
                    label_list=self.dataset.get_label_list(),
                    class_num=self.cfg.num_classes,
                    map_type=self.cfg.map_type,
                    classwise=classwise)
            ]
        elif self.cfg.metric == 'WiderFace':
            multi_scale = self.cfg.multi_scale_eval if 'multi_scale_eval' in self.cfg else True
            self._metrics = [
                WiderFaceMetric(
                    image_dir=os.path.join(self.dataset.dataset_dir,
                                           self.dataset.image_dir),
                    anno_file=self.dataset.get_anno(),
                    multi_scale=multi_scale)
            ]
        elif self.cfg.metric == 'KeyPointTopDownCOCOEval':
            eval_dataset = self.cfg['EvalDataset']
            eval_dataset.check_or_download_dataset()
            anno_file = eval_dataset.get_anno()
            save_prediction_only = self.cfg.get('save_prediction_only', False)
            self._metrics = [
                KeyPointTopDownCOCOEval(
                    anno_file,
                    len(eval_dataset),
                    self.cfg.num_joints,
                    self.cfg.save_dir,
                    save_prediction_only=save_prediction_only)
            ]
        elif self.cfg.metric == 'KeyPointTopDownMPIIEval':
            eval_dataset = self.cfg['EvalDataset']
            eval_dataset.check_or_download_dataset()
            anno_file = eval_dataset.get_anno()
            save_prediction_only = self.cfg.get('save_prediction_only', False)
            self._metrics = [
                KeyPointTopDownMPIIEval(
                    anno_file,
                    len(eval_dataset),
                    self.cfg.num_joints,
                    self.cfg.save_dir,
                    save_prediction_only=save_prediction_only)
            ]
        elif self.cfg.metric == 'MOTDet':
            self._metrics = [JDEDetMetric(), ]
        else:
            logger.warning("Unsupported metric type: {}".format(
                self.cfg.metric))
            self._metrics = []
    def _reset_metrics(self):
        for metric in self._metrics:
            metric.reset()

    def register_callbacks(self, callbacks):
        callbacks = [c for c in list(callbacks) if c is not None]
        for c in callbacks:
            assert isinstance(c, Callback), \
                "callbacks should be instances of a subclass of Callback"
        self._callbacks.extend(callbacks)
        self._compose_callback = ComposeCallback(self._callbacks)

    def register_metrics(self, metrics):
        metrics = [m for m in list(metrics) if m is not None]
        for m in metrics:
            assert isinstance(m, Metric), \
                "metrics should be instances of a subclass of Metric"
        self._metrics.extend(metrics)
    def load_weights(self, weights):
        if self.is_loaded_weights:
            return
        self.start_epoch = 0
        load_pretrain_weight(self.model, weights)
        logger.debug("Load weights {} to start training".format(weights))

    def load_weights_sde(self, det_weights, reid_weights):
        if self.model.detector:
            load_weight(self.model.detector, det_weights)
            load_weight(self.model.reid, reid_weights)
        else:
            load_weight(self.model.reid, reid_weights)

    def resume_weights(self, weights):
        # support resuming weights for Distill models
        if hasattr(self.model, 'student_model'):
            self.start_epoch = load_weight(self.model.student_model, weights,
                                           self.optimizer)
        else:
            self.start_epoch = load_weight(self.model, weights, self.optimizer,
                                           self.ema if self.use_ema else None)
        logger.debug("Resume weights of epoch {}".format(self.start_epoch))
    def train(self, validate=False):
        assert self.mode == 'train', "Model not in 'train' mode"
        Init_mark = False
        if validate:
            self.cfg['EvalDataset'] = self.cfg.EvalDataset = create(
                "EvalDataset")()

        model = self.model
        sync_bn = (getattr(self.cfg, 'norm_type', None) == 'sync_bn' and
                   self.cfg.use_gpu and self._nranks > 1)
        if sync_bn:
            model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model)

        # enable auto mixed precision mode
        use_amp = self.cfg.get('amp', False)
        amp_level = self.cfg.get('amp_level', 'O1')
        if use_amp:
            scaler = paddle.amp.GradScaler(
                enable=self.cfg.use_gpu or self.cfg.use_npu,
                init_loss_scaling=self.cfg.get('init_loss_scaling', 1024))
            model = paddle.amp.decorate(models=model, level=amp_level)

        # get distributed model
        if self.cfg.get('fleet', False):
            model = fleet.distributed_model(model)
            self.optimizer = fleet.distributed_optimizer(self.optimizer)
        elif self._nranks > 1:
            find_unused_parameters = self.cfg[
                'find_unused_parameters'] if 'find_unused_parameters' in self.cfg else False
            model = paddle.DataParallel(
                model, find_unused_parameters=find_unused_parameters)

        self.status.update({
            'epoch_id': self.start_epoch,
            'step_id': 0,
            'steps_per_epoch': len(self.loader)
        })

        self.status['batch_time'] = stats.SmoothedValue(
            self.cfg.log_iter, fmt='{avg:.4f}')
        self.status['data_time'] = stats.SmoothedValue(
            self.cfg.log_iter, fmt='{avg:.4f}')
        # the misspelled 'training_staus' key is kept as-is: the logging
        # callbacks look it up under this exact name
        self.status['training_staus'] = stats.TrainingStats(self.cfg.log_iter)

        if self.cfg.get('print_flops', False):
            flops_loader = create('{}Reader'.format(self.mode.capitalize()))(
                self.dataset, self.cfg.worker_num)
            self._flops(flops_loader)
        profiler_options = self.cfg.get('profiler_options', None)

        self._compose_callback.on_train_begin(self.status)

        for epoch_id in range(self.start_epoch, self.cfg.epoch):
            self.status['mode'] = 'train'
            self.status['epoch_id'] = epoch_id
            self._compose_callback.on_epoch_begin(self.status)
            self.loader.dataset.set_epoch(epoch_id)
            model.train()
            iter_tic = time.time()
            for step_id, data in enumerate(self.loader):
                self.status['data_time'].update(time.time() - iter_tic)
                self.status['step_id'] = step_id
                profiler.add_profiler_step(profiler_options)
                self._compose_callback.on_step_begin(self.status)
                data['epoch_id'] = epoch_id

                if use_amp:
                    with paddle.amp.auto_cast(
                            enable=self.cfg.use_gpu, level=amp_level):
                        # model forward
                        outputs = model(data)
                        loss = outputs['loss']
                    # model backward
                    scaled_loss = scaler.scale(loss)
                    scaled_loss.backward()
                    # in dygraph mode, optimizer.minimize is equal to optimizer.step
                    scaler.minimize(self.optimizer, scaled_loss)
                else:
                    # model forward
                    outputs = model(data)
                    loss = outputs['loss']
                    # model backward
                    loss.backward()
                    self.optimizer.step()
                curr_lr = self.optimizer.get_lr()
                self.lr.step()
                if self.cfg.get('unstructured_prune'):
                    self.pruner.step()
                self.optimizer.clear_grad()
                self.status['learning_rate'] = curr_lr

                if self._nranks < 2 or self._local_rank == 0:
                    self.status['training_staus'].update(outputs)

                self.status['batch_time'].update(time.time() - iter_tic)
                self._compose_callback.on_step_end(self.status)
                if self.use_ema:
                    self.ema.update()
                iter_tic = time.time()

            if self.cfg.get('unstructured_prune'):
                self.pruner.update_params()

            is_snapshot = (self._nranks < 2 or self._local_rank == 0) \
                and ((epoch_id + 1) % self.cfg.snapshot_epoch == 0 or epoch_id == self.end_epoch - 1)
            if is_snapshot and self.use_ema:
                # apply EMA weights to the model
                weight = copy.deepcopy(self.model.state_dict())
                self.model.set_dict(self.ema.apply())
                self.status['weight'] = weight

            self._compose_callback.on_epoch_end(self.status)

            if validate and is_snapshot:
                if not hasattr(self, '_eval_loader'):
                    # build evaluation dataset and loader
                    self._eval_dataset = self.cfg.EvalDataset
                    self._eval_batch_sampler = \
                        paddle.io.BatchSampler(
                            self._eval_dataset,
                            batch_size=self.cfg.EvalReader['batch_size'])
                    # If the metric is VOC, collate_batch must be set to False.
                    if self.cfg.metric == 'VOC':
                        self.cfg['EvalReader']['collate_batch'] = False
                    self._eval_loader = create('EvalReader')(
                        self._eval_dataset,
                        self.cfg.worker_num,
                        batch_sampler=self._eval_batch_sampler)
                # if validation during training is enabled, metrics must be
                # re-initialized; Init_mark makes sure this only runs once
                if validate and not Init_mark:
                    Init_mark = True
                    self._init_metrics(validate=validate)
                    self._reset_metrics()

                with paddle.no_grad():
                    self.status['save_best_model'] = True
                    self._eval_with_loader(self._eval_loader)

            if is_snapshot and self.use_ema:
                # restore the original (non-EMA) weights
                self.model.set_dict(weight)
                self.status.pop('weight')

        self._compose_callback.on_train_end(self.status)
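    # NOTE: _eval_with_loader() below is shared by evaluate() and by the
    # in-training validation path in train(); it resets metric state at the
    # end so it can safely be called multiple times.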
    def _eval_with_loader(self, loader):
        sample_num = 0
        tic = time.time()
        self._compose_callback.on_epoch_begin(self.status)
        self.status['mode'] = 'eval'
        self.model.eval()
        if self.cfg.get('print_flops', False):
            flops_loader = create('{}Reader'.format(self.mode.capitalize()))(
                self.dataset, self.cfg.worker_num, self._eval_batch_sampler)
            self._flops(flops_loader)
        for step_id, data in enumerate(loader):
            self.status['step_id'] = step_id
            self._compose_callback.on_step_begin(self.status)
            # forward
            outs = self.model(data)

            # update metrics
            for metric in self._metrics:
                metric.update(data, outs)

            # multi-scale inputs: all inputs share the same im_id
            if isinstance(data, typing.Sequence):
                sample_num += data[0]['im_id'].numpy().shape[0]
            else:
                sample_num += data['im_id'].numpy().shape[0]
            self._compose_callback.on_step_end(self.status)

        self.status['sample_num'] = sample_num
        self.status['cost_time'] = time.time() - tic

        # accumulate metrics and log them
        for metric in self._metrics:
            metric.accumulate()
            metric.log()
        self._compose_callback.on_epoch_end(self.status)
        # reset metric states, since evaluation may run multiple times
        self._reset_metrics()

    def evaluate(self):
        with paddle.no_grad():
            self._eval_with_loader(self.loader)
    def predict(self,
                images,
                draw_threshold=0.5,
                output_dir='output',
                save_results=False):
        self.dataset.set_images(images)
        loader = create('TestReader')(self.dataset, 0)

        def setup_metrics_for_loader():
            # remember the current metrics and config settings
            metrics = copy.deepcopy(self._metrics)
            mode = self.mode
            save_prediction_only = self.cfg[
                'save_prediction_only'] if 'save_prediction_only' in self.cfg else None
            output_eval = self.cfg[
                'output_eval'] if 'output_eval' in self.cfg else None

            # modify
            self.mode = '_test'
            self.cfg['save_prediction_only'] = True
            self.cfg['output_eval'] = output_dir
            self._init_metrics()

            # restore
            self.mode = mode
            self.cfg.pop('save_prediction_only')
            if save_prediction_only is not None:
                self.cfg['save_prediction_only'] = save_prediction_only

            self.cfg.pop('output_eval')
            if output_eval is not None:
                self.cfg['output_eval'] = output_eval

            _metrics = copy.deepcopy(self._metrics)
            self._metrics = metrics

            return _metrics

        if save_results:
            metrics = setup_metrics_for_loader()
        else:
            metrics = []

        imid2path = self.dataset.get_imid2path()

        anno_file = self.dataset.get_anno()
        clsid2catid, catid2name = get_categories(
            self.cfg.metric, anno_file=anno_file)

        # run inference
        self.status['mode'] = 'test'
        self.model.eval()
        if self.cfg.get('print_flops', False):
            flops_loader = create('TestReader')(self.dataset, 0)
            self._flops(flops_loader)
        results = []
        for step_id, data in enumerate(tqdm(loader)):
            self.status['step_id'] = step_id
            # forward
            outs = self.model(data)

            for _m in metrics:
                _m.update(data, outs)

            for key in ['im_shape', 'scale_factor', 'im_id']:
                if isinstance(data, typing.Sequence):
                    outs[key] = data[0][key]
                else:
                    outs[key] = data[key]
            for key, value in outs.items():
                if hasattr(value, 'numpy'):
                    outs[key] = value.numpy()
            results.append(outs)

        # sniper: aggregate per-chip detections back to full images
        if type(self.dataset) == SniperCOCODataSet:
            results = self.dataset.anno_cropper.aggregate_chips_detections(
                results)

        for _m in metrics:
            _m.accumulate()
            _m.reset()

        for outs in results:
            batch_res = get_infer_results(outs, clsid2catid)
            bbox_num = outs['bbox_num']

            start = 0
            for i, im_id in enumerate(outs['im_id']):
                image_path = imid2path[int(im_id)]
                image = Image.open(image_path).convert('RGB')
                image = ImageOps.exif_transpose(image)
                self.status['original_image'] = np.array(image.copy())

                end = start + bbox_num[i]
                bbox_res = batch_res['bbox'][start:end] \
                    if 'bbox' in batch_res else None
                mask_res = batch_res['mask'][start:end] \
                    if 'mask' in batch_res else None
                segm_res = batch_res['segm'][start:end] \
                    if 'segm' in batch_res else None
                keypoint_res = batch_res['keypoint'][start:end] \
                    if 'keypoint' in batch_res else None
                image = visualize_results(
                    image, bbox_res, mask_res, segm_res, keypoint_res,
                    int(im_id), catid2name, draw_threshold)
                self.status['result_image'] = np.array(image.copy())
                if self._compose_callback:
                    self._compose_callback.on_step_end(self.status)
                # save image with detections drawn
                save_name = self._get_save_image_name(output_dir, image_path)
                logger.info("Detection bbox results saved to {}".format(
                    save_name))
                image.save(save_name, quality=95)

                start = end
    def _get_save_image_name(self, output_dir, image_path):
        """
        Get save image name from source image path.
        """
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        image_name = os.path.split(image_path)[-1]
        name, ext = os.path.splitext(image_name)
        return os.path.join(output_dir, "{}".format(name)) + ext
    def _get_infer_cfg_and_input_spec(self, save_dir, prune_input=True):
        image_shape = None
        im_shape = [None, 2]
        scale_factor = [None, 2]
        if self.cfg.architecture in MOT_ARCH:
            test_reader_name = 'TestMOTReader'
        else:
            test_reader_name = 'TestReader'
        if 'inputs_def' in self.cfg[test_reader_name]:
            inputs_def = self.cfg[test_reader_name]['inputs_def']
            image_shape = inputs_def.get('image_shape', None)
        # set image_shape=[None, 3, -1, -1] as default
        if image_shape is None:
            image_shape = [None, 3, -1, -1]

        if len(image_shape) == 3:
            image_shape = [None] + image_shape
        else:
            im_shape = [image_shape[0], 2]
            scale_factor = [image_shape[0], 2]

        if hasattr(self.model, 'deploy'):
            self.model.deploy = True

        if 'slim' not in self.cfg:
            for layer in self.model.sublayers():
                if hasattr(layer, 'convert_to_deploy'):
                    layer.convert_to_deploy()

        export_post_process = self.cfg['export'].get(
            'post_process', False) if hasattr(self.cfg, 'export') else True
        export_nms = self.cfg['export'].get('nms', False) if hasattr(
            self.cfg, 'export') else True
        export_benchmark = self.cfg['export'].get(
            'benchmark', False) if hasattr(self.cfg, 'export') else False
        if hasattr(self.model, 'fuse_norm'):
            self.model.fuse_norm = self.cfg['TestReader'].get('fuse_normalize',
                                                              False)
        if hasattr(self.model, 'export_post_process'):
            self.model.export_post_process = export_post_process if not export_benchmark else False
        if hasattr(self.model, 'export_nms'):
            self.model.export_nms = export_nms if not export_benchmark else False
        if export_post_process and not export_benchmark:
            image_shape = [None] + image_shape[1:]

        # save infer cfg
        _dump_infer_config(self.cfg,
                           os.path.join(save_dir, 'infer_cfg.yml'),
                           image_shape, self.model)

        input_spec = [{
            "image": InputSpec(
                shape=image_shape, name='image'),
            "im_shape": InputSpec(
                shape=im_shape, name='im_shape'),
            "scale_factor": InputSpec(
                shape=scale_factor, name='scale_factor')
        }]
        if self.cfg.architecture == 'DeepSORT':
            input_spec[0].update({
                "crops": InputSpec(
                    shape=[None, 3, 192, 64], name='crops')
            })
        if prune_input:
            static_model = paddle.jit.to_static(
                self.model, input_spec=input_spec)
            # NOTE: dy2st does not prune the program, but jit.save prunes it
            # according to the input spec, so prune the input spec here and
            # save with the pruned spec
            pruned_input_spec = _prune_input_spec(
                input_spec, static_model.forward.main_program,
                static_model.forward.outputs)
        else:
            static_model = None
            pruned_input_spec = input_spec

        # TODO: hard-coded; remove once pruning input_spec is supported
        if self.cfg.architecture == 'PicoDet' and not export_post_process:
            pruned_input_spec = [{
                "image": InputSpec(
                    shape=image_shape, name='image')
            }]

        return static_model, pruned_input_spec
    def export(self, output_dir='output_inference'):
        self.model.eval()
        if hasattr(self.cfg, 'export') and 'fuse_conv_bn' in self.cfg[
                'export'] and self.cfg['export']['fuse_conv_bn']:
            self.model = fuse_conv_bn(self.model)

        model_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]
        save_dir = os.path.join(output_dir, model_name)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        static_model, pruned_input_spec = self._get_infer_cfg_and_input_spec(
            save_dir)

        # dy2st and save model
        if 'slim' not in self.cfg or 'QAT' not in self.cfg['slim_type']:
            paddle.jit.save(
                static_model,
                os.path.join(save_dir, 'model'),
                input_spec=pruned_input_spec)
        else:
            self.cfg.slim.save_quantized_model(
                self.model,
                os.path.join(save_dir, 'model'),
                input_spec=pruned_input_spec)
        logger.info("Exported model saved in {}".format(save_dir))
    def post_quant(self, output_dir='output_inference'):
        model_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]
        save_dir = os.path.join(output_dir, model_name)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        # run a few batches through the model to collect quantization stats
        for idx, data in enumerate(self.loader):
            self.model(data)
            if idx == int(self.cfg.get('quant_batch_num', 10)):
                break

        # TODO: support pruning input_spec
        _, pruned_input_spec = self._get_infer_cfg_and_input_spec(
            save_dir, prune_input=False)

        self.cfg.slim.save_quantized_model(
            self.model,
            os.path.join(save_dir, 'model'),
            input_spec=pruned_input_spec)
        logger.info("Exported Post-Quant model saved in {}".format(save_dir))
    def _flops(self, loader):
        self.model.eval()
        try:
            import paddleslim
        except Exception:
            logger.warning(
                'Unable to calculate flops, please install paddleslim, for example: `pip install paddleslim`'
            )
            return

        from paddleslim.analysis import dygraph_flops as flops
        input_data = None
        for data in loader:
            input_data = data
            break

        input_spec = [{
            "image": input_data['image'][0].unsqueeze(0),
            "im_shape": input_data['im_shape'][0].unsqueeze(0),
            "scale_factor": input_data['scale_factor'][0].unsqueeze(0)
        }]
        flops = flops(self.model, input_spec) / (1000**3)
        logger.info(" Model FLOPs : {:.6f}G. (image shape is {})".format(
            flops, input_data['image'][0].unsqueeze(0).shape))
    def parse_mot_images(self, cfg):
        import glob
        # for quant
        dataset_dir = cfg['EvalMOTDataset'].dataset_dir
        data_root = cfg['EvalMOTDataset'].data_root
        data_root = '{}/{}'.format(dataset_dir, data_root)
        seqs = os.listdir(data_root)
        seqs.sort()
        all_images = []
        for seq in seqs:
            infer_dir = os.path.join(data_root, seq)
            assert infer_dir is None or os.path.isdir(infer_dir), \
                "{} is not a directory".format(infer_dir)
            images = set()
            exts = ['jpg', 'jpeg', 'png', 'bmp']
            exts += [ext.upper() for ext in exts]
            for ext in exts:
                images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
            images = list(images)
            images.sort()
            assert len(images) > 0, "no image found in {}".format(infer_dir)
            all_images.extend(images)
        logger.info("Found {} inference images in total.".format(
            len(all_images)))
        return all_images
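
# Minimal usage sketch (an illustration, not part of the original file). It
# assumes a ppdet-style YAML config loaded via `load_config` from
# paddlex.ppdet.core.workspace, and a config that defines TrainDataset,
# TrainReader, LearningRate, OptimizerBuilder, pretrain_weights, etc.:
#
#   from paddlex.ppdet.core.workspace import load_config
#
#   cfg = load_config('configs/yolov3/yolov3_darknet53_270e_coco.yml')
#   trainer = Trainer(cfg, mode='train')
#   trainer.load_weights(cfg.pretrain_weights)
#   trainer.train(validate=True)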