utils.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function

import datetime

from paddlex.ppcls.utils import logger
from paddlex.ppcls.utils.misc import AverageMeter

def update_metric(trainer, out, batch, batch_size):
    # Compute the configured training metrics for this batch and fold them
    # into the trainer's running averages.
    if trainer.train_metric_func is not None:
        metric_dict = trainer.train_metric_func(out, batch[-1])
        for key in metric_dict:
            if key not in trainer.output_info:
                trainer.output_info[key] = AverageMeter(key, '7.5f')
            trainer.output_info[key].update(metric_dict[key].numpy()[0],
                                            batch_size)

def update_loss(trainer, loss_dict, batch_size):
    # Fold each loss term into the trainer's running averages, weighted by
    # batch size.
    for key in loss_dict:
        if key not in trainer.output_info:
            trainer.output_info[key] = AverageMeter(key, '7.5f')
        trainer.output_info[key].update(loss_dict[key].numpy()[0], batch_size)

def log_info(trainer, batch_size, epoch_id, iter_id):
    lr_msg = "lr: {:.5f}".format(trainer.lr_sch.get_lr())
    metric_msg = ", ".join([
        "{}: {:.5f}".format(key, trainer.output_info[key].avg)
        for key in trainer.output_info
    ])
    time_msg = "s, ".join([
        "{}: {:.5f}".format(key, trainer.time_info[key].avg)
        for key in trainer.time_info
    ])
    ips_msg = "ips: {:.5f} images/sec".format(
        batch_size / trainer.time_info["batch_cost"].avg)
    # ETA = (iterations left in this epoch + iterations in all remaining
    # epochs) * average cost per iteration. E.g. with 100 epochs, 500
    # iters/epoch, at epoch 2 iter 100 and 0.2 s/iter:
    # ((100 - 2 + 1) * 500 - 100) * 0.2 = 9880 s, about 2 h 45 min.
    eta_sec = ((trainer.config["Global"]["epochs"] - epoch_id + 1
                ) * len(trainer.train_dataloader) - iter_id
               ) * trainer.time_info["batch_cost"].avg
    eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec))))
    logger.info("[Train][Epoch {}/{}][Iter: {}/{}]{}, {}, {}, {}, {}".format(
        epoch_id, trainer.config["Global"]["epochs"], iter_id,
        len(trainer.train_dataloader), lr_msg, metric_msg, time_msg, ips_msg,
        eta_msg))
    # Mirror the learning rate and each running average to the VisualDL
    # writer so they can be plotted over global steps.
    logger.scaler(
        name="lr",
        value=trainer.lr_sch.get_lr(),
        step=trainer.global_step,
        writer=trainer.vdl_writer)
    for key in trainer.output_info:
        logger.scaler(
            name="train_{}".format(key),
            value=trainer.output_info[key].avg,
            step=trainer.global_step,
            writer=trainer.vdl_writer)
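
These helpers assume a trainer object exposing output_info, time_info, lr_sch, config, global_step, and vdl_writer. Below is a minimal usage sketch (not part of the original file) showing how update_loss, and the no-op path of update_metric, could be exercised in isolation, assuming AverageMeter follows the usual PaddleClas update(value, n) / .avg contract; FakeLoss and the SimpleNamespace trainer are hypothetical stand-ins for a paddle scalar tensor and the real Trainer.

if __name__ == "__main__":
    # Usage sketch: drive the helpers above with a stub trainer.
    from types import SimpleNamespace

    import numpy as np

    class FakeLoss(object):
        # Stand-in for a paddle scalar tensor; only .numpy() is used above.
        def __init__(self, value):
            self.value = value

        def numpy(self):
            return np.array([self.value])

    trainer = SimpleNamespace(
        output_info={},          # filled lazily with AverageMeter objects
        train_metric_func=None,  # None makes update_metric a no-op
    )

    # With train_metric_func unset, update_metric returns without touching
    # output_info.
    update_metric(trainer, out=None, batch=None, batch_size=32)

    # AverageMeter.update(value, n) weights each value by the batch size,
    # so .avg is the per-sample running mean: (0.9*32 + 0.7*32) / 64 = 0.8.
    update_loss(trainer, {"CELoss": FakeLoss(0.9)}, batch_size=32)
    update_loss(trainer, {"CELoss": FakeLoss(0.7)}, batch_size=32)
    print(trainer.output_info["CELoss"].avg)  # -> 0.8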