runner.py

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2024 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
Author: PaddlePaddle Authors
"""
import os
import re
import tempfile

from ...base import BaseRunner
from ...base.utils.arg import gather_opts_args
from ...base.utils.subprocess import CompletedProcess


class SegRunner(BaseRunner):
    """ Semantic Segmentation Runner """

    def train(self,
              config_path: str,
              cli_args: list,
              device: str,
              ips: str,
              save_dir: str,
              do_eval=True) -> CompletedProcess:
        """train model

        Args:
            config_path (str): the config file path used to train.
            cli_args (list): the additional parameters.
            device (str): the training device.
            ips (str): the ip addresses of nodes when using distribution.
            save_dir (str): the directory path to save training output.
            do_eval (bool, optional): whether or not to evaluate model during
                training. Defaults to True.

        Returns:
            CompletedProcess: the result of training subprocess execution.
        """
        args, env = self.distributed(device, ips, log_dir=save_dir)
        cli_args = self._gather_opts_args(cli_args)
        cmd = [*args, 'tools/train.py']
        if do_eval:
            cmd.append('--do_eval')
        cmd.extend(['--config', config_path, *cli_args])
        return self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True,
            log_path=self._get_train_log_path(save_dir))

    def evaluate(self, config_path: str, cli_args: list, device: str,
                 ips: str) -> CompletedProcess:
        """run model evaluating

        Args:
            config_path (str): the config file path used to evaluate.
            cli_args (list): the additional parameters.
            device (str): the evaluating device.
            ips (str): the ip addresses of nodes when using distribution.

        Returns:
            CompletedProcess: the result of evaluating subprocess execution.
        """
        args, env = self.distributed(device, ips)
        cli_args = self._gather_opts_args(cli_args)
        cmd = [*args, 'tools/val.py', '--config', config_path, *cli_args]
        cp = self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True)
        if cp.returncode == 0:
            metric_dict = _extract_eval_metrics(cp.stdout)
            cp.metrics = metric_dict
        return cp

    def predict(self, config_path: str, cli_args: list,
                device: str) -> CompletedProcess:
        """run predicting using dynamic mode

        Args:
            config_path (str): the config file path used to predict.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of predicting subprocess execution.
        """
        # `device` unused
        cli_args = self._gather_opts_args(cli_args)
        cmd = [
            self.python, 'tools/predict.py', '--config', config_path, *cli_args
        ]
        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

    def analyse(self, config_path, cli_args, device, ips):
        """run model analysis

        Args:
            config_path (str): the config file path used for analysis.
            cli_args (list): the additional parameters.
            device (str): the running device.
            ips (str): the ip addresses of nodes when using distribution.

        Returns:
            CompletedProcess: the result of analysis subprocess execution.
        """
        args, env = self.distributed(device, ips)
        cli_args = self._gather_opts_args(cli_args)
        cmd = [*args, 'tools/analyse.py', '--config', config_path, *cli_args]
        cp = self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True)
        return cp

    def export(self, config_path: str, cli_args: list,
               device: str) -> CompletedProcess:
        """run exporting

        Args:
            config_path (str): the path of config file used to export.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of exporting subprocess execution.
        """
        # `device` unused
        cli_args = self._gather_opts_args(cli_args)
        cmd = [
            self.python, 'tools/export.py', '--for_fd', '--config', config_path,
            *cli_args
        ]
        cp = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
        return cp

    def infer(self, config_path: str, cli_args: list,
              device: str) -> CompletedProcess:
        """run predicting using inference model

        Args:
            config_path (str): the path of config file used to predict.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of the inference subprocess execution.
        """
        # `device` unused
        cli_args = self._gather_opts_args(cli_args)
        cmd = [
            self.python, 'deploy/python/infer.py', '--config', config_path,
            *cli_args
        ]
        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

    def compression(self,
                    config_path: str,
                    train_cli_args: list,
                    export_cli_args: list,
                    device: str,
                    train_save_dir: str) -> tuple:
        """run model compression

        Args:
            config_path (str): the config file path used for compression.
            train_cli_args (list): the additional training parameters.
            export_cli_args (list): the additional exporting parameters.
            device (str): the running device.
            train_save_dir (str): the directory path to save output.

        Returns:
            tuple: the CompletedProcess results of the training and exporting
                subprocesses.
        """
        # Step 1: Train model
        args, env = self.distributed(device, log_dir=train_save_dir)
        train_cli_args = self._gather_opts_args(train_cli_args)
        # Note that we add `--do_eval` here so we can have
        # `train_save_dir/best_model/model.pdparams` saved
        cmd = [
            *args, 'deploy/slim/quant/qat_train.py', '--do_eval', '--config',
            config_path, *train_cli_args
        ]
        cp_train = self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True,
            log_path=self._get_train_log_path(train_save_dir))
        # Step 2: Export model
        export_cli_args = self._gather_opts_args(export_cli_args)
        # We export the best model on the validation dataset
        weight_path = os.path.join(train_save_dir, 'best_model',
                                   'model.pdparams')
        cmd = [
            self.python, 'deploy/slim/quant/qat_export.py', '--for_fd',
            '--config', config_path, '--model_path', weight_path,
            *export_cli_args
        ]
        cp_export = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
        return cp_train, cp_export

    def _gather_opts_args(self, args):
        # Since `--opts` in PaddleSeg does not use `action='append'`
        # We collect and arrange all opts args here
        # e.g.: python tools/train.py --config xxx --opts a=1 c=3 --opts b=2
        # => python tools/train.py --config xxx c=3 --opts a=1 b=2
        return gather_opts_args(args, '--opts')


def _extract_eval_metrics(stdout: str) -> dict:
    """extract evaluation metrics from the evaluation log

    Args:
        stdout (str): the stdout of the evaluation subprocess.

    Returns:
        dict: the extracted evaluation metrics.
    """
    _DP = r'[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?'
    pattern = r'Images: \d+ mIoU: (_dp) Acc: (_dp) Kappa: (_dp) Dice: (_dp)'.replace(
        '_dp', _DP)
    keys = ['mIoU', 'Acc', 'Kappa', 'Dice']
    metric_dict = dict()
    pattern = re.compile(pattern)
    # TODO: Use lazy version to make it more efficient
    lines = stdout.splitlines()
    for line in lines:
        match = pattern.search(line)
        if match:
            for k, v in zip(keys, map(float, match.groups())):
                metric_dict[k] = v
    return metric_dict
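
# Illustrative example of the log format `_extract_eval_metrics` parses. The
# sample line below is hypothetical and assumes only the evaluation-log layout
# captured by the regular expression above; it is not taken from an actual
# PaddleSeg run.
#
#     >>> _extract_eval_metrics(
#     ...     '[EVAL] #Images: 500 mIoU: 0.8234 Acc: 0.9156 Kappa: 0.8891 Dice: 0.8975')
#     {'mIoU': 0.8234, 'Acc': 0.9156, 'Kappa': 0.8891, 'Dice': 0.8975}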