model.py

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2024 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
Author: PaddlePaddle Authors
"""
import os

from ...base import BaseModel
from ...base.utils.arg import CLIArgument
from ...base.utils.subprocess import CompletedProcess
from ....utils.misc import abspath


class SegModel(BaseModel):
    """ Semantic Segmentation Model """

    def train(self,
              batch_size: int=None,
              learning_rate: float=None,
              epochs_iters: int=None,
              ips: str=None,
              device: str='gpu',
              resume_path: str=None,
              dy2st: bool=False,
              amp: str='OFF',
              num_workers: int=None,
              use_vdl: bool=True,
              save_dir: str=None,
              **kwargs) -> CompletedProcess:
        """Train the model.

        Args:
            batch_size (int, optional): the training batch size. Defaults to None.
            learning_rate (float, optional): the training learning rate. Defaults to None.
            epochs_iters (int, optional): the number of training epochs/iterations. Defaults to None.
            ips (str, optional): the IP addresses of the nodes when using distributed training. Defaults to None.
            device (str, optional): the device to run on. Defaults to 'gpu'.
            resume_path (str, optional): the checkpoint file path to resume training from. Train from scratch if it
                is set to None. Defaults to None.
            dy2st (bool, optional): whether to enable dynamic-to-static training. Defaults to False.
            amp (str, optional): the AMP setting. Defaults to 'OFF'.
            num_workers (int, optional): the number of data-loading workers. Defaults to None.
            use_vdl (bool, optional): whether to enable VisualDL. Defaults to True.
            save_dir (str, optional): the directory to save the training output. Defaults to None.

        Returns:
            CompletedProcess: the result of the training subprocess execution.
        """
        config = self.config.copy()
        cli_args = []

        if batch_size is not None:
            cli_args.append(CLIArgument('--batch_size', batch_size))

        if learning_rate is not None:
            cli_args.append(CLIArgument('--learning_rate', learning_rate))

        if epochs_iters is not None:
            cli_args.append(CLIArgument('--iters', epochs_iters))

        # No need to handle `ips`

        if device is not None:
            device_type, _ = self.runner.parse_device(device)
            cli_args.append(CLIArgument('--device', device_type))

        # For compatibility
        resume_dir = kwargs.pop('resume_dir', None)
        if resume_path is None and resume_dir is not None:
            resume_path = os.path.join(resume_dir, 'model.pdparams')

        if resume_path is not None:
            # NOTE: We must use an absolute path here,
            # so we can run the scripts either inside or outside the repo dir.
            resume_path = abspath(resume_path)
            if os.path.basename(resume_path) != 'model.pdparams':
                raise ValueError(f"{resume_path} has an incorrect file name.")
            if not os.path.exists(resume_path):
                raise FileNotFoundError(f"{resume_path} does not exist.")
            resume_dir = os.path.dirname(resume_path)
            opts_path = os.path.join(resume_dir, 'model.pdopt')
            if not os.path.exists(opts_path):
                raise FileNotFoundError(f"{opts_path} must exist.")
            cli_args.append(CLIArgument('--resume_model', resume_dir))

        if dy2st:
            config.update_dy2st(dy2st)

        if amp is not None:
            if amp != 'OFF':
                cli_args.append(CLIArgument('--precision', 'fp16'))
                cli_args.append(CLIArgument('--amp_level', amp))

        if num_workers is not None:
            cli_args.append(CLIArgument('--num_workers', num_workers))

        if use_vdl:
            cli_args.append(CLIArgument('--use_vdl'))

        if save_dir is not None:
            save_dir = abspath(save_dir)
        else:
            # `save_dir` is None
            save_dir = abspath(os.path.join('output', 'train'))
        cli_args.append(CLIArgument('--save_dir', save_dir))

        save_interval = kwargs.pop('save_interval', None)
        if save_interval is not None:
            cli_args.append(CLIArgument('--save_interval', save_interval))

        do_eval = kwargs.pop('do_eval', True)

        profile = kwargs.pop('profile', None)
        if profile is not None:
            cli_args.append(CLIArgument('--profiler_options', profile))

        log_iters = kwargs.pop('log_iters', None)
        if log_iters is not None:
            cli_args.append(CLIArgument('--log_iters', log_iters))

        repeats = kwargs.pop('repeats', None)
        if repeats is not None:
            cli_args.append(CLIArgument('--repeats', repeats))

        seed = kwargs.pop('seed', None)
        if seed is not None:
            cli_args.append(CLIArgument('--seed', seed))

        self._assert_empty_kwargs(kwargs)

        with self._create_new_config_file() as config_path:
            config.dump(config_path)
            return self.runner.train(
                config_path, cli_args, device, ips, save_dir, do_eval=do_eval)
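
    # Usage sketch (illustrative, not from the original source): assuming an
    # already-constructed `SegModel` instance named `model` with a valid config
    # and runner, a training run might be launched as:
    #
    #     model.train(batch_size=4, learning_rate=0.01, epochs_iters=1000,
    #                 device='gpu:0', save_dir='output/train')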

    def evaluate(self,
                 weight_path: str,
                 batch_size: int=None,
                 ips: str=None,
                 device: str='gpu',
                 amp: str='OFF',
                 num_workers: int=None,
                 **kwargs) -> CompletedProcess:
        """Evaluate the model using the specified weights.

        Args:
            weight_path (str): the path of the model weight file to be evaluated.
            batch_size (int, optional): the batch size used in evaluation. Defaults to None.
            ips (str, optional): the IP addresses of the nodes when using distributed evaluation. Defaults to None.
            device (str, optional): the device to run on. Defaults to 'gpu'.
            amp (str, optional): the AMP setting. Defaults to 'OFF'.
            num_workers (int, optional): the number of data-loading workers used in evaluation. Defaults to None.

        Returns:
            CompletedProcess: the result of the evaluation subprocess execution.
        """
        config = self.config.copy()
        cli_args = []

        weight_path = abspath(weight_path)
        cli_args.append(CLIArgument('--model_path', weight_path))

        if batch_size is not None:
            if batch_size != 1:
                raise ValueError("Batch size other than 1 is not supported.")

        # No need to handle `ips`

        if device is not None:
            device_type, _ = self.runner.parse_device(device)
            cli_args.append(CLIArgument('--device', device_type))

        if amp is not None:
            if amp != 'OFF':
                cli_args.append(CLIArgument('--precision', 'fp16'))
                cli_args.append(CLIArgument('--amp_level', amp))

        if num_workers is not None:
            cli_args.append(CLIArgument('--num_workers', num_workers))

        self._assert_empty_kwargs(kwargs)

        with self._create_new_config_file() as config_path:
            config.dump(config_path)
            cp = self.runner.evaluate(config_path, cli_args, device, ips)
            return cp
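
    # Usage sketch (illustrative, hypothetical `model` instance); note that
    # only a batch size of 1 is accepted here, and the weight path below is a
    # placeholder:
    #
    #     cp = model.evaluate('output/train/best_model/model.pdparams',
    #                         batch_size=1, device='gpu:0')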

    def predict(self,
                weight_path: str,
                input_path: str,
                device: str='gpu',
                save_dir: str=None,
                **kwargs) -> CompletedProcess:
        """Predict using the specified weights.

        Args:
            weight_path (str): the path of the model weight file used to predict.
            input_path (str): the path of the image file to be predicted.
            device (str, optional): the device to run on. Defaults to 'gpu'.
            save_dir (str, optional): the directory to save the prediction output. Defaults to None.

        Returns:
            CompletedProcess: the result of the prediction subprocess execution.
        """
        config = self.config.copy()
        cli_args = []

        weight_path = abspath(weight_path)
        cli_args.append(CLIArgument('--model_path', weight_path))

        input_path = abspath(input_path)
        cli_args.append(CLIArgument('--image_path', input_path))

        if device is not None:
            device_type, _ = self.runner.parse_device(device)
            cli_args.append(CLIArgument('--device', device_type))

        if save_dir is not None:
            save_dir = abspath(save_dir)
        else:
            # `save_dir` is None
            save_dir = abspath(os.path.join('output', 'predict'))
        cli_args.append(CLIArgument('--save_dir', save_dir))

        self._assert_empty_kwargs(kwargs)

        with self._create_new_config_file() as config_path:
            config.dump(config_path)
            return self.runner.predict(config_path, cli_args, device)
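
    # Usage sketch (illustrative, hypothetical `model` instance and paths):
    # predicting a single image with trained weights.
    #
    #     model.predict('output/train/best_model/model.pdparams',
    #                   'path/to/image.png', device='gpu:0',
    #                   save_dir='output/predict')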

    def analyse(self,
                weight_path,
                ips=None,
                device='gpu',
                save_dir=None,
                **kwargs):
        """ analyse """
        config = self.config.copy()
        cli_args = []

        weight_path = abspath(weight_path)
        cli_args.append(CLIArgument('--model_path', weight_path))

        if device is not None:
            device_type, _ = self.runner.parse_device(device)
            cli_args.append(CLIArgument('--device', device_type))

        if save_dir is not None:
            save_dir = abspath(save_dir)
        else:
            # `save_dir` is None
            save_dir = abspath(os.path.join('output', 'analysis'))
        cli_args.append(CLIArgument('--save_dir', save_dir))

        self._assert_empty_kwargs(kwargs)

        with self._create_new_config_file() as config_path:
            config.dump(config_path)
            cp = self.runner.analyse(config_path, cli_args, device, ips)
            return cp

    def export(self, weight_path: str, save_dir: str,
               **kwargs) -> CompletedProcess:
        """Export the dynamic model to a static model.

        Args:
            weight_path (str): the path of the model weight file used for export.
            save_dir (str): the directory to save the export output.

        Returns:
            CompletedProcess: the result of the export subprocess execution.
        """
        config = self.config.copy()
        cli_args = []

        weight_path = abspath(weight_path)
        cli_args.append(CLIArgument('--model_path', weight_path))

        if save_dir is not None:
            save_dir = abspath(save_dir)
        else:
            # `save_dir` is None
            save_dir = abspath(os.path.join('output', 'export'))
        cli_args.append(CLIArgument('--save_dir', save_dir))

        input_shape = kwargs.pop('input_shape', None)
        if input_shape is not None:
            cli_args.append(CLIArgument('--input_shape', *input_shape))

        output_op = kwargs.pop('output_op', None)
        if output_op is not None:
            assert output_op in ['softmax', 'argmax'], \
                "`output_op` must be 'softmax' or 'argmax'."
            cli_args.append(CLIArgument('--output_op', output_op))

        self._assert_empty_kwargs(kwargs)

        with self._create_new_config_file() as config_path:
            config.dump(config_path)
            return self.runner.export(config_path, cli_args, None)
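
    # Usage sketch (illustrative, hypothetical `model` instance and paths):
    # the optional `input_shape` and `output_op` keyword arguments are
    # forwarded to the export script as shown above.
    #
    #     model.export('output/train/best_model/model.pdparams',
    #                  save_dir='output/export',
    #                  input_shape=[1, 3, 512, 512], output_op='argmax')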

    def infer(self,
              model_dir: str,
              input_path: str,
              device: str='gpu',
              save_dir: str=None,
              **kwargs) -> CompletedProcess:
        """Predict an image using the inference model.

        Args:
            model_dir (str): the directory of the inference model files used to predict.
            input_path (str): the path of the image to be predicted.
            device (str, optional): the device to run on. Defaults to 'gpu'.
            save_dir (str, optional): the directory to save the output. Defaults to None.

        Returns:
            CompletedProcess: the result of the inference subprocess execution.
        """
        config = self.config.copy()
        cli_args = []

        model_dir = abspath(model_dir)
        input_path = abspath(input_path)
        cli_args.append(CLIArgument('--image_path', input_path))

        if device is not None:
            device_type, _ = self.runner.parse_device(device)
            cli_args.append(CLIArgument('--device', device_type))

        if save_dir is not None:
            save_dir = abspath(save_dir)
        else:
            # `save_dir` is None
            save_dir = abspath(os.path.join('output', 'infer'))
        cli_args.append(CLIArgument('--save_dir', save_dir))

        self._assert_empty_kwargs(kwargs)

        with self._create_new_config_file() as config_path:
            config.dump(config_path)
            deploy_config_path = os.path.join(model_dir, 'inference.yml')
            return self.runner.infer(deploy_config_path, cli_args, device)
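
    # Usage sketch (illustrative, hypothetical `model` instance and paths):
    # `model_dir` is expected to contain the exported inference files,
    # including `inference.yml` (see above).
    #
    #     model.infer('output/export', 'path/to/image.png', device='gpu:0',
    #                 save_dir='output/infer')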

    def compression(self,
                    weight_path: str,
                    batch_size: int=None,
                    learning_rate: float=None,
                    epochs_iters: int=None,
                    device: str='gpu',
                    use_vdl: bool=True,
                    save_dir: str=None,
                    **kwargs) -> CompletedProcess:
        """Compress the model.

        Args:
            weight_path (str): the path of the model weight file.
            batch_size (int, optional): the batch size of compression training. Defaults to None.
            learning_rate (float, optional): the learning rate of compression training. Defaults to None.
            epochs_iters (int, optional): the number of epochs or iterations of compression training. Defaults
                to None.
            device (str, optional): the device to run compression training on. Defaults to 'gpu'.
            use_vdl (bool, optional): whether or not to use VisualDL. Defaults to True.
            save_dir (str, optional): the directory to save the output. Defaults to None.

        Returns:
            CompletedProcess: the result of the compression subprocess execution.
        """
        # Update YAML config file
        # NOTE: In PaddleSeg, QAT does not use a different config file than regular training.
        # Reusing `self.config` preserves the config items modified by the user when
        # `SegModel` is initialized with a `SegConfig` object.
        config = self.config.copy()
        train_cli_args = []
        export_cli_args = []

        weight_path = abspath(weight_path)
        train_cli_args.append(CLIArgument('--model_path', weight_path))

        if batch_size is not None:
            train_cli_args.append(CLIArgument('--batch_size', batch_size))

        if learning_rate is not None:
            train_cli_args.append(CLIArgument('--learning_rate', learning_rate))

        if epochs_iters is not None:
            train_cli_args.append(CLIArgument('--iters', epochs_iters))

        if device is not None:
            device_type, _ = self.runner.parse_device(device)
            train_cli_args.append(CLIArgument('--device', device_type))

        if use_vdl:
            train_cli_args.append(CLIArgument('--use_vdl'))

        if save_dir is not None:
            save_dir = abspath(save_dir)
        else:
            # `save_dir` is None
            save_dir = abspath(os.path.join('output', 'compress'))
        train_cli_args.append(CLIArgument('--save_dir', save_dir))
        # The exported model is saved in a subdirectory named `export`
        export_cli_args.append(
            CLIArgument('--save_dir', os.path.join(save_dir, 'export')))

        input_shape = kwargs.pop('input_shape', None)
        if input_shape is not None:
            export_cli_args.append(CLIArgument('--input_shape', *input_shape))

        self._assert_empty_kwargs(kwargs)

        with self._create_new_config_file() as config_path:
            config.dump(config_path)
            return self.runner.compression(config_path, train_cli_args,
                                           export_cli_args, device, save_dir)
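
    # Usage sketch (illustrative, hypothetical `model` instance and paths):
    # compression training followed by export of the compressed model into
    # `<save_dir>/export`.
    #
    #     model.compression('output/train/best_model/model.pdparams',
    #                       batch_size=4, learning_rate=0.001,
    #                       epochs_iters=500, save_dir='output/compress',
    #                       input_shape=[1, 3, 512, 512])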