
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2024 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
Author: PaddlePaddle Authors
"""
import os
from functools import lru_cache

import yaml
from paddleseg.utils import NoAliasDumper

from ..base_seg_config import BaseSegConfig
from ....utils.misc import abspath
from ....utils import logging


class SegConfig(BaseSegConfig):
    """Semantic segmentation config."""

    def update_dataset(self, dataset_path: str, dataset_type: str = None):
        """Update dataset settings.

        Args:
            dataset_path (str): Root path of the dataset.
            dataset_type (str, optional): Dataset type. Defaults to None.

        Raises:
            ValueError: If `dataset_type` is not supported.
        """
        dataset_dir = abspath(dataset_path)
        if dataset_type is None:
            dataset_type = 'SegDataset'
        if dataset_type == 'SegDataset':
            # TODO: Prune extra keys
            ds_cfg = self._make_custom_dataset_config(dataset_dir)
            self.update(ds_cfg)
        elif dataset_type == '_dummy':
            # XXX: A special dataset type used to fool the PaddleSeg val
            # dataset checkers
            self.update({
                'val_dataset': {
                    'type': 'SegDataset',
                    'dataset_root': dataset_dir,
                    'val_path': os.path.join(dataset_dir, 'val.txt'),
                    'mode': 'val'
                },
            })
        else:
            raise ValueError(f"{repr(dataset_type)} is not supported.")
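
    # A minimal usage sketch (hypothetical paths; for 'SegDataset',
    # PaddleSeg expects file-list datasets where each line of
    # train.txt/val.txt pairs an image path with its annotation path):
    #
    #   cfg.update_dataset('/path/to/dataset')            # same as 'SegDataset'
    #   cfg.update_dataset('/path/to/dataset', '_dummy')  # val split only
    #
    # Expected on-disk layout (illustrative):
    #
    #   /path/to/dataset/
    #       train.txt   # e.g. "images/0001.jpg labels/0001.png" per line
    #       val.txt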

    def update_num_classes(self, num_classes: int):
        """Update the number of classes.

        Args:
            num_classes (int): Number of classes to set.
        """
        if 'train_dataset' in self:
            self.train_dataset['num_classes'] = num_classes
        if 'val_dataset' in self:
            self.val_dataset['num_classes'] = num_classes
        if 'model' in self:
            self.model['num_classes'] = num_classes
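
    # E.g., for a Cityscapes-style setup (19 classes; illustrative):
    #
    #   cfg.update_num_classes(19)  # syncs train_dataset, val_dataset, model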

    def update_train_crop_size(self, crop_size: int | list):
        """Update the image crop size used in training preprocessing.

        Args:
            crop_size (int | list): Size to crop images to. An int is
                expanded to a square; a list must hold exactly two values.

        Raises:
            ValueError: If `crop_size` is not an int or a two-element list.
        """
        # XXX: This method is highly coupled to PaddleSeg's internal functions
        if isinstance(crop_size, int):
            crop_size = [crop_size, crop_size]
        else:
            crop_size = list(crop_size)
            if len(crop_size) != 2:
                raise ValueError(
                    f"`crop_size` must contain exactly two elements, but got {crop_size}."
                )
        crop_size = [int(crop_size[0]), int(crop_size[1])]
        tf_cfg_list = self.train_dataset['transforms']
        modified = False
        for tf_cfg in tf_cfg_list:
            if tf_cfg['type'] == 'RandomPaddingCrop':
                tf_cfg['crop_size'] = crop_size
                modified = True
        if not modified:
            logging.warning(
                "Could not find the configuration item of the image cropping "
                "transformation operator, so the crop size was not updated.")

    def get_epochs_iters(self) -> int:
        """Get the number of training iterations.

        Returns:
            int: The iteration count, i.e., `iters` in the config.
        """
        if 'iters' in self:
            return self.iters
        else:
            # Default iters
            return 1000

    def get_learning_rate(self) -> float:
        """Get the learning rate.

        Returns:
            float: The learning rate, i.e., `lr_scheduler.learning_rate`
                in the config.
        """
        if 'lr_scheduler' not in self or 'learning_rate' not in self.lr_scheduler:
            # Default lr
            return 0.0001
        else:
            return self.lr_scheduler['learning_rate']

    def get_batch_size(self, mode='train') -> int:
        """Get the batch size.

        Args:
            mode (str, optional): Mode to get the batch size for. Only
                'train' is supported. Defaults to 'train'.

        Raises:
            ValueError: If `mode` is anything other than 'train'.

        Returns:
            int: The batch size, i.e., `batch_size` in the config.
        """
        if mode == 'train':
            if 'batch_size' in self:
                return self.batch_size
            else:
                # Default batch size
                return 4
        else:
            raise ValueError(
                f"Getting `batch_size` in {repr(mode)} mode is not supported.")

    def _make_custom_dataset_config(self, dataset_root_path: str) -> dict:
        """Construct a dataset config that meets the format requirements.

        Args:
            dataset_root_path (str): Root directory of the dataset.

        Returns:
            dict: The dataset config.
        """
        ds_cfg = {
            'train_dataset': {
                'type': 'SegDataset',
                'dataset_root': dataset_root_path,
                'train_path': os.path.join(dataset_root_path, 'train.txt'),
                'mode': 'train'
            },
            'val_dataset': {
                'type': 'SegDataset',
                'dataset_root': dataset_root_path,
                'val_path': os.path.join(dataset_root_path, 'val.txt'),
                'mode': 'val'
            },
        }
        return ds_cfg
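
    # The returned dict corresponds to YAML of the following shape (a sketch
    # of what `yaml.dump(ds_cfg, Dumper=NoAliasDumper)` would emit for a
    # hypothetical root of /data/seg; keys are alphabetized by the dumper):
    #
    #   train_dataset:
    #     dataset_root: /data/seg
    #     mode: train
    #     train_path: /data/seg/train.txt
    #     type: SegDataset
    #   val_dataset:
    #     dataset_root: /data/seg
    #     mode: val
    #     type: SegDataset
    #     val_path: /data/seg/val.txt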