segmentation.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
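
# Training helpers for a PaddleX semantic segmentation task: build the data
# transforms and datasets, configure the optimizer, and run (optionally
# pruned) training.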

import os.path as osp

from paddleslim import L1NormFilterPruner


def build_transforms(params):
    """Assemble the train/eval transform pipelines from the task params."""
    from paddlex import transforms as T
    seg_list = []
    # Always resize to the target shape and apply random blur.
    seg_list.extend([
        T.Resize(target_size=params.image_shape),
        T.RandomBlur(prob=params.blur_prob)
    ])
    if params.scale_aspect:
        seg_list.append(
            T.RandomScaleAspect(
                min_scale=params.min_ratio, aspect_ratio=params.aspect_ratio))
    # Photometric distortion, random flips, then normalization.
    seg_list.extend([
        T.RandomDistort(
            brightness_range=params.brightness_range,
            brightness_prob=params.brightness_prob,
            contrast_range=params.contrast_range,
            contrast_prob=params.contrast_prob,
            saturation_range=params.saturation_range,
            saturation_prob=params.saturation_prob,
            hue_range=params.hue_range,
            hue_prob=params.hue_prob),
        T.RandomVerticalFlip(prob=params.vertical_flip_prob),
        T.RandomHorizontalFlip(prob=params.horizontal_flip_prob),
        T.Normalize(mean=params.image_mean, std=params.image_std)
    ])
    train_transforms = T.Compose(seg_list)
    # Evaluation only resizes and normalizes.
    eval_transforms = T.Compose([
        T.Resize(target_size=params.image_shape),
        T.Normalize(mean=params.image_mean, std=params.image_std)
    ])
    return train_transforms, eval_transforms
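

# The dataset directory is assumed to follow the standard PaddleX layout:
# each line of train_list.txt / val_list.txt pairs an image path with its
# annotation path (relative to the dataset root), and labels.txt holds one
# class name per line.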
def build_datasets(dataset_path, train_transforms, eval_transforms):
    """Create the train/eval SegDataset objects for a PaddleX dataset dir."""
    import paddlex as pdx
    train_file_list = osp.join(dataset_path, 'train_list.txt')
    eval_file_list = osp.join(dataset_path, 'val_list.txt')
    label_list = osp.join(dataset_path, 'labels.txt')
    train_dataset = pdx.datasets.SegDataset(
        data_dir=dataset_path,
        file_list=train_file_list,
        label_list=label_list,
        transforms=train_transforms,
        shuffle=True)
    eval_dataset = pdx.datasets.SegDataset(
        data_dir=dataset_path,
        file_list=eval_file_list,
        label_list=label_list,
        transforms=eval_transforms)
    return train_dataset, eval_dataset


def build_optimizer(parameters, step_each_epoch, params):
    """Build the LR schedule and optimizer described by the task params."""
    import paddle
    from paddle.regularizer import L2Decay
    learning_rate = params.learning_rate
    num_epochs = params.num_epochs
    if params.lr_policy == 'Piecewise':
        # Step decay: scale the LR by gamma at each decay epoch.
        lr_decay_epochs = params.lr_decay_epochs
        gamma = 0.1
        boundaries = [step_each_epoch * e for e in lr_decay_epochs]
        values = [
            learning_rate * (gamma**i)
            for i in range(len(lr_decay_epochs) + 1)
        ]
        decayed_lr = paddle.optimizer.lr.PiecewiseDecay(
            boundaries=boundaries, values=values)
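        # Example: with step_each_epoch=100, lr_decay_epochs=[20, 40] and
        # learning_rate=0.01, this gives boundaries=[2000, 4000] and
        # values=[0.01, 0.001, 0.0001].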
    elif params.lr_policy == 'Polynomial':
        decay_step = num_epochs * step_each_epoch
        decayed_lr = paddle.optimizer.lr.PolynomialDecay(
            learning_rate=learning_rate,
            decay_steps=decay_step,
            end_lr=0.0,
            power=0.9)
    elif params.lr_policy == 'Cosine':
        decayed_lr = paddle.optimizer.lr.CosineAnnealingDecay(
            learning_rate=learning_rate,
            T_max=step_each_epoch * num_epochs)
    else:
        raise ValueError(
            'lr_policy only supports Piecewise, Polynomial or Cosine, '
            'but you set {}'.format(params.lr_policy))
    if params.optimizer.lower() == 'sgd':
        momentum = 0.9
        regularize_coef = 1e-4
        optimizer = paddle.optimizer.Momentum(
            learning_rate=decayed_lr,
            momentum=momentum,
            weight_decay=L2Decay(regularize_coef),
            parameters=parameters)
    elif params.optimizer.lower() == 'adam':
        momentum = 0.9
        momentum2 = 0.999
        regularize_coef = 1e-4
        optimizer = paddle.optimizer.Adam(
            learning_rate=decayed_lr,
            beta1=momentum,
            beta2=momentum2,
            weight_decay=L2Decay(regularize_coef),
            parameters=parameters)
    else:
        raise ValueError('optimizer only supports SGD or Adam, but you set {}'.
                         format(params.optimizer))
    return optimizer


def train(task_path, dataset_path, params):
    """Train (and optionally prune) a segmentation model for this task."""
    import paddlex as pdx
    pdx.log_level = 3
    train_transforms, eval_transforms = build_transforms(params)
    train_dataset, eval_dataset = build_datasets(
        dataset_path=dataset_path,
        train_transforms=train_transforms,
        eval_transforms=eval_transforms)
    step_each_epoch = train_dataset.num_samples // params.batch_size
    save_interval_epochs = params.save_interval_epochs
    save_dir = osp.join(task_path, 'output')
    pretrain_weights = params.pretrain_weights
    # A local pretrained-weights directory is resolved to its weight file;
    # any other value (e.g. a built-in weights name) is passed through.
    if pretrain_weights is not None and osp.exists(pretrain_weights):
        pretrain_weights = osp.join(pretrain_weights, 'model.pdparams')
    # All HRNet variants (e.g. HRNet_W18) map to the HRNet segmenter class.
    segmenter = getattr(pdx.seg, 'HRNet'
                        if params.model.startswith('HRNet') else params.model)
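    # Map the (dice, bce) flags onto PaddleX's use_mixed_loss argument: a list
    # of (loss_name, coefficient) pairs, or False for the default CE loss.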
    use_dice_loss, use_bce_loss = params.loss_type
    if use_bce_loss and use_dice_loss:
        use_mixed_loss = [('CrossEntropyLoss', 1), ('DiceLoss', 1)]
    elif use_bce_loss:
        use_mixed_loss = [('CrossEntropyLoss', 1)]
    elif use_dice_loss:
        use_mixed_loss = [('DiceLoss', 1)]
    else:
        use_mixed_loss = False
    backbone = params.backbone
    sensitivities_path = params.sensitivities_path
    pruned_flops = params.pruned_flops
    if params.model in ['UNet', 'HRNet_W18', 'FastSCNN', 'BiSeNetV2']:
        model = segmenter(
            num_classes=len(train_dataset.labels),
            use_mixed_loss=use_mixed_loss)
    elif params.model == 'DeepLabV3P':
        model = segmenter(
            num_classes=len(train_dataset.labels),
            backbone=backbone,
            use_mixed_loss=use_mixed_loss)
    else:
        raise ValueError('Unsupported model {}'.format(params.model))
    if sensitivities_path is not None:
        # Load weights now (pretrain_weights already points at the weight
        # file), then clear it so model.train() does not reload it.
        model.net_initialize(pretrain_weights=pretrain_weights)
        pretrain_weights = None
        # Prune filters by L1 norm, guided by the sensitivity file, until the
        # requested fraction of FLOPs is removed.
        dataset = eval_dataset or train_dataset
        inputs = [1, 3] + list(dataset[0]['image'].shape[:2])
        model.pruner = L1NormFilterPruner(
            model.net, inputs=inputs, sen_file=sensitivities_path)
        # model.pruner.sensitive_prune(pruned_flops=pruned_flops)
        model.prune(pruned_flops=pruned_flops)
    optimizer = build_optimizer(model.net.parameters(), step_each_epoch,
                                params)
    model.train(
        num_epochs=params.num_epochs,
        train_dataset=train_dataset,
        train_batch_size=params.batch_size,
        eval_dataset=eval_dataset,
        save_interval_epochs=save_interval_epochs,
        log_interval_steps=2,
        save_dir=save_dir,
        pretrain_weights=pretrain_weights,
        optimizer=optimizer,
        use_vdl=True,
        resume_checkpoint=params.resume_checkpoint)
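

if __name__ == '__main__':
    # Minimal invocation sketch. The GUI/server normally supplies the params
    # object; SimpleNamespace stands in for it here, and every value below is
    # a hypothetical placeholder, not a recommended setting.
    from types import SimpleNamespace

    params = SimpleNamespace(
        image_shape=[512, 512],
        blur_prob=0.1,
        scale_aspect=False,
        min_ratio=0.5,
        aspect_ratio=0.33,
        brightness_range=0.5,
        brightness_prob=0.5,
        contrast_range=0.5,
        contrast_prob=0.5,
        saturation_range=0.5,
        saturation_prob=0.5,
        hue_range=18,
        hue_prob=0.5,
        vertical_flip_prob=0.1,
        horizontal_flip_prob=0.5,
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.229, 0.224, 0.225],
        learning_rate=0.01,
        num_epochs=40,
        lr_policy='Piecewise',
        lr_decay_epochs=[20, 30],
        optimizer='SGD',
        batch_size=4,
        save_interval_epochs=1,
        pretrain_weights=None,
        model='UNet',
        backbone=None,
        loss_type=(False, False),
        sensitivities_path=None,
        pruned_flops=0.2,
        resume_checkpoint=None)
    train(task_path='./task', dataset_path='./dataset', params=params)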