# MaskRCNN-ResNeXt101-vd-FPN.yaml

epoch: 24
use_gpu: true
use_xpu: false
use_mlu: false
use_npu: false
log_iter: 20
save_dir: output
target_metrics: mask
snapshot_epoch: 1
print_flops: false
print_params: false

# dataset
metric: COCO
num_classes: 80
worker_num: 2

TrainDataset:
  name: COCODataSet
  image_dir: train2017
  anno_path: annotations/instances_train2017.json
  dataset_dir: dataset/coco
  data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_poly', 'is_crowd']

EvalDataset:
  name: COCODataSet
  image_dir: val2017
  anno_path: annotations/instances_val2017.json
  dataset_dir: dataset/coco

TestDataset:
  name: ImageFolder
  anno_path: annotations/instances_val2017.json # also support txt (like VOC's label_list.txt)
  dataset_dir: dataset/coco # if set, anno_path will be 'dataset_dir/anno_path'
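
# A minimal sketch of pointing the same config at a custom COCO-format dataset
# (the directory and file names below are illustrative, not part of this repo):
#
# TrainDataset:
#   name: COCODataSet
#   image_dir: images/train
#   anno_path: annotations/train.json
#   dataset_dir: dataset/my_custom_coco
#   data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_poly', 'is_crowd']
#
# Remember to update num_classes above to match the custom label set.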

TrainReader:
  sample_transforms:
    - Decode: {}
    - RandomResize: {target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]], interp: 2, keep_ratio: True}
    - RandomFlip: {prob: 0.5}
    - NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}
    - Permute: {}
  batch_transforms:
    - PadBatch: {pad_to_stride: 32}
  batch_size: 1
  shuffle: true
  drop_last: true
  collate_batch: false
  use_shared_memory: true

EvalReader:
  sample_transforms:
    - Decode: {}
    - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}
    - NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}
    - Permute: {}
  batch_transforms:
    - PadBatch: {pad_to_stride: 32}
  batch_size: 1
  shuffle: false
  drop_last: false

TestReader:
  sample_transforms:
    - Decode: {}
    - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}
    - NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}
    - Permute: {}
  batch_transforms:
    - PadBatch: {pad_to_stride: 32}
  batch_size: 1
  shuffle: false
  drop_last: false
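
# Reader notes: batch_size is per device. PadBatch pads the images in a batch to
# a common size whose height and width are multiples of 32 so the downsampled
# feature maps divide evenly, and NormalizeImage scales pixels to [0, 1]
# (is_scale: true) before applying the ImageNet mean/std used by the pretrained backbone.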

LearningRate:
  base_lr: 0.01
  schedulers:
    - !PiecewiseDecay
      gamma: 0.1
      milestones: [16, 22]
    - !LinearWarmup
      start_factor: 0.1
      steps: 1000

OptimizerBuilder:
  optimizer:
    momentum: 0.9
    type: Momentum
  regularizer:
    factor: 0.0001
    type: L2
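
# Learning-rate notes: base_lr: 0.01 assumes the usual 8-GPU setup with
# batch_size: 1 per card (total batch size 8); if the total batch size changes,
# scale it linearly, e.g. 0.01 * 1/8 = 0.00125 for single-GPU training.
# epoch: 24 with milestones [16, 22] is the standard 2x COCO schedule:
# the LR decays by gamma = 0.1 at epochs 16 and 22 after a 1000-step linear warmup.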

# model
architecture: MaskRCNN
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNeXt101_vd_64x4d_pretrained.pdparams

MaskRCNN:
  backbone: ResNet
  neck: FPN
  rpn_head: RPNHead
  bbox_head: BBoxHead
  mask_head: MaskHead
  # post process
  bbox_post_process: BBoxPostProcess
  mask_post_process: MaskPostProcess

ResNet:
  # for ResNeXt: groups, base_width, base_channels
  depth: 101
  variant: d
  groups: 64
  base_width: 4
  norm_type: bn
  freeze_at: 0
  return_idx: [0, 1, 2, 3]
  num_stages: 4

FPN:
  out_channel: 256

RPNHead:
  anchor_generator:
    aspect_ratios: [0.5, 1.0, 2.0]
    anchor_sizes: [[32], [64], [128], [256], [512]]
    strides: [4, 8, 16, 32, 64]
  rpn_target_assign:
    batch_size_per_im: 256
    fg_fraction: 0.5
    negative_overlap: 0.3
    positive_overlap: 0.7
    use_random: True
  train_proposal:
    min_size: 0.0
    nms_thresh: 0.7
    pre_nms_top_n: 2000
    post_nms_top_n: 1000
    topk_after_collect: True
  test_proposal:
    min_size: 0.0
    nms_thresh: 0.7
    pre_nms_top_n: 1000
    post_nms_top_n: 1000
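
# Anchor notes: the five anchor_sizes/strides entries map one-to-one onto the
# FPN levels P2-P6 (strides 4, 8, 16, 32, 64), and each location uses the three
# aspect_ratios, i.e. 3 anchors per cell per level.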

BBoxHead:
  head: TwoFCHead
  roi_extractor:
    resolution: 7
    sampling_ratio: 0
    aligned: True
  bbox_assigner: BBoxAssigner

BBoxAssigner:
  batch_size_per_im: 512
  bg_thresh: 0.5
  fg_thresh: 0.5
  fg_fraction: 0.25
  use_random: True

TwoFCHead:
  out_channel: 1024

BBoxPostProcess:
  decode: RCNNBox
  nms:
    name: MultiClassNMS
    keep_top_k: 100
    score_threshold: 0.05
    nms_threshold: 0.5

MaskHead:
  head: MaskFeat
  roi_extractor:
    resolution: 14
    sampling_ratio: 0
    aligned: True
  mask_assigner: MaskAssigner
  share_bbox_feat: False

MaskFeat:
  num_convs: 4
  out_channel: 256

MaskAssigner:
  mask_resolution: 28

MaskPostProcess:
  binary_thresh: 0.5

# Exporting the model
export:
  post_process: True
  nms: True
  benchmark: False
  fuse_conv_bn: False
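
# Usage sketch (assumes a standard PaddleDetection checkout; the config path and
# output directory below are illustrative):
#   python tools/train.py -c configs/MaskRCNN-ResNeXt101-vd-FPN.yaml --eval
#   python tools/eval.py -c configs/MaskRCNN-ResNeXt101-vd-FPN.yaml \
#       -o weights=output/MaskRCNN-ResNeXt101-vd-FPN/model_final
#   python tools/export_model.py -c configs/MaskRCNN-ResNeXt101-vd-FPN.yaml \
#       -o weights=output/MaskRCNN-ResNeXt101-vd-FPN/model_final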