reader.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import traceback
import six
import sys
import multiprocessing as mp
if sys.version_info >= (3, 0):
    import queue as Queue
else:
    import Queue

import numpy as np

from paddle.io import DataLoader, DistributedBatchSampler
from paddle.fluid.dataloader.collate import default_collate_fn

from paddlex.ppdet.core.workspace import register, serializable, create
from . import transform
from .shm_utils import _get_shared_memory_size_in_M

from paddlex.ppdet.utils.logger import setup_logger
logger = setup_logger('reader')

MAIN_PID = os.getpid()


class Compose(object):
    def __init__(self, transforms, num_classes=80):
        self.transforms = transforms
        self.transforms_cls = []
        for t in self.transforms:
            for k, v in t.items():
                op_cls = getattr(transform, k)
                f = op_cls(**v)
                if hasattr(f, 'num_classes'):
                    f.num_classes = num_classes

                self.transforms_cls.append(f)

    def __call__(self, data):
        for f in self.transforms_cls:
            try:
                data = f(data)
            except Exception as e:
                stack_info = traceback.format_exc()
                logger.warn("failed to map sample transform [{}] "
                            "with error: {} and stack:\n{}".format(
                                f, e, str(stack_info)))
                raise e

        return data
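

# A minimal usage sketch (not part of the original file): `Compose` takes a
# list of single-key dicts, each mapping a transform class name (resolved
# against the `transform` module via getattr) to its constructor kwargs.
# The op names and sample keys below ('Decode', 'Resize', 'im_file') are
# assumptions based on the ppdet transform zoo:
#
#   sample_transforms = Compose([
#       {'Decode': {}},
#       {'Resize': {'target_size': [640, 640], 'keep_ratio': False}},
#   ], num_classes=80)
#   sample = sample_transforms({'im_file': 'demo.jpg'})  # hypothetical record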


class BatchCompose(Compose):
    def __init__(self, transforms, num_classes=80, collate_batch=True):
        super(BatchCompose, self).__init__(transforms, num_classes)
        self.collate_batch = collate_batch

    def __call__(self, data):
        for f in self.transforms_cls:
            try:
                data = f(data)
            except Exception as e:
                stack_info = traceback.format_exc()
                logger.warn("failed to map batch transform [{}] "
                            "with error: {} and stack:\n{}".format(
                                f, e, str(stack_info)))
                raise e

        # remove keys which are not needed by the model
        extra_key = ['h', 'w', 'flipped']
        for k in extra_key:
            for sample in data:
                if k in sample:
                    sample.pop(k)

        # batch the data; if a user-defined batch function is needed,
        # use it here instead of the default collate function
        if self.collate_batch:
            batch_data = default_collate_fn(data)
        else:
            batch_data = {}
            for k in data[0].keys():
                tmp_data = []
                for i in range(len(data)):
                    tmp_data.append(data[i][k])
                if 'gt_' not in k and 'is_crowd' not in k and 'difficult' not in k:
                    tmp_data = np.stack(tmp_data, axis=0)
                batch_data[k] = tmp_data
        return batch_data
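

# Illustrative sketch (an assumption, not from the original file): with
# `collate_batch=False`, keys containing 'gt_', 'is_crowd' or 'difficult'
# are kept as per-sample lists, since their lengths may differ, while all
# other fields are stacked into a single array:
#
#   bc = BatchCompose([], num_classes=80, collate_batch=False)
#   batch = bc([
#       {'image': np.zeros((3, 4, 4), 'float32'), 'gt_bbox': np.zeros((2, 4))},
#       {'image': np.ones((3, 4, 4), 'float32'), 'gt_bbox': np.zeros((5, 4))},
#   ])
#   # batch['image'] has shape (2, 3, 4, 4); batch['gt_bbox'] stays a list
#   # of two arrays with 2 and 5 rows respectively.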


class BaseDataLoader(object):
    """
    Base DataLoader implementation for detection models

    Args:
        sample_transforms (list): a list of transforms to perform
            on each sample
        batch_transforms (list): a list of transforms to perform
            on each batch
        batch_size (int): batch size for batch collating, default 1.
        shuffle (bool): whether to shuffle samples
        drop_last (bool): whether to drop the last incomplete batch,
            default False
        num_classes (int): class number of dataset, default 80
        collate_batch (bool): whether to collate batch in dataloader.
            If set to True, the samples will be collated into batches
            according to the batch size. Otherwise, the ground-truth
            will not be collated, which is useful when the number of
            ground-truths differs across samples.
        use_shared_memory (bool): whether to use shared memory to
            accelerate data loading, enable this only if you
            are sure that the shared memory size of your OS
            is larger than the memory cost of the model's input data.
            Note that shared memory will be automatically
            disabled if the shared memory of the OS is less than
            1G, which is not enough for detection models.
            Default False.
    """

    def __init__(self,
                 sample_transforms=[],
                 batch_transforms=[],
                 batch_size=1,
                 shuffle=False,
                 drop_last=False,
                 num_classes=80,
                 collate_batch=True,
                 use_shared_memory=False,
                 **kwargs):
        # sample transform
        self._sample_transforms = Compose(
            sample_transforms, num_classes=num_classes)

        # batch transform
        self._batch_transforms = BatchCompose(batch_transforms, num_classes,
                                              collate_batch)
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.drop_last = drop_last
        self.use_shared_memory = use_shared_memory
        self.kwargs = kwargs

    def __call__(self,
                 dataset,
                 worker_num,
                 batch_sampler=None,
                 return_list=False):
        self.dataset = dataset
        self.dataset.check_or_download_dataset()
        self.dataset.parse_dataset()
        # get data
        self.dataset.set_transform(self._sample_transforms)
        # set kwargs
        self.dataset.set_kwargs(**self.kwargs)
        # batch sampler
        if batch_sampler is None:
            self._batch_sampler = DistributedBatchSampler(
                self.dataset,
                batch_size=self.batch_size,
                shuffle=self.shuffle,
                drop_last=self.drop_last)
        else:
            self._batch_sampler = batch_sampler

        # DataLoader does not start sub-processes on Windows and macOS,
        # so there is no need to use shared memory there
        use_shared_memory = self.use_shared_memory and \
            sys.platform not in ['win32', 'darwin']
        # check whether the shared memory size is bigger than 1G (1024M)
        if use_shared_memory:
            shm_size = _get_shared_memory_size_in_M()
            if shm_size is not None and shm_size < 1024.:
                logger.warn("Shared memory size is less than 1G, "
                            "disabling shared_memory in DataLoader")
                use_shared_memory = False

        self.dataloader = DataLoader(
            dataset=self.dataset,
            batch_sampler=self._batch_sampler,
            collate_fn=self._batch_transforms,
            num_workers=worker_num,
            return_list=return_list,
            use_shared_memory=use_shared_memory)
        self.loader = iter(self.dataloader)

        return self

    def __len__(self):
        return len(self._batch_sampler)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self.loader)
        except StopIteration:
            self.loader = iter(self.dataloader)
            six.reraise(*sys.exc_info())

    def next(self):
        # python2 compatibility
        return self.__next__()


@register
class TrainReader(BaseDataLoader):
    __shared__ = ['num_classes']

    def __init__(self,
                 sample_transforms=[],
                 batch_transforms=[],
                 batch_size=1,
                 shuffle=True,
                 drop_last=True,
                 num_classes=80,
                 collate_batch=True,
                 **kwargs):
        super(TrainReader, self).__init__(sample_transforms, batch_transforms,
                                          batch_size, shuffle, drop_last,
                                          num_classes, collate_batch, **kwargs)
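

# End-to-end usage sketch (hypothetical dataset, not from the original
# file): a reader is first constructed from config-style kwargs, then
# *called* with a dataset and a worker count to build the underlying
# paddle.io.DataLoader. Any ppdet dataset that implements
# check_or_download_dataset/parse_dataset/set_transform/set_kwargs works:
#
#   loader = TrainReader(
#       sample_transforms=[{'Decode': {}}, {'Permute': {}}],
#       batch_size=2,
#       shuffle=True)(my_dataset, worker_num=2)
#   for batch in loader:
#       ...  # feed `batch` to the model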


@register
class EvalReader(BaseDataLoader):
    __shared__ = ['num_classes']

    def __init__(self,
                 sample_transforms=[],
                 batch_transforms=[],
                 batch_size=1,
                 shuffle=False,
                 drop_last=True,
                 num_classes=80,
                 **kwargs):
        super(EvalReader, self).__init__(sample_transforms, batch_transforms,
                                         batch_size, shuffle, drop_last,
                                         num_classes, **kwargs)


@register
class TestReader(BaseDataLoader):
    __shared__ = ['num_classes']

    def __init__(self,
                 sample_transforms=[],
                 batch_transforms=[],
                 batch_size=1,
                 shuffle=False,
                 drop_last=False,
                 num_classes=80,
                 **kwargs):
        super(TestReader, self).__init__(sample_transforms, batch_transforms,
                                         batch_size, shuffle, drop_last,
                                         num_classes, **kwargs)


@register
class EvalMOTReader(BaseDataLoader):
    __shared__ = ['num_classes']

    def __init__(self,
                 sample_transforms=[],
                 batch_transforms=[],
                 batch_size=1,
                 shuffle=False,
                 drop_last=False,
                 num_classes=1,
                 **kwargs):
        super(EvalMOTReader, self).__init__(
            sample_transforms, batch_transforms, batch_size, shuffle,
            drop_last, num_classes, **kwargs)


@register
class TestMOTReader(BaseDataLoader):
    __shared__ = ['num_classes']

    def __init__(self,
                 sample_transforms=[],
                 batch_transforms=[],
                 batch_size=1,
                 shuffle=False,
                 drop_last=False,
                 num_classes=1,
                 **kwargs):
        super(TestMOTReader, self).__init__(
            sample_transforms, batch_transforms, batch_size, shuffle,
            drop_last, num_classes, **kwargs)