transforms.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random

import cv2
import numpy as np
from PIL import Image

from paddlex.paddleseg.cvlibs import manager
from paddlex.paddleseg.transforms import functional


@manager.TRANSFORMS.add_component
class Compose:
    """
    Do transformation on input data with corresponding pre-processing and augmentation operations.
    The shape of input data to all operations is [height, width, channels].

    Args:
        transforms (list): A list containing data pre-processing or augmentation operations. An empty
            list means only reading images, with no transformation.
        to_rgb (bool, optional): Whether to convert the image to RGB color space. Default: True.

    Raises:
        TypeError: When 'transforms' is not a list.
        ValueError: When the length of 'transforms' is less than 1.
    """

    def __init__(self, transforms, to_rgb=True):
        if not isinstance(transforms, list):
            raise TypeError('The transforms must be a list!')
        self.transforms = transforms
        self.to_rgb = to_rgb

    def __call__(self, im, label=None):
        """
        Args:
            im (str|np.ndarray): It is either an image path or an image object.
            label (str|np.ndarray): It is either a label path or a label ndarray.

        Returns:
            (tuple). A tuple including image, image info, and label after transformation.
        """
        if isinstance(im, str):
            # cv2.imread returns None (instead of raising) when the file
            # cannot be read, so check before converting the dtype.
            im_path = im
            im = cv2.imread(im_path)
            if im is None:
                raise ValueError(
                    "Can't read the image file {}!".format(im_path))
            im = im.astype('float32')
        if isinstance(label, str):
            label = np.asarray(Image.open(label))
        if im is None:
            raise ValueError("Can't read the image file {}!".format(im))
        if self.to_rgb:
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

        for op in self.transforms:
            outputs = op(im, label)
            im = outputs[0]
            if len(outputs) == 2:
                label = outputs[1]
        im = np.transpose(im, (2, 0, 1))
        return (im, label)
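
# Minimal usage sketch (not part of the original module; assumes a BGR
# float32 array such as the one produced by cv2.imread):
#
#     pipeline = Compose([Resize((256, 256)), Normalize()])
#     im_bgr = np.zeros((480, 640, 3), dtype='float32')
#     im_chw, _ = pipeline(im_bgr)
#     # im_chw.shape == (3, 256, 256); to_rgb=True converts BGR to RGB first,
#     # and the result is transposed to CHW layout.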


@manager.TRANSFORMS.add_component
class RandomHorizontalFlip:
    """
    Flip an image horizontally with a certain probability.

    Args:
        prob (float, optional): A probability of horizontally flipping. Default: 0.5.
    """

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, im, label=None):
        if random.random() < self.prob:
            im = functional.horizontal_flip(im)
            if label is not None:
                label = functional.horizontal_flip(label)
        if label is None:
            return (im, )
        else:
            return (im, label)


@manager.TRANSFORMS.add_component
class RandomVerticalFlip:
    """
    Flip an image vertically with a certain probability.

    Args:
        prob (float, optional): A probability of vertical flipping. Default: 0.1.
    """

    def __init__(self, prob=0.1):
        self.prob = prob

    def __call__(self, im, label=None):
        if random.random() < self.prob:
            im = functional.vertical_flip(im)
            if label is not None:
                label = functional.vertical_flip(label)
        if label is None:
            return (im, )
        else:
            return (im, label)


@manager.TRANSFORMS.add_component
class Resize:
    """
    Resize an image.

    Args:
        target_size (list|tuple, optional): The target size of the image. Default: (512, 512).
        interp (str, optional): The interpolation mode of resize, consistent with OpenCV:
            ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM']. Note that when it is
            'RANDOM', a random interpolation mode is chosen on each call. Default: "LINEAR".

    Raises:
        TypeError: When 'target_size' is neither list nor tuple.
        ValueError: When 'interp' is not one of the pre-defined methods ('NEAREST', 'LINEAR',
            'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM').
    """

    # The interpolation modes
    interp_dict = {
        'NEAREST': cv2.INTER_NEAREST,
        'LINEAR': cv2.INTER_LINEAR,
        'CUBIC': cv2.INTER_CUBIC,
        'AREA': cv2.INTER_AREA,
        'LANCZOS4': cv2.INTER_LANCZOS4
    }

    def __init__(self, target_size=(512, 512), interp='LINEAR'):
        self.interp = interp
        if not (interp == "RANDOM" or interp in self.interp_dict):
            raise ValueError("`interp` should be one of {}".format(
                self.interp_dict.keys()))
        if isinstance(target_size, (list, tuple)):
            if len(target_size) != 2:
                raise ValueError(
                    '`target_size` should include 2 elements, but it is {}'.
                    format(target_size))
        else:
            raise TypeError(
                "Type of `target_size` is invalid. It should be list or tuple, but it is {}"
                .format(type(target_size)))
        self.target_size = target_size

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).

        Raises:
            TypeError: When the type of 'im' is not numpy.
            ValueError: When 'im' is not 3-dimensional.
        """
        if not isinstance(im, np.ndarray):
            raise TypeError("Resize: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('Resize: image is not 3-dimensional.')
        if self.interp == "RANDOM":
            interp = random.choice(list(self.interp_dict.keys()))
        else:
            interp = self.interp
        im = functional.resize(im, self.target_size, self.interp_dict[interp])
        if label is not None:
            label = functional.resize(label, self.target_size,
                                      cv2.INTER_NEAREST)

        if label is None:
            return (im, )
        else:
            return (im, label)
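
# Example usage (illustrative sketch with a synthetic array):
#
#     dummy = np.zeros((480, 640, 3), dtype='float32')
#     resized, = Resize(target_size=(512, 512), interp='RANDOM')(dummy)
#     # resized.shape == (512, 512, 3); with interp='RANDOM' a different
#     # interpolation mode may be used on every call, while labels are always
#     # resized with nearest-neighbour interpolation.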


@manager.TRANSFORMS.add_component
class ResizeByLong:
    """
    Resize the long side of an image to a given size, and then scale the other side proportionally.

    Args:
        long_size (int): The target size of the long side.
    """

    def __init__(self, long_size):
        self.long_size = long_size

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        im = functional.resize_long(im, self.long_size)
        if label is not None:
            label = functional.resize_long(label, self.long_size,
                                           cv2.INTER_NEAREST)

        if label is None:
            return (im, )
        else:
            return (im, label)


@manager.TRANSFORMS.add_component
class LimitLong:
    """
    Limit the long edge of an image.

    If the long edge is larger than max_long, the long edge is resized to
    max_long and the short edge is scaled proportionally.
    If the long edge is smaller than min_long, the long edge is resized to
    min_long and the short edge is scaled proportionally.

    Args:
        max_long (int, optional): If the long edge of the image is larger than max_long,
            it will be resized to max_long. Default: None.
        min_long (int, optional): If the long edge of the image is smaller than min_long,
            it will be resized to min_long. Default: None.
    """

    def __init__(self, max_long=None, min_long=None):
        if max_long is not None:
            if not isinstance(max_long, int):
                raise TypeError(
                    "Type of `max_long` is invalid. It should be int, but it is {}"
                    .format(type(max_long)))
        if min_long is not None:
            if not isinstance(min_long, int):
                raise TypeError(
                    "Type of `min_long` is invalid. It should be int, but it is {}"
                    .format(type(min_long)))
        if (max_long is not None) and (min_long is not None):
            if min_long > max_long:
                raise ValueError(
                    '`max_long` should not be smaller than `min_long`, but they are {} and {}'
                    .format(max_long, min_long))
        self.max_long = max_long
        self.min_long = min_long

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        h, w = im.shape[0], im.shape[1]
        long_edge = max(h, w)
        target = long_edge
        if (self.max_long is not None) and (long_edge > self.max_long):
            target = self.max_long
        elif (self.min_long is not None) and (long_edge < self.min_long):
            target = self.min_long

        if target != long_edge:
            im = functional.resize_long(im, target)
            if label is not None:
                label = functional.resize_long(label, target,
                                               cv2.INTER_NEAREST)

        if label is None:
            return (im, )
        else:
            return (im, label)
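
# Example (illustrative sketch with synthetic shapes):
#
#     clamp = LimitLong(max_long=1024, min_long=256)
#     big, = clamp(np.zeros((800, 2000, 3), dtype='float32'))
#     # The long edge 2000 exceeds max_long, so it is scaled down to 1024 and
#     # the short edge shrinks proportionally (roughly 800 * 1024 / 2000 ≈ 410).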


@manager.TRANSFORMS.add_component
class ResizeRangeScaling:
    """
    Resize the long side of an image into a range, and then scale the other side proportionally.

    Args:
        min_value (int, optional): The minimum value of the long side after resize. Default: 400.
        max_value (int, optional): The maximum value of the long side after resize. Default: 600.
    """

    def __init__(self, min_value=400, max_value=600):
        if min_value > max_value:
            raise ValueError('min_value must be less than max_value, '
                             'but they are {} and {}.'.format(
                                 min_value, max_value))
        self.min_value = min_value
        self.max_value = max_value

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        if self.min_value == self.max_value:
            random_size = self.max_value
        else:
            random_size = int(
                np.random.uniform(self.min_value, self.max_value) + 0.5)
        im = functional.resize_long(im, random_size, cv2.INTER_LINEAR)
        if label is not None:
            label = functional.resize_long(label, random_size,
                                           cv2.INTER_NEAREST)

        if label is None:
            return (im, )
        else:
            return (im, label)


@manager.TRANSFORMS.add_component
class ResizeStepScaling:
    """
    Scale an image proportionally within a range.

    Args:
        min_scale_factor (float, optional): The minimum scale. Default: 0.75.
        max_scale_factor (float, optional): The maximum scale. Default: 1.25.
        scale_step_size (float, optional): The scale interval. Default: 0.25.

    Raises:
        ValueError: When min_scale_factor is larger than max_scale_factor.
    """

    def __init__(self,
                 min_scale_factor=0.75,
                 max_scale_factor=1.25,
                 scale_step_size=0.25):
        if min_scale_factor > max_scale_factor:
            raise ValueError(
                'min_scale_factor must be less than max_scale_factor, '
                'but they are {} and {}.'.format(min_scale_factor,
                                                 max_scale_factor))
        self.min_scale_factor = min_scale_factor
        self.max_scale_factor = max_scale_factor
        self.scale_step_size = scale_step_size

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        if self.min_scale_factor == self.max_scale_factor:
            scale_factor = self.min_scale_factor
        elif self.scale_step_size == 0:
            scale_factor = np.random.uniform(self.min_scale_factor,
                                             self.max_scale_factor)
        else:
            num_steps = int((self.max_scale_factor - self.min_scale_factor) /
                            self.scale_step_size + 1)
            scale_factors = np.linspace(self.min_scale_factor,
                                        self.max_scale_factor,
                                        num_steps).tolist()
            np.random.shuffle(scale_factors)
            scale_factor = scale_factors[0]

        w = int(round(scale_factor * im.shape[1]))
        h = int(round(scale_factor * im.shape[0]))
        im = functional.resize(im, (w, h), cv2.INTER_LINEAR)
        if label is not None:
            label = functional.resize(label, (w, h), cv2.INTER_NEAREST)

        if label is None:
            return (im, )
        else:
            return (im, label)
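
# Example (illustrative sketch): with the defaults min_scale_factor=0.75,
# max_scale_factor=1.25 and scale_step_size=0.25, the candidate factors are
# np.linspace(0.75, 1.25, 3) == [0.75, 1.0, 1.25], and one of them is drawn
# at random on each call:
#
#     scaler = ResizeStepScaling()
#     scaled, = scaler(np.zeros((400, 600, 3), dtype='float32'))
#     # scaled.shape[0] is one of 300, 400 or 500 (i.e. 400 * factor).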


@manager.TRANSFORMS.add_component
class Normalize:
    """
    Normalize an image.

    Args:
        mean (list|tuple, optional): The mean value of the data set. Default: (0.5, 0.5, 0.5).
        std (list|tuple, optional): The standard deviation of the data set. Default: (0.5, 0.5, 0.5).

    Raises:
        ValueError: When mean/std is not a list or tuple, or when any value in std is 0.
    """

    def __init__(self, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
        self.mean = mean
        self.std = std
        if not (isinstance(self.mean, (list, tuple))
                and isinstance(self.std, (list, tuple))):
            raise ValueError(
                "{}: input type is invalid. It should be list or tuple".format(
                    self))
        from functools import reduce
        if reduce(lambda x, y: x * y, self.std) == 0:
            raise ValueError('{}: std is invalid!'.format(self))

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
        std = np.array(self.std)[np.newaxis, np.newaxis, :]
        im = functional.normalize(im, mean, std)

        if label is None:
            return (im, )
        else:
            return (im, label)
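
# Sketch of the normalization applied (assuming functional.normalize scales
# pixel values to [0, 1] before subtracting the mean and dividing by std, as
# in PaddleSeg):
#
#     norm = Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
#     out, = norm(np.full((2, 2, 3), 255, dtype='float32'))
#     # Each value becomes roughly (255 / 255 - 0.5) / 0.5 == 1.0.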


@manager.TRANSFORMS.add_component
class Padding:
    """
    Add bottom-right padding to a raw image or annotation image.

    Args:
        target_size (list|tuple): The target size after padding.
        im_padding_value (list, optional): The padding value of the raw image.
            Default: (127.5, 127.5, 127.5).
        label_padding_value (int, optional): The padding value of the annotation image. Default: 255.

    Raises:
        TypeError: When target_size is neither list nor tuple.
        ValueError: When the length of target_size is not 2.
    """

    def __init__(self,
                 target_size,
                 im_padding_value=(127.5, 127.5, 127.5),
                 label_padding_value=255):
        if isinstance(target_size, (list, tuple)):
            if len(target_size) != 2:
                raise ValueError(
                    '`target_size` should include 2 elements, but it is {}'.
                    format(target_size))
        else:
            raise TypeError(
                "Type of target_size is invalid. It should be list or tuple, now is {}"
                .format(type(target_size)))
        self.target_size = target_size
        self.im_padding_value = im_padding_value
        self.label_padding_value = label_padding_value

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        im_height, im_width = im.shape[0], im.shape[1]
        if isinstance(self.target_size, int):
            target_height = self.target_size
            target_width = self.target_size
        else:
            target_height = self.target_size[1]
            target_width = self.target_size[0]
        pad_height = target_height - im_height
        pad_width = target_width - im_width
        if pad_height < 0 or pad_width < 0:
            raise ValueError(
                'The size of the image should not be larger than `target_size`, but the size of the image ({}, {}) is larger than `target_size` ({}, {})'
                .format(im_width, im_height, target_width, target_height))
        else:
            im = cv2.copyMakeBorder(
                im,
                0,
                pad_height,
                0,
                pad_width,
                cv2.BORDER_CONSTANT,
                value=self.im_padding_value)
            if label is not None:
                label = cv2.copyMakeBorder(
                    label,
                    0,
                    pad_height,
                    0,
                    pad_width,
                    cv2.BORDER_CONSTANT,
                    value=self.label_padding_value)
        if label is None:
            return (im, )
        else:
            return (im, label)
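
# Example (illustrative sketch): note that `target_size` is interpreted as
# (width, height), and padding is only added on the bottom and right.
#
#     pad = Padding(target_size=(640, 480))
#     padded, = pad(np.zeros((400, 600, 3), dtype='float32'))
#     # padded.shape == (480, 640, 3); the extra rows and columns are filled
#     # with im_padding_value.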


@manager.TRANSFORMS.add_component
class PaddingByAspectRatio:
    """
    Pad an image to a given aspect ratio (width / height) by adding bottom or
    right padding as needed.

    Args:
        aspect_ratio (int|float, optional): The aspect ratio = width / height. Default: 1.
    """

    def __init__(self,
                 aspect_ratio=1,
                 im_padding_value=(127.5, 127.5, 127.5),
                 label_padding_value=255):
        self.aspect_ratio = aspect_ratio
        self.im_padding_value = im_padding_value
        self.label_padding_value = label_padding_value

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        img_height = im.shape[0]
        img_width = im.shape[1]
        ratio = img_width / img_height
        if ratio == self.aspect_ratio:
            if label is None:
                return (im, )
            else:
                return (im, label)
        elif ratio > self.aspect_ratio:
            img_height = int(img_width / self.aspect_ratio)
        else:
            img_width = int(img_height * self.aspect_ratio)
        padding = Padding((img_width, img_height),
                          im_padding_value=self.im_padding_value,
                          label_padding_value=self.label_padding_value)
        return padding(im, label)


@manager.TRANSFORMS.add_component
class RandomPaddingCrop:
    """
    Crop a sub-image from a raw image and annotation image randomly. If the target cropping size
    is larger than the original image, then bottom-right padding is added first.

    Args:
        crop_size (tuple, optional): The target cropping size. Default: (512, 512).
        im_padding_value (list, optional): The padding value of the raw image.
            Default: (127.5, 127.5, 127.5).
        label_padding_value (int, optional): The padding value of the annotation image. Default: 255.

    Raises:
        TypeError: When crop_size is neither list nor tuple.
        ValueError: When the length of crop_size is not 2.
    """

    def __init__(self,
                 crop_size=(512, 512),
                 im_padding_value=(127.5, 127.5, 127.5),
                 label_padding_value=255):
        if isinstance(crop_size, (list, tuple)):
            if len(crop_size) != 2:
                raise ValueError(
                    'Type of `crop_size` is list or tuple. It should include 2 elements, but it is {}'
                    .format(crop_size))
        else:
            raise TypeError(
                "The type of `crop_size` is invalid. It should be list or tuple, but it is {}"
                .format(type(crop_size)))
        self.crop_size = crop_size
        self.im_padding_value = im_padding_value
        self.label_padding_value = label_padding_value

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        if isinstance(self.crop_size, int):
            crop_width = self.crop_size
            crop_height = self.crop_size
        else:
            crop_width = self.crop_size[0]
            crop_height = self.crop_size[1]

        img_height = im.shape[0]
        img_width = im.shape[1]

        if img_height == crop_height and img_width == crop_width:
            if label is None:
                return (im, )
            else:
                return (im, label)
        else:
            pad_height = max(crop_height - img_height, 0)
            pad_width = max(crop_width - img_width, 0)
            if (pad_height > 0 or pad_width > 0):
                im = cv2.copyMakeBorder(
                    im,
                    0,
                    pad_height,
                    0,
                    pad_width,
                    cv2.BORDER_CONSTANT,
                    value=self.im_padding_value)
                if label is not None:
                    label = cv2.copyMakeBorder(
                        label,
                        0,
                        pad_height,
                        0,
                        pad_width,
                        cv2.BORDER_CONSTANT,
                        value=self.label_padding_value)
                img_height = im.shape[0]
                img_width = im.shape[1]

            if crop_height > 0 and crop_width > 0:
                h_off = np.random.randint(img_height - crop_height + 1)
                w_off = np.random.randint(img_width - crop_width + 1)

                im = im[h_off:(crop_height + h_off), w_off:(
                    w_off + crop_width), :]
                if label is not None:
                    label = label[h_off:(crop_height + h_off), w_off:(
                        w_off + crop_width)]

            if label is None:
                return (im, )
            else:
                return (im, label)
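
# Example (illustrative sketch): a smaller image is first padded up to the
# crop size, then a larger dimension is cropped at a random offset.
#
#     crop = RandomPaddingCrop(crop_size=(512, 512))
#     out, = crop(np.zeros((300, 700, 3), dtype='float32'))
#     # Height 300 is padded to 512, width 700 is randomly cropped to 512,
#     # so out.shape == (512, 512, 3).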


@manager.TRANSFORMS.add_component
class RandomBlur:
    """
    Blur an image with a Gaussian filter with a certain probability.

    Args:
        prob (float, optional): A probability of blurring an image. Default: 0.1.
    """

    def __init__(self, prob=0.1):
        self.prob = prob

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        if self.prob <= 0:
            n = 0
        elif self.prob >= 1:
            n = 1
        else:
            n = int(1.0 / self.prob)
        if n > 0:
            if np.random.randint(0, n) == 0:
                # Use an odd kernel size between 3 and 9.
                radius = np.random.randint(3, 10)
                if radius % 2 != 1:
                    radius = radius + 1
                if radius > 9:
                    radius = 9
                im = cv2.GaussianBlur(im, (radius, radius), 0, 0)

        if label is None:
            return (im, )
        else:
            return (im, label)


@manager.TRANSFORMS.add_component
class RandomRotation:
    """
    Rotate an image randomly with padding.

    Args:
        max_rotation (float, optional): The maximum rotation degree. Default: 15.
        im_padding_value (list, optional): The padding value of the raw image.
            Default: (127.5, 127.5, 127.5).
        label_padding_value (int, optional): The padding value of the annotation image. Default: 255.
    """

    def __init__(self,
                 max_rotation=15,
                 im_padding_value=(127.5, 127.5, 127.5),
                 label_padding_value=255):
        self.max_rotation = max_rotation
        self.im_padding_value = im_padding_value
        self.label_padding_value = label_padding_value

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        if self.max_rotation > 0:
            (h, w) = im.shape[:2]
            do_rotation = np.random.uniform(-self.max_rotation,
                                            self.max_rotation)
            pc = (w // 2, h // 2)
            r = cv2.getRotationMatrix2D(pc, do_rotation, 1.0)
            cos = np.abs(r[0, 0])
            sin = np.abs(r[0, 1])

            # Expand the canvas so the rotated image is not clipped, and shift
            # the rotation matrix so the result stays centered.
            nw = int((h * sin) + (w * cos))
            nh = int((h * cos) + (w * sin))
            (cx, cy) = pc
            r[0, 2] += (nw / 2) - cx
            r[1, 2] += (nh / 2) - cy
            dsize = (nw, nh)
            im = cv2.warpAffine(
                im,
                r,
                dsize=dsize,
                flags=cv2.INTER_LINEAR,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=self.im_padding_value)
            if label is not None:
                label = cv2.warpAffine(
                    label,
                    r,
                    dsize=dsize,
                    flags=cv2.INTER_NEAREST,
                    borderMode=cv2.BORDER_CONSTANT,
                    borderValue=self.label_padding_value)

        if label is None:
            return (im, )
        else:
            return (im, label)
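
# Sketch of the canvas expansion above: rotating a w x h image by angle θ
# needs an output canvas of roughly
#     nw = h * |sin θ| + w * |cos θ|
#     nh = h * |cos θ| + w * |sin θ|
# so that no content is clipped; the exposed corners are filled with the
# configured padding values.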


@manager.TRANSFORMS.add_component
class RandomScaleAspect:
    """
    Crop a sub-image from an original image within a range of area ratio and aspect ratio, and
    then scale the sub-image back to the size of the original image.

    Args:
        min_scale (float, optional): The minimum area ratio of the cropped image to the original image. Default: 0.5.
        aspect_ratio (float, optional): The minimum aspect ratio. Default: 0.33.
    """

    def __init__(self, min_scale=0.5, aspect_ratio=0.33):
        self.min_scale = min_scale
        self.aspect_ratio = aspect_ratio

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        if self.min_scale != 0 and self.aspect_ratio != 0:
            img_height = im.shape[0]
            img_width = im.shape[1]
            for i in range(0, 10):
                area = img_height * img_width
                target_area = area * np.random.uniform(self.min_scale, 1.0)
                aspectRatio = np.random.uniform(self.aspect_ratio,
                                                1.0 / self.aspect_ratio)

                dw = int(np.sqrt(target_area * 1.0 * aspectRatio))
                dh = int(np.sqrt(target_area * 1.0 / aspectRatio))
                if (np.random.randint(10) < 5):
                    dw, dh = dh, dw

                if (dh < img_height and dw < img_width):
                    h1 = np.random.randint(0, img_height - dh)
                    w1 = np.random.randint(0, img_width - dw)

                    im = im[h1:(h1 + dh), w1:(w1 + dw), :]
                    im = cv2.resize(
                        im, (img_width, img_height),
                        interpolation=cv2.INTER_LINEAR)
                    if label is not None:
                        label = label[h1:(h1 + dh), w1:(w1 + dw)]
                        label = cv2.resize(
                            label, (img_width, img_height),
                            interpolation=cv2.INTER_NEAREST)
                    break

        if label is None:
            return (im, )
        else:
            return (im, label)


@manager.TRANSFORMS.add_component
class RandomDistort:
    """
    Distort an image with random configurations.

    Args:
        brightness_range (float, optional): A range of brightness. Default: 0.5.
        brightness_prob (float, optional): A probability of adjusting brightness. Default: 0.5.
        contrast_range (float, optional): A range of contrast. Default: 0.5.
        contrast_prob (float, optional): A probability of adjusting contrast. Default: 0.5.
        saturation_range (float, optional): A range of saturation. Default: 0.5.
        saturation_prob (float, optional): A probability of adjusting saturation. Default: 0.5.
        hue_range (int, optional): A range of hue. Default: 18.
        hue_prob (float, optional): A probability of adjusting hue. Default: 0.5.
    """

    def __init__(self,
                 brightness_range=0.5,
                 brightness_prob=0.5,
                 contrast_range=0.5,
                 contrast_prob=0.5,
                 saturation_range=0.5,
                 saturation_prob=0.5,
                 hue_range=18,
                 hue_prob=0.5):
        self.brightness_range = brightness_range
        self.brightness_prob = brightness_prob
        self.contrast_range = contrast_range
        self.contrast_prob = contrast_prob
        self.saturation_range = saturation_range
        self.saturation_prob = saturation_prob
        self.hue_range = hue_range
        self.hue_prob = hue_prob

    def __call__(self, im, label=None):
        """
        Args:
            im (np.ndarray): The image data.
            label (np.ndarray, optional): The label data. Default: None.

        Returns:
            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
        """
        brightness_lower = 1 - self.brightness_range
        brightness_upper = 1 + self.brightness_range
        contrast_lower = 1 - self.contrast_range
        contrast_upper = 1 + self.contrast_range
        saturation_lower = 1 - self.saturation_range
        saturation_upper = 1 + self.saturation_range
        hue_lower = -self.hue_range
        hue_upper = self.hue_range
        ops = [
            functional.brightness, functional.contrast, functional.saturation,
            functional.hue
        ]
        random.shuffle(ops)
        params_dict = {
            'brightness': {
                'brightness_lower': brightness_lower,
                'brightness_upper': brightness_upper
            },
            'contrast': {
                'contrast_lower': contrast_lower,
                'contrast_upper': contrast_upper
            },
            'saturation': {
                'saturation_lower': saturation_lower,
                'saturation_upper': saturation_upper
            },
            'hue': {
                'hue_lower': hue_lower,
                'hue_upper': hue_upper
            }
        }
        prob_dict = {
            'brightness': self.brightness_prob,
            'contrast': self.contrast_prob,
            'saturation': self.saturation_prob,
            'hue': self.hue_prob
        }
        im = im.astype('uint8')
        im = Image.fromarray(im)
        for op in ops:
            params = params_dict[op.__name__]
            prob = prob_dict[op.__name__]
            params['im'] = im
            if np.random.uniform(0, 1) < prob:
                im = op(**params)
        im = np.asarray(im).astype('float32')
        if label is None:
            return (im, )
        else:
            return (im, label)
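
# Example end-to-end pipeline (illustrative sketch; the file paths and the
# exact transform choices below are hypothetical):
#
#     train_transforms = Compose([
#         ResizeStepScaling(0.5, 2.0, 0.25),
#         RandomPaddingCrop(crop_size=(512, 512)),
#         RandomHorizontalFlip(),
#         RandomDistort(),
#         Normalize(),
#     ])
#     im, label = train_transforms('train/0001.jpg', 'train/0001.png')
#     # Compose returns the image in CHW layout, so `im` can be batched and
#     # fed to a segmentation model directly.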