# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# function:
#     operators to process sample,
#     eg: decode/resize/crop image

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

try:
    from collections.abc import Sequence
except Exception:
    from collections import Sequence

from numbers import Number, Integral

import uuid
import random
import math
import numpy as np
import os
import copy
import logging
import cv2
from PIL import Image, ImageDraw
import pickle
import threading

MUTEX = threading.Lock()

from paddlex.ppdet.core.workspace import serializable
from paddlex.ppdet.modeling import bbox_utils
from ..reader import Compose
from .op_helper import (satisfy_sample_constraint, filter_and_process,
                        generate_sample_bbox, clip_bbox, data_anchor_sampling,
                        satisfy_sample_constraint_coverage,
                        crop_image_sampling, generate_sample_bbox_square,
                        bbox_area_sampling, is_poly, get_border)
from paddlex.ppdet.utils.logger import setup_logger
from paddlex.ppdet.modeling.keypoint_utils import get_affine_transform, affine_transform

logger = setup_logger(__name__)

registered_ops = []


def register_op(cls):
    registered_ops.append(cls.__name__)
    if not hasattr(BaseOperator, cls.__name__):
        setattr(BaseOperator, cls.__name__, cls)
    else:
        raise KeyError("The {} class has been registered.".format(
            cls.__name__))
    return serializable(cls)


class BboxError(ValueError):
    pass


class ImageError(ValueError):
    pass


class BaseOperator(object):
    def __init__(self, name=None):
        if name is None:
            name = self.__class__.__name__
        self._id = name + '_' + str(uuid.uuid4())[-6:]

    def apply(self, sample, context=None):
        """ Process a sample.
        Args:
            sample (dict): a dict of sample, eg: {'image':xx, 'label': xxx}
            context (dict): info about this sample processing
        Returns:
            result (dict): a processed sample
        """
        return sample

    def __call__(self, sample, context=None):
        """ Process a sample.
        Args:
            sample (dict): a dict of sample, eg: {'image':xx, 'label': xxx}
            context (dict): info about this sample processing
        Returns:
            result (dict): a processed sample
        """
        if isinstance(sample, Sequence):
            for i in range(len(sample)):
                sample[i] = self.apply(sample[i], context)
        else:
            sample = self.apply(sample, context)
        return sample

    def __str__(self):
        return str(self._id)
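
# A minimal sketch of how the registration mechanism above is used (the
# operator name and the sample layout here are illustrative assumptions):
#
#     @register_op
#     class MyOp(BaseOperator):
#         def apply(self, sample, context=None):
#             sample['image'] = sample['image'][::-1]  # e.g. flip rows
#             return sample
#
# After decoration, 'MyOp' is appended to `registered_ops`, attached to
# `BaseOperator` as an attribute, and wrapped by `serializable`, so it can
# be referenced by name from a YAML pipeline config.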


@register_op
class Decode(BaseOperator):
    def __init__(self):
        """ Transform the image data to numpy format following the rgb format
        """
        super(Decode, self).__init__()

    def apply(self, sample, context=None):
        """ Load the image from 'im_file' when the 'image' field has not
        been filled yet."""
        if 'image' not in sample:
            with open(sample['im_file'], 'rb') as f:
                sample['image'] = f.read()
            sample.pop('im_file')
        im = sample['image']
        data = np.frombuffer(im, dtype='uint8')
        im = cv2.imdecode(data, 1)  # BGR mode, but need RGB mode
        if 'keep_ori_im' in sample and sample['keep_ori_im']:
            sample['ori_image'] = im
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        sample['image'] = im
        if 'h' not in sample:
            sample['h'] = im.shape[0]
        elif sample['h'] != im.shape[0]:
            logger.warning(
                "The actual image height: {} is not equal to the "
                "height: {} in annotation, and update sample['h'] by actual "
                "image height.".format(im.shape[0], sample['h']))
            sample['h'] = im.shape[0]
        if 'w' not in sample:
            sample['w'] = im.shape[1]
        elif sample['w'] != im.shape[1]:
            logger.warning(
                "The actual image width: {} is not equal to the "
                "width: {} in annotation, and update sample['w'] by actual "
                "image width.".format(im.shape[1], sample['w']))
            sample['w'] = im.shape[1]
        sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
        sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
        return sample
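
# Example usage (a minimal sketch; 'demo.jpg' is a placeholder path):
#
#     decode = Decode()
#     sample = decode({'im_file': 'demo.jpg'})
#     # sample['image']  -> RGB np.ndarray of shape (h, w, 3)
#     # sample['im_shape'] -> [h, w]; sample['scale_factor'] -> [1., 1.]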


def _make_dirs(dirname):
    try:
        from pathlib import Path
    except ImportError:
        from pathlib2 import Path
    Path(dirname).mkdir(exist_ok=True)


@register_op
class DecodeCache(BaseOperator):
    def __init__(self, cache_root=None):
        '''decode the image and cache the decoded array to disk
        '''
        super(DecodeCache, self).__init__()
        self.use_cache = cache_root is not None
        self.cache_root = cache_root
        if cache_root is not None:
            _make_dirs(cache_root)

    def apply(self, sample, context=None):
        if self.use_cache and os.path.exists(
                self.cache_path(self.cache_root, sample['im_file'])):
            path = self.cache_path(self.cache_root, sample['im_file'])
            im = self.load(path)
        else:
            if 'image' not in sample:
                with open(sample['im_file'], 'rb') as f:
                    sample['image'] = f.read()
            im = sample['image']
            data = np.frombuffer(im, dtype='uint8')
            im = cv2.imdecode(data, 1)  # BGR mode, but need RGB mode
            if 'keep_ori_im' in sample and sample['keep_ori_im']:
                sample['ori_image'] = im
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            if self.use_cache and not os.path.exists(
                    self.cache_path(self.cache_root, sample['im_file'])):
                path = self.cache_path(self.cache_root, sample['im_file'])
                self.dump(im, path)
        sample['image'] = im
        sample['h'] = im.shape[0]
        sample['w'] = im.shape[1]
        sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
        sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
        sample.pop('im_file')
        return sample

    @staticmethod
    def cache_path(dir_root, im_file):
        return os.path.join(dir_root, os.path.basename(im_file) + '.pkl')

    @staticmethod
    def load(path):
        with open(path, 'rb') as f:
            im = pickle.load(f)
        return im

    @staticmethod
    def dump(obj, path):
        MUTEX.acquire()
        try:
            with open(path, 'wb') as f:
                pickle.dump(obj, f)
        except Exception as e:
            logger.warning('dump {} occurs exception {}'.format(path, str(e)))
        finally:
            MUTEX.release()
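
# Example usage (a minimal sketch; the cache directory is an assumption):
#
#     decode = DecodeCache(cache_root='/tmp/im_cache')
#     sample = decode({'im_file': 'demo.jpg'})
#     # the first call decodes and pickles the RGB array to
#     # /tmp/im_cache/demo.jpg.pkl; later calls load the pickle instead.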


@register_op
class SniperDecodeCrop(BaseOperator):
    def __init__(self):
        super(SniperDecodeCrop, self).__init__()

    def __call__(self, sample, context=None):
        if 'image' not in sample:
            with open(sample['im_file'], 'rb') as f:
                sample['image'] = f.read()
            sample.pop('im_file')
        im = sample['image']
        data = np.frombuffer(im, dtype='uint8')
        im = cv2.imdecode(data,
                          cv2.IMREAD_COLOR)  # BGR mode, but need RGB mode
        if 'keep_ori_im' in sample and sample['keep_ori_im']:
            sample['ori_image'] = im
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        chip = sample['chip']
        x1, y1, x2, y2 = [int(xi) for xi in chip]
        im = im[max(y1, 0):min(y2, im.shape[0]),
                max(x1, 0):min(x2, im.shape[1]), :]
        sample['image'] = im
        h = im.shape[0]
        w = im.shape[1]
        # sample['im_info'] = [h, w, 1.0]
        sample['h'] = h
        sample['w'] = w
        sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
        sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
        return sample


@register_op
class Permute(BaseOperator):
    def __init__(self):
        """
        Change the data layout to (C, H, W)
        """
        super(Permute, self).__init__()

    def apply(self, sample, context=None):
        im = sample['image']
        im = im.transpose((2, 0, 1))
        sample['image'] = im
        return sample
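
# Example usage (a minimal sketch):
#
#     perm = Permute()
#     sample = perm({'image': np.zeros((480, 640, 3), dtype=np.float32)})
#     # sample['image'].shape == (3, 480, 640), i.e. HWC -> CHW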


@register_op
class Lighting(BaseOperator):
    """
    Lighting the image by eigenvalues and eigenvectors
    Args:
        eigval (list): eigenvalues
        eigvec (list): eigenvectors
        alphastd (float): random weight of lighting, 0.1 by default
    """

    def __init__(self, eigval, eigvec, alphastd=0.1):
        super(Lighting, self).__init__()
        self.alphastd = alphastd
        self.eigval = np.array(eigval).astype('float32')
        self.eigvec = np.array(eigvec).astype('float32')

    def apply(self, sample, context=None):
        alpha = np.random.normal(scale=self.alphastd, size=(3, ))
        sample['image'] += np.dot(self.eigvec, self.eigval * alpha)
        return sample


@register_op
class RandomErasingImage(BaseOperator):
    def __init__(self, prob=0.5, lower=0.02, higher=0.4, aspect_ratio=0.3):
        """
        Random Erasing Data Augmentation, see https://arxiv.org/abs/1708.04896
        Args:
            prob (float): probability to carry out random erasing
            lower (float): lower limit of the erasing area ratio
            higher (float): upper limit of the erasing area ratio
            aspect_ratio (float): aspect ratio of the erasing region
        """
        super(RandomErasingImage, self).__init__()
        self.prob = prob
        self.lower = lower
        self.higher = higher
        self.aspect_ratio = aspect_ratio

    def apply(self, sample, context=None):
        gt_bbox = sample['gt_bbox']
        im = sample['image']
        if not isinstance(im, np.ndarray):
            raise TypeError("{}: image is not a numpy array.".format(self))
        if len(im.shape) != 3:
            raise ImageError("{}: image is not 3-dimensional.".format(self))
        for idx in range(gt_bbox.shape[0]):
            if self.prob <= np.random.rand():
                continue
            x1, y1, x2, y2 = gt_bbox[idx, :]
            w_bbox = x2 - x1
            h_bbox = y2 - y1
            area = w_bbox * h_bbox
            target_area = random.uniform(self.lower, self.higher) * area
            aspect_ratio = random.uniform(self.aspect_ratio,
                                          1 / self.aspect_ratio)
            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))
            if w < w_bbox and h < h_bbox:
                off_y1 = random.randint(0, int(h_bbox - h))
                off_x1 = random.randint(0, int(w_bbox - w))
                im[int(y1 + off_y1):int(y1 + off_y1 + h),
                   int(x1 + off_x1):int(x1 + off_x1 + w), :] = 0
        sample['image'] = im
        return sample


@register_op
class NormalizeImage(BaseOperator):
    def __init__(self,
                 mean=[0.485, 0.456, 0.406],
                 std=[1, 1, 1],
                 is_scale=True):
        """
        Args:
            mean (list): the pixel mean
            std (list): the pixel standard deviation
        """
        super(NormalizeImage, self).__init__()
        self.mean = mean
        self.std = std
        self.is_scale = is_scale
        if not (isinstance(self.mean, list) and isinstance(self.std, list) and
                isinstance(self.is_scale, bool)):
            raise TypeError("{}: input type is invalid.".format(self))
        from functools import reduce
        if reduce(lambda x, y: x * y, self.std) == 0:
            raise ValueError('{}: std is invalid!'.format(self))

    def apply(self, sample, context=None):
        """Normalize the image.
        Operators:
            1. (optional) Scale the pixel values to [0, 1]
            2. Subtract the mean from each pixel and divide by the std
        """
        im = sample['image']
        im = im.astype(np.float32, copy=False)
        mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
        std = np.array(self.std)[np.newaxis, np.newaxis, :]
        if self.is_scale:
            im = im / 255.0
        im -= mean
        im /= std
        sample['image'] = im
        return sample
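
# Example usage (a minimal sketch): with is_scale=True the transform is
# image = (image / 255 - mean) / std, applied per channel:
#
#     norm = NormalizeImage(mean=[0.485, 0.456, 0.406],
#                           std=[0.229, 0.224, 0.225], is_scale=True)
#     sample = norm({'image': np.full((2, 2, 3), 255, dtype=np.uint8)})
#     # each channel c now holds (1.0 - mean[c]) / std[c]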


@register_op
class GridMask(BaseOperator):
    def __init__(self,
                 use_h=True,
                 use_w=True,
                 rotate=1,
                 offset=False,
                 ratio=0.5,
                 mode=1,
                 prob=0.7,
                 upper_iter=360000):
        """
        GridMask Data Augmentation, see https://arxiv.org/abs/2001.04086
        Args:
            use_h (bool): whether to mask vertically
            use_w (bool): whether to mask horizontally
            rotate (float): angle for the mask to rotate
            offset (float): mask offset
            ratio (float): mask ratio
            mode (int): gridmask mode
            prob (float): max probability to carry out gridmask
            upper_iter (int): suggested to be equal to global max_iter
        """
        super(GridMask, self).__init__()
        self.use_h = use_h
        self.use_w = use_w
        self.rotate = rotate
        self.offset = offset
        self.ratio = ratio
        self.mode = mode
        self.prob = prob
        self.upper_iter = upper_iter
        from .gridmask_utils import Gridmask
        self.gridmask_op = Gridmask(
            use_h,
            use_w,
            rotate=rotate,
            offset=offset,
            ratio=ratio,
            mode=mode,
            prob=prob,
            upper_iter=upper_iter)

    def apply(self, sample, context=None):
        sample['image'] = self.gridmask_op(sample['image'],
                                           sample['curr_iter'])
        return sample


@register_op
class RandomDistort(BaseOperator):
    """Random color distortion.
    Args:
        hue (list): hue settings. in [lower, upper, probability] format.
        saturation (list): saturation settings. in [lower, upper, probability] format.
        contrast (list): contrast settings. in [lower, upper, probability] format.
        brightness (list): brightness settings. in [lower, upper, probability] format.
        random_apply (bool): whether to apply in random (yolo) or fixed (SSD)
            order.
        count (int): the number of distortion functions to apply
        random_channel (bool): whether to swap channels randomly
    """

    def __init__(self,
                 hue=[-18, 18, 0.5],
                 saturation=[0.5, 1.5, 0.5],
                 contrast=[0.5, 1.5, 0.5],
                 brightness=[0.5, 1.5, 0.5],
                 random_apply=True,
                 count=4,
                 random_channel=False):
        super(RandomDistort, self).__init__()
        self.hue = hue
        self.saturation = saturation
        self.contrast = contrast
        self.brightness = brightness
        self.random_apply = random_apply
        self.count = count
        self.random_channel = random_channel

    def apply_hue(self, img):
        low, high, prob = self.hue
        if np.random.uniform(0., 1.) < prob:
            return img
        img = img.astype(np.float32)
        # it works, but the result differs from the HSV version
        delta = np.random.uniform(low, high)
        u = np.cos(delta * np.pi)
        w = np.sin(delta * np.pi)
        bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])
        tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],
                         [0.211, -0.523, 0.311]])
        ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],
                          [1.0, -1.107, 1.705]])
        t = np.dot(np.dot(ityiq, bt), tyiq).T
        img = np.dot(img, t)
        return img

    def apply_saturation(self, img):
        low, high, prob = self.saturation
        if np.random.uniform(0., 1.) < prob:
            return img
        delta = np.random.uniform(low, high)
        img = img.astype(np.float32)
        # it works, but the result differs from the HSV version
        gray = img * np.array([[[0.299, 0.587, 0.114]]], dtype=np.float32)
        gray = gray.sum(axis=2, keepdims=True)
        gray *= (1.0 - delta)
        img *= delta
        img += gray
        return img

    def apply_contrast(self, img):
        low, high, prob = self.contrast
        if np.random.uniform(0., 1.) < prob:
            return img
        delta = np.random.uniform(low, high)
        img = img.astype(np.float32)
        img *= delta
        return img

    def apply_brightness(self, img):
        low, high, prob = self.brightness
        if np.random.uniform(0., 1.) < prob:
            return img
        delta = np.random.uniform(low, high)
        img = img.astype(np.float32)
        img += delta
        return img

    def apply(self, sample, context=None):
        img = sample['image']
        if self.random_apply:
            functions = [
                self.apply_brightness, self.apply_contrast,
                self.apply_saturation, self.apply_hue
            ]
            distortions = np.random.permutation(functions)[:self.count]
            for func in distortions:
                img = func(img)
            sample['image'] = img
            return sample
        img = self.apply_brightness(img)
        mode = np.random.randint(0, 2)
        if mode:
            img = self.apply_contrast(img)
        img = self.apply_saturation(img)
        img = self.apply_hue(img)
        if not mode:
            img = self.apply_contrast(img)
        if self.random_channel:
            if np.random.randint(0, 2):
                img = img[..., np.random.permutation(3)]
        sample['image'] = img
        return sample
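
# Note the probability convention above: each sub-op returns the image
# unchanged when the random draw falls BELOW its configured probability,
# so a setting like [0.5, 1.5, 0.5] applies the distortion roughly half
# the time. Example usage (a minimal sketch; `img` stands in for a decoded
# float32 HWC RGB array):
#
#     distort = RandomDistort(random_apply=True, count=4)
#     sample = distort({'image': img})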


@register_op
class AutoAugment(BaseOperator):
    def __init__(self, autoaug_type="v1"):
        """
        Args:
            autoaug_type (str): autoaug type, support v0, v1, v2, v3, test
        """
        super(AutoAugment, self).__init__()
        self.autoaug_type = autoaug_type

    def apply(self, sample, context=None):
        """
        Learning Data Augmentation Strategies for Object Detection, see https://arxiv.org/abs/1906.11172
        """
        im = sample['image']
        gt_bbox = sample['gt_bbox']
        if not isinstance(im, np.ndarray):
            raise TypeError("{}: image is not a numpy array.".format(self))
        if len(im.shape) != 3:
            raise ImageError("{}: image is not 3-dimensional.".format(self))
        if len(gt_bbox) == 0:
            return sample
        height, width, _ = im.shape
        # autoaugment expects boxes in normalized [ymin, xmin, ymax, xmax]
        norm_gt_bbox = np.ones_like(gt_bbox, dtype=np.float32)
        norm_gt_bbox[:, 0] = gt_bbox[:, 1] / float(height)
        norm_gt_bbox[:, 1] = gt_bbox[:, 0] / float(width)
        norm_gt_bbox[:, 2] = gt_bbox[:, 3] / float(height)
        norm_gt_bbox[:, 3] = gt_bbox[:, 2] / float(width)
        from .autoaugment_utils import distort_image_with_autoaugment
        im, norm_gt_bbox = distort_image_with_autoaugment(im, norm_gt_bbox,
                                                          self.autoaug_type)
        gt_bbox[:, 0] = norm_gt_bbox[:, 1] * float(width)
        gt_bbox[:, 1] = norm_gt_bbox[:, 0] * float(height)
        gt_bbox[:, 2] = norm_gt_bbox[:, 3] * float(width)
        gt_bbox[:, 3] = norm_gt_bbox[:, 2] * float(height)
        sample['image'] = im
        sample['gt_bbox'] = gt_bbox
        return sample


@register_op
class RandomFlip(BaseOperator):
    def __init__(self, prob=0.5):
        """
        Args:
            prob (float): the probability of flipping image
        """
        super(RandomFlip, self).__init__()
        self.prob = prob
        if not (isinstance(self.prob, float)):
            raise TypeError("{}: input type is invalid.".format(self))

    def apply_segm(self, segms, height, width):
        def _flip_poly(poly, width):
            flipped_poly = np.array(poly)
            flipped_poly[0::2] = width - np.array(poly[0::2])
            return flipped_poly.tolist()

        def _flip_rle(rle, height, width):
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, height, width)
            mask = mask_util.decode(rle)
            mask = mask[:, ::-1]
            rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
            return rle

        flipped_segms = []
        for segm in segms:
            if is_poly(segm):
                # Polygon format
                flipped_segms.append(
                    [_flip_poly(poly, width) for poly in segm])
            else:
                # RLE format
                import pycocotools.mask as mask_util
                flipped_segms.append(_flip_rle(segm, height, width))
        return flipped_segms

    def apply_keypoint(self, gt_keypoint, width):
        for i in range(gt_keypoint.shape[1]):
            if i % 2 == 0:
                old_x = gt_keypoint[:, i].copy()
                gt_keypoint[:, i] = width - old_x
        return gt_keypoint

    def apply_image(self, image):
        return image[:, ::-1, :]

    def apply_bbox(self, bbox, width):
        oldx1 = bbox[:, 0].copy()
        oldx2 = bbox[:, 2].copy()
        bbox[:, 0] = width - oldx2
        bbox[:, 2] = width - oldx1
        return bbox

    def apply_rbox(self, bbox, width):
        oldx1 = bbox[:, 0].copy()
        oldx2 = bbox[:, 2].copy()
        oldx3 = bbox[:, 4].copy()
        oldx4 = bbox[:, 6].copy()
        bbox[:, 0] = width - oldx1
        bbox[:, 2] = width - oldx2
        bbox[:, 4] = width - oldx3
        bbox[:, 6] = width - oldx4
        bbox = [bbox_utils.get_best_begin_point_single(e) for e in bbox]
        return bbox

    def apply(self, sample, context=None):
        """Flip the image and bounding box.
        Operators:
            1. Flip the image numpy.
            2. Transform the bboxes' x coordinates.
               (Must judge whether the coordinates are normalized!)
            3. Transform the segmentations' x coordinates.
               (Must judge whether the coordinates are normalized!)
        Output:
            sample: the image, bounding box and segmentation part
                in sample are flipped.
        """
        if np.random.uniform(0, 1) < self.prob:
            im = sample['image']
            height, width = im.shape[:2]
            im = self.apply_image(im)
            if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
                sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], width)
            if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
                sample['gt_poly'] = self.apply_segm(sample['gt_poly'], height,
                                                    width)
            if 'gt_keypoint' in sample and len(sample['gt_keypoint']) > 0:
                sample['gt_keypoint'] = self.apply_keypoint(
                    sample['gt_keypoint'], width)
            if 'semantic' in sample and sample['semantic']:
                sample['semantic'] = sample['semantic'][:, ::-1]
            if 'gt_segm' in sample and sample['gt_segm'].any():
                sample['gt_segm'] = sample['gt_segm'][:, :, ::-1]
            if 'gt_rbox2poly' in sample and sample['gt_rbox2poly'].any():
                sample['gt_rbox2poly'] = self.apply_rbox(
                    sample['gt_rbox2poly'], width)
            sample['flipped'] = True
            sample['image'] = im
        return sample
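
# Worked example for apply_bbox (a minimal sketch): for an image of
# width 100, a box [10, 20, 40, 60] in xyxy format flips to
# [100 - 40, 20, 100 - 10, 60] == [60, 20, 90, 60]:
#
#     flip = RandomFlip(prob=1.0)  # prob=1.0 makes the flip deterministic
#     boxes = np.array([[10., 20., 40., 60.]])
#     print(flip.apply_bbox(boxes, width=100))  # [[60. 20. 90. 60.]]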


@register_op
class Resize(BaseOperator):
    def __init__(self, target_size, keep_ratio, interp=cv2.INTER_LINEAR):
        """
        Resize image to target size. If keep_ratio is True,
        resize the image's long side up to the maximum of target_size.
        If keep_ratio is False, resize the image to target size (h, w).
        Args:
            target_size (int|list): image target size
            keep_ratio (bool): whether to keep the aspect ratio
            interp (int): the interpolation method
        """
        super(Resize, self).__init__()
        self.keep_ratio = keep_ratio
        self.interp = interp
        if not isinstance(target_size, (Integral, Sequence)):
            raise TypeError(
                "Type of target_size is invalid. Must be Integer or List or Tuple, now is {}".
                format(type(target_size)))
        if isinstance(target_size, Integral):
            target_size = [target_size, target_size]
        self.target_size = target_size

    def apply_image(self, image, scale):
        im_scale_x, im_scale_y = scale
        return cv2.resize(
            image,
            None,
            None,
            fx=im_scale_x,
            fy=im_scale_y,
            interpolation=self.interp)

    def apply_bbox(self, bbox, scale, size):
        im_scale_x, im_scale_y = scale
        resize_w, resize_h = size
        bbox[:, 0::2] *= im_scale_x
        bbox[:, 1::2] *= im_scale_y
        bbox[:, 0::2] = np.clip(bbox[:, 0::2], 0, resize_w)
        bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, resize_h)
        return bbox

    def apply_segm(self, segms, im_size, scale):
        def _resize_poly(poly, im_scale_x, im_scale_y):
            resized_poly = np.array(poly).astype('float32')
            resized_poly[0::2] *= im_scale_x
            resized_poly[1::2] *= im_scale_y
            return resized_poly.tolist()

        def _resize_rle(rle, im_h, im_w, im_scale_x, im_scale_y):
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, im_h, im_w)
            mask = mask_util.decode(rle)
            mask = cv2.resize(
                mask,
                None,
                None,
                fx=im_scale_x,
                fy=im_scale_y,
                interpolation=self.interp)
            rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
            return rle

        im_h, im_w = im_size
        im_scale_x, im_scale_y = scale
        resized_segms = []
        for segm in segms:
            if is_poly(segm):
                # Polygon format
                resized_segms.append([
                    _resize_poly(poly, im_scale_x, im_scale_y) for poly in segm
                ])
            else:
                # RLE format
                import pycocotools.mask as mask_util
                resized_segms.append(
                    _resize_rle(segm, im_h, im_w, im_scale_x, im_scale_y))
        return resized_segms

    def apply(self, sample, context=None):
        """ Resize the image numpy.
        """
        im = sample['image']
        if not isinstance(im, np.ndarray):
            raise TypeError("{}: image type is not numpy.".format(self))
        if len(im.shape) != 3:
            raise ImageError('{}: image is not 3-dimensional.'.format(self))
        # apply image
        im_shape = im.shape
        if self.keep_ratio:
            im_size_min = np.min(im_shape[0:2])
            im_size_max = np.max(im_shape[0:2])
            target_size_min = np.min(self.target_size)
            target_size_max = np.max(self.target_size)
            im_scale = min(target_size_min / im_size_min,
                           target_size_max / im_size_max)
            resize_h = im_scale * float(im_shape[0])
            resize_w = im_scale * float(im_shape[1])
            im_scale_x = im_scale
            im_scale_y = im_scale
        else:
            resize_h, resize_w = self.target_size
            im_scale_y = resize_h / im_shape[0]
            im_scale_x = resize_w / im_shape[1]
        im = self.apply_image(sample['image'], [im_scale_x, im_scale_y])
        sample['image'] = im.astype(np.float32)
        sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
        if 'scale_factor' in sample:
            scale_factor = sample['scale_factor']
            sample['scale_factor'] = np.asarray(
                [scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
                dtype=np.float32)
        else:
            sample['scale_factor'] = np.asarray(
                [im_scale_y, im_scale_x], dtype=np.float32)
        # apply bbox
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'],
                                                [im_scale_x, im_scale_y],
                                                [resize_w, resize_h])
        # apply rbox
        if 'gt_rbox2poly' in sample:
            if np.array(sample['gt_rbox2poly']).shape[1] != 8:
                logger.warning(
                    "gt_rbox2poly's length should be 8, but actually is {}".
                    format(len(sample['gt_rbox2poly'])))
            sample['gt_rbox2poly'] = self.apply_bbox(sample['gt_rbox2poly'],
                                                     [im_scale_x, im_scale_y],
                                                     [resize_w, resize_h])
        # apply polygon
        if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
            sample['gt_poly'] = self.apply_segm(
                sample['gt_poly'], im_shape[:2], [im_scale_x, im_scale_y])
        # apply semantic
        if 'semantic' in sample and sample['semantic']:
            semantic = sample['semantic']
            semantic = cv2.resize(
                semantic.astype('float32'),
                None,
                None,
                fx=im_scale_x,
                fy=im_scale_y,
                interpolation=self.interp)
            semantic = np.asarray(semantic).astype('int32')
            semantic = np.expand_dims(semantic, 0)
            sample['semantic'] = semantic
        # apply gt_segm
        if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
            masks = [
                cv2.resize(
                    gt_segm,
                    None,
                    None,
                    fx=im_scale_x,
                    fy=im_scale_y,
                    interpolation=cv2.INTER_NEAREST)
                for gt_segm in sample['gt_segm']
            ]
            sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
        return sample
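
# Worked example for the keep_ratio branch (a minimal sketch): with
# target_size=[800, 1333] and a 600x1000 (h x w) input, the scale is
# min(800/600, 1333/1000) = 1.333, so the output is roughly 800x1333 and
# sample['scale_factor'] becomes [1.333, 1.333]:
#
#     resize = Resize(target_size=[800, 1333], keep_ratio=True)
#     sample = resize({'image': np.zeros((600, 1000, 3), dtype=np.uint8)})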


@register_op
class MultiscaleTestResize(BaseOperator):
    def __init__(self,
                 origin_target_size=[800, 1333],
                 target_size=[],
                 interp=cv2.INTER_LINEAR,
                 use_flip=True):
        """
        Rescale the image to each size in target_size, keeping the aspect
        ratio for every scale.
        Args:
            origin_target_size (list): origin target size of image
            target_size (list): A list of target sizes of image.
            interp (int): the interpolation method.
            use_flip (bool): whether use flip augmentation.
        """
        super(MultiscaleTestResize, self).__init__()
        self.interp = interp
        self.use_flip = use_flip
        if not isinstance(target_size, Sequence):
            raise TypeError(
                "Type of target_size is invalid. Must be List or Tuple, now is {}".
                format(type(target_size)))
        self.target_size = target_size
        if not isinstance(origin_target_size, Sequence):
            raise TypeError(
                "Type of origin_target_size is invalid. Must be List or Tuple, now is {}".
                format(type(origin_target_size)))
        self.origin_target_size = origin_target_size

    def apply(self, sample, context=None):
        """ Resize the image numpy for multi-scale test.
        """
        samples = []
        resizer = Resize(
            self.origin_target_size, keep_ratio=True, interp=self.interp)
        samples.append(resizer(sample.copy(), context))
        if self.use_flip:
            flipper = RandomFlip(1.1)  # prob > 1 makes the flip deterministic
            samples.append(flipper(sample.copy(), context=context))
        for size in self.target_size:
            resizer = Resize(size, keep_ratio=True, interp=self.interp)
            samples.append(resizer(sample.copy(), context))
        return samples


@register_op
class RandomResize(BaseOperator):
    def __init__(self,
                 target_size,
                 keep_ratio=True,
                 interp=cv2.INTER_LINEAR,
                 random_size=True,
                 random_interp=False):
        """
        Resize image to target size randomly. random target_size and interpolation method
        Args:
            target_size (int, list, tuple): image target size, if random_size is True, must be list or tuple
            keep_ratio (bool): whether to keep the aspect ratio, default True
            interp (int): the interpolation method
            random_size (bool): whether to randomly select a target size
            random_interp (bool): whether to randomly select an interpolation method
        """
        super(RandomResize, self).__init__()
        self.keep_ratio = keep_ratio
        self.interp = interp
        self.interps = [
            cv2.INTER_NEAREST,
            cv2.INTER_LINEAR,
            cv2.INTER_AREA,
            cv2.INTER_CUBIC,
            cv2.INTER_LANCZOS4,
        ]
        assert isinstance(target_size, (
            Integral, Sequence)), "target_size must be Integer, List or Tuple"
        if random_size and not isinstance(target_size, Sequence):
            raise TypeError(
                "Type of target_size is invalid when random_size is True. Must be List or Tuple, now is {}".
                format(type(target_size)))
        self.target_size = target_size
        self.random_size = random_size
        self.random_interp = random_interp

    def apply(self, sample, context=None):
        """ Resize the image numpy.
        """
        if self.random_size:
            target_size = random.choice(self.target_size)
        else:
            target_size = self.target_size
        if self.random_interp:
            interp = random.choice(self.interps)
        else:
            interp = self.interp
        resizer = Resize(target_size, self.keep_ratio, interp)
        return resizer(sample, context=context)


@register_op
class RandomExpand(BaseOperator):
    """Random expand the canvas.
    Args:
        ratio (float): maximum expansion ratio.
        prob (float): probability to expand.
        fill_value (list): color value used to fill the canvas. in RGB order.
    """

    def __init__(self, ratio=4., prob=0.5, fill_value=(127.5, 127.5, 127.5)):
        super(RandomExpand, self).__init__()
        assert ratio > 1.01, "expand ratio must be larger than 1.01"
        self.ratio = ratio
        self.prob = prob
        assert isinstance(fill_value, (Number, Sequence)), \
            "fill value must be either float or sequence"
        if isinstance(fill_value, Number):
            fill_value = (fill_value, ) * 3
        if not isinstance(fill_value, tuple):
            fill_value = tuple(fill_value)
        self.fill_value = fill_value

    def apply(self, sample, context=None):
        if np.random.uniform(0., 1.) < self.prob:
            return sample
        im = sample['image']
        height, width = im.shape[:2]
        ratio = np.random.uniform(1., self.ratio)
        h = int(height * ratio)
        w = int(width * ratio)
        if not h > height or not w > width:
            return sample
        y = np.random.randint(0, h - height)
        x = np.random.randint(0, w - width)
        offsets, size = [x, y], [h, w]
        # delegate the actual expansion to the Pad operator
        # (defined later in this module)
        pad = Pad(size,
                  pad_mode=-1,
                  offsets=offsets,
                  fill_value=self.fill_value)
        return pad(sample, context=context)
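
# Example usage (a minimal sketch). Note that the draw above returns the
# sample unchanged when uniform(0, 1) < prob, so expansion actually happens
# with probability (1 - prob); prob=0. below forces it. `img`, `boxes` and
# `labels` are placeholders for a decoded sample:
#
#     expand = RandomExpand(ratio=2., prob=0.)
#     sample = expand({'image': img, 'gt_bbox': boxes, 'gt_class': labels})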


@register_op
class CropWithSampling(BaseOperator):
    def __init__(self, batch_sampler, satisfy_all=False, avoid_no_bbox=True):
        """
        Args:
            batch_sampler (list): Multiple sets of different
                parameters for cropping.
            satisfy_all (bool): whether all boxes must satisfy.
            e.g.[[1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 1.0],
                 [1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0]]
            [max sample, max trial, min scale, max scale,
             min aspect ratio, max aspect ratio,
             min overlap, max overlap]
            avoid_no_bbox (bool): whether to avoid the
                situation where the box does not appear.
        """
        super(CropWithSampling, self).__init__()
        self.batch_sampler = batch_sampler
        self.satisfy_all = satisfy_all
        self.avoid_no_bbox = avoid_no_bbox

    def apply(self, sample, context):
        """
        Crop the image and modify bounding box.
        Operators:
            1. Scale the image width and height.
            2. Crop the image according to a random sample.
            3. Rescale the bounding box.
            4. Determine if the new bbox is satisfied in the new image.
        Returns:
            sample: the image, bounding box are replaced.
        """
        assert 'image' in sample, "image data not found"
        im = sample['image']
        gt_bbox = sample['gt_bbox']
        gt_class = sample['gt_class']
        im_height, im_width = im.shape[:2]
        gt_score = None
        if 'gt_score' in sample:
            gt_score = sample['gt_score']
        sampled_bbox = []
        gt_bbox = gt_bbox.tolist()
        for sampler in self.batch_sampler:
            found = 0
            for i in range(sampler[1]):
                if found >= sampler[0]:
                    break
                sample_bbox = generate_sample_bbox(sampler)
                if satisfy_sample_constraint(sampler, sample_bbox, gt_bbox,
                                             self.satisfy_all):
                    sampled_bbox.append(sample_bbox)
                    found = found + 1
        im = np.array(im)
        while sampled_bbox:
            idx = int(np.random.uniform(0, len(sampled_bbox)))
            sample_bbox = sampled_bbox.pop(idx)
            sample_bbox = clip_bbox(sample_bbox)
            crop_bbox, crop_class, crop_score = \
                filter_and_process(sample_bbox, gt_bbox, gt_class, scores=gt_score)
            if self.avoid_no_bbox:
                if len(crop_bbox) < 1:
                    continue
            xmin = int(sample_bbox[0] * im_width)
            xmax = int(sample_bbox[2] * im_width)
            ymin = int(sample_bbox[1] * im_height)
            ymax = int(sample_bbox[3] * im_height)
            im = im[ymin:ymax, xmin:xmax]
            sample['image'] = im
            sample['gt_bbox'] = crop_bbox
            sample['gt_class'] = crop_class
            sample['gt_score'] = crop_score
            return sample
        return sample
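
# Example batch_sampler entry (a minimal sketch; the fields follow the
# [max sample, max trial, min/max scale, min/max aspect ratio,
#  min/max overlap] layout documented above):
#
#     crop = CropWithSampling(
#         batch_sampler=[[1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 1.0]])
#     # up to 50 trials to find one crop with scale in [0.3, 1.0],
#     # aspect ratio in [0.5, 2.0], and min overlap 0.5 with a gt box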


@register_op
class CropWithDataAchorSampling(BaseOperator):
    def __init__(self,
                 batch_sampler,
                 anchor_sampler=None,
                 target_size=None,
                 das_anchor_scales=[16, 32, 64, 128],
                 sampling_prob=0.5,
                 min_size=8.,
                 avoid_no_bbox=True):
        """
        Args:
            anchor_sampler (list): anchor_sampling sets of different
                parameters for cropping.
            batch_sampler (list): Multiple sets of different
                parameters for cropping.
            e.g.[[1, 10, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.2, 0.0]]
                [[1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
                 [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
                 [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
                 [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
                 [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0]]
            [max sample, max trial, min scale, max scale,
             min aspect ratio, max aspect ratio,
             min overlap, max overlap, min coverage, max coverage]
            target_size (int): target image size.
            das_anchor_scales (list[float]): a list of anchor scales in data
                anchor sampling.
            min_size (float): minimum size of sampled bbox.
            avoid_no_bbox (bool): whether to avoid the
                situation where the box does not appear.
        """
        super(CropWithDataAchorSampling, self).__init__()
        self.anchor_sampler = anchor_sampler
        self.batch_sampler = batch_sampler
        self.target_size = target_size
        self.sampling_prob = sampling_prob
        self.min_size = min_size
        self.avoid_no_bbox = avoid_no_bbox
        self.das_anchor_scales = np.array(das_anchor_scales)

    def apply(self, sample, context):
        """
        Crop the image and modify bounding box.
        Operators:
            1. Scale the image width and height.
            2. Crop the image according to a random sample.
            3. Rescale the bounding box.
            4. Determine if the new bbox is satisfied in the new image.
        Returns:
            sample: the image, bounding box are replaced.
        """
        assert 'image' in sample, "image data not found"
        im = sample['image']
        gt_bbox = sample['gt_bbox']
        gt_class = sample['gt_class']
        image_height, image_width = im.shape[:2]
        gt_bbox[:, 0] /= image_width
        gt_bbox[:, 1] /= image_height
        gt_bbox[:, 2] /= image_width
        gt_bbox[:, 3] /= image_height
        gt_score = None
        if 'gt_score' in sample:
            gt_score = sample['gt_score']
        sampled_bbox = []
        gt_bbox = gt_bbox.tolist()
        prob = np.random.uniform(0., 1.)
        if prob > self.sampling_prob:  # anchor sampling
            assert self.anchor_sampler
            for sampler in self.anchor_sampler:
                found = 0
                for i in range(sampler[1]):
                    if found >= sampler[0]:
                        break
                    sample_bbox = data_anchor_sampling(
                        gt_bbox, image_width, image_height,
                        self.das_anchor_scales, self.target_size)
                    if sample_bbox == 0:
                        break
                    if satisfy_sample_constraint_coverage(sampler, sample_bbox,
                                                          gt_bbox):
                        sampled_bbox.append(sample_bbox)
                        found = found + 1
            im = np.array(im)
            while sampled_bbox:
                idx = int(np.random.uniform(0, len(sampled_bbox)))
                sample_bbox = sampled_bbox.pop(idx)
                if 'gt_keypoint' in sample.keys():
                    keypoints = (sample['gt_keypoint'],
                                 sample['keypoint_ignore'])
                    crop_bbox, crop_class, crop_score, gt_keypoints = \
                        filter_and_process(sample_bbox, gt_bbox, gt_class,
                                           scores=gt_score,
                                           keypoints=keypoints)
                else:
                    crop_bbox, crop_class, crop_score = filter_and_process(
                        sample_bbox, gt_bbox, gt_class, scores=gt_score)
                crop_bbox, crop_class, crop_score = bbox_area_sampling(
                    crop_bbox, crop_class, crop_score, self.target_size,
                    self.min_size)
                if self.avoid_no_bbox:
                    if len(crop_bbox) < 1:
                        continue
                im = crop_image_sampling(im, sample_bbox, image_width,
                                         image_height, self.target_size)
                height, width = im.shape[:2]
                crop_bbox[:, 0] *= width
                crop_bbox[:, 1] *= height
                crop_bbox[:, 2] *= width
                crop_bbox[:, 3] *= height
                sample['image'] = im
                sample['gt_bbox'] = crop_bbox
                sample['gt_class'] = crop_class
                if 'gt_score' in sample:
                    sample['gt_score'] = crop_score
                if 'gt_keypoint' in sample.keys():
                    sample['gt_keypoint'] = gt_keypoints[0]
                    sample['keypoint_ignore'] = gt_keypoints[1]
                return sample
            return sample
        else:
            for sampler in self.batch_sampler:
                found = 0
                for i in range(sampler[1]):
                    if found >= sampler[0]:
                        break
                    sample_bbox = generate_sample_bbox_square(
                        sampler, image_width, image_height)
                    if satisfy_sample_constraint_coverage(sampler, sample_bbox,
                                                          gt_bbox):
                        sampled_bbox.append(sample_bbox)
                        found = found + 1
            im = np.array(im)
            while sampled_bbox:
                idx = int(np.random.uniform(0, len(sampled_bbox)))
                sample_bbox = sampled_bbox.pop(idx)
                sample_bbox = clip_bbox(sample_bbox)
                if 'gt_keypoint' in sample.keys():
                    keypoints = (sample['gt_keypoint'],
                                 sample['keypoint_ignore'])
                    crop_bbox, crop_class, crop_score, gt_keypoints = \
                        filter_and_process(sample_bbox, gt_bbox, gt_class,
                                           scores=gt_score,
                                           keypoints=keypoints)
                else:
                    crop_bbox, crop_class, crop_score = filter_and_process(
                        sample_bbox, gt_bbox, gt_class, scores=gt_score)
                # sample bboxes according to the bbox area
                crop_bbox, crop_class, crop_score = bbox_area_sampling(
                    crop_bbox, crop_class, crop_score, self.target_size,
                    self.min_size)
                if self.avoid_no_bbox:
                    if len(crop_bbox) < 1:
                        continue
                xmin = int(sample_bbox[0] * image_width)
                xmax = int(sample_bbox[2] * image_width)
                ymin = int(sample_bbox[1] * image_height)
                ymax = int(sample_bbox[3] * image_height)
                im = im[ymin:ymax, xmin:xmax]
                height, width = im.shape[:2]
                crop_bbox[:, 0] *= width
                crop_bbox[:, 1] *= height
                crop_bbox[:, 2] *= width
                crop_bbox[:, 3] *= height
                sample['image'] = im
                sample['gt_bbox'] = crop_bbox
                sample['gt_class'] = crop_class
                if 'gt_score' in sample:
                    sample['gt_score'] = crop_score
                if 'gt_keypoint' in sample.keys():
                    sample['gt_keypoint'] = gt_keypoints[0]
                    sample['keypoint_ignore'] = gt_keypoints[1]
                return sample
            return sample


@register_op
class RandomCrop(BaseOperator):
    """Random crop image and bboxes.

    Args:
        aspect_ratio (list): aspect ratio of cropped region,
            in [min, max] format.
        thresholds (list): iou thresholds for deciding a valid bbox crop.
        scaling (list): ratio between a cropped region and the original image,
            in [min, max] format.
        num_attempts (int): number of tries before giving up.
        allow_no_crop (bool): allow returning the sample without actually cropping.
        cover_all_box (bool): ensure all bboxes are covered in the final crop.
        is_mask_crop (bool): whether to crop the segmentation.
    """

    def __init__(self,
                 aspect_ratio=[.5, 2.],
                 thresholds=[.0, .1, .3, .5, .7, .9],
                 scaling=[.3, 1.],
                 num_attempts=50,
                 allow_no_crop=True,
                 cover_all_box=False,
                 is_mask_crop=False):
        super(RandomCrop, self).__init__()
        self.aspect_ratio = aspect_ratio
        self.thresholds = thresholds
        self.scaling = scaling
        self.num_attempts = num_attempts
        self.allow_no_crop = allow_no_crop
        self.cover_all_box = cover_all_box
        self.is_mask_crop = is_mask_crop

    def crop_segms(self, segms, valid_ids, crop, height, width):
        def _crop_poly(segm, crop):
            xmin, ymin, xmax, ymax = crop
            crop_coord = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]
            crop_p = np.array(crop_coord).reshape(4, 2)
            crop_p = Polygon(crop_p)

            crop_segm = list()
            for poly in segm:
                poly = np.array(poly).reshape(len(poly) // 2, 2)
                polygon = Polygon(poly)
                if not polygon.is_valid:
                    exterior = polygon.exterior
                    multi_lines = exterior.intersection(exterior)
                    polygons = shapely.ops.polygonize(multi_lines)
                    polygon = MultiPolygon(polygons)
                multi_polygon = list()
                if isinstance(polygon, MultiPolygon):
                    multi_polygon = copy.deepcopy(polygon)
                else:
                    multi_polygon.append(copy.deepcopy(polygon))
                for per_polygon in multi_polygon:
                    inter = per_polygon.intersection(crop_p)
                    if not inter:
                        continue
                    if isinstance(inter, (MultiPolygon, GeometryCollection)):
                        for part in inter:
                            if not isinstance(part, Polygon):
                                continue
                            part = np.squeeze(
                                np.array(part.exterior.coords[:-1]).reshape(
                                    1, -1))
                            part[0::2] -= xmin
                            part[1::2] -= ymin
                            crop_segm.append(part.tolist())
                    elif isinstance(inter, Polygon):
                        crop_poly = np.squeeze(
                            np.array(inter.exterior.coords[:-1]).reshape(
                                1, -1))
                        crop_poly[0::2] -= xmin
                        crop_poly[1::2] -= ymin
                        crop_segm.append(crop_poly.tolist())
                    else:
                        continue
            return crop_segm

        def _crop_rle(rle, crop, height, width):
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, height, width)
            mask = mask_util.decode(rle)
            mask = mask[crop[1]:crop[3], crop[0]:crop[2]]
            rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
            return rle

        crop_segms = []
        for id in valid_ids:
            segm = segms[id]
            if is_poly(segm):
                import copy
                import shapely.ops
                from shapely.geometry import Polygon, MultiPolygon, GeometryCollection
                logging.getLogger("shapely").setLevel(logging.WARNING)
                # Polygon format
                crop_segms.append(_crop_poly(segm, crop))
            else:
                # RLE format
                import pycocotools.mask as mask_util
                crop_segms.append(_crop_rle(segm, crop, height, width))
        return crop_segms

    def apply(self, sample, context=None):
        if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
            return sample

        h, w = sample['image'].shape[:2]
        gt_bbox = sample['gt_bbox']

        # NOTE Original method attempts to generate one candidate for each
        # threshold then randomly sample one from the resulting list.
        # Here a short circuit approach is taken, i.e., randomly choose a
        # threshold and attempt to find a valid crop, and simply return the
        # first one found.
        # The probability is not exactly the same, kinda resembling the
        # "Monty Hall" problem. Actually carrying out the attempts will affect
        # observability (just like opening doors in the "Monty Hall" game).
        thresholds = list(self.thresholds)
        if self.allow_no_crop:
            thresholds.append('no_crop')
        np.random.shuffle(thresholds)

        for thresh in thresholds:
            if thresh == 'no_crop':
                return sample

            found = False
            for i in range(self.num_attempts):
                scale = np.random.uniform(*self.scaling)
                if self.aspect_ratio is not None:
                    min_ar, max_ar = self.aspect_ratio
                    aspect_ratio = np.random.uniform(
                        max(min_ar, scale**2), min(max_ar, scale**-2))
                    h_scale = scale / np.sqrt(aspect_ratio)
                    w_scale = scale * np.sqrt(aspect_ratio)
                else:
                    h_scale = np.random.uniform(*self.scaling)
                    w_scale = np.random.uniform(*self.scaling)
                crop_h = h * h_scale
                crop_w = w * w_scale
                if self.aspect_ratio is None:
                    if crop_h / crop_w < 0.5 or crop_h / crop_w > 2.0:
                        continue

                crop_h = int(crop_h)
                crop_w = int(crop_w)
                crop_y = np.random.randint(0, h - crop_h)
                crop_x = np.random.randint(0, w - crop_w)
                crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
                iou = self._iou_matrix(
                    gt_bbox, np.array(
                        [crop_box], dtype=np.float32))
                if iou.max() < thresh:
                    continue

                if self.cover_all_box and iou.min() < thresh:
                    continue

                cropped_box, valid_ids = self._crop_box_with_center_constraint(
                    gt_bbox, np.array(
                        crop_box, dtype=np.float32))
                if valid_ids.size > 0:
                    found = True
                    break

            if found:
                if self.is_mask_crop and 'gt_poly' in sample and len(sample[
                        'gt_poly']) > 0:
                    crop_polys = self.crop_segms(
                        sample['gt_poly'],
                        valid_ids,
                        np.array(
                            crop_box, dtype=np.int64),
                        h,
                        w)
                    if [] in crop_polys:
                        delete_id = list()
                        valid_polys = list()
                        for id, crop_poly in enumerate(crop_polys):
                            if crop_poly == []:
                                delete_id.append(id)
                            else:
                                valid_polys.append(crop_poly)
                        valid_ids = np.delete(valid_ids, delete_id)
                        if len(valid_polys) == 0:
                            return sample
                        sample['gt_poly'] = valid_polys
                    else:
                        sample['gt_poly'] = crop_polys

                if 'gt_segm' in sample:
                    sample['gt_segm'] = self._crop_segm(sample['gt_segm'],
                                                        crop_box)
                    sample['gt_segm'] = np.take(
                        sample['gt_segm'], valid_ids, axis=0)

                sample['image'] = self._crop_image(sample['image'], crop_box)
                sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
                sample['gt_class'] = np.take(
                    sample['gt_class'], valid_ids, axis=0)
                if 'gt_score' in sample:
                    sample['gt_score'] = np.take(
                        sample['gt_score'], valid_ids, axis=0)

                if 'is_crowd' in sample:
                    sample['is_crowd'] = np.take(
                        sample['is_crowd'], valid_ids, axis=0)

                if 'difficult' in sample:
                    sample['difficult'] = np.take(
                        sample['difficult'], valid_ids, axis=0)
                return sample

        return sample

    def _iou_matrix(self, a, b):
        tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])
        br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
        area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)
        area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
        area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
        area_o = (area_a[:, np.newaxis] + area_b - area_i)
        return area_i / (area_o + 1e-10)

    def _crop_box_with_center_constraint(self, box, crop):
        cropped_box = box.copy()
        cropped_box[:, :2] = np.maximum(box[:, :2], crop[:2])
        cropped_box[:, 2:] = np.minimum(box[:, 2:], crop[2:])
        cropped_box[:, :2] -= crop[:2]
        cropped_box[:, 2:] -= crop[:2]
        centers = (box[:, :2] + box[:, 2:]) / 2
        valid = np.logical_and(crop[:2] <= centers,
                               centers < crop[2:]).all(axis=1)
        valid = np.logical_and(
            valid, (cropped_box[:, :2] < cropped_box[:, 2:]).all(axis=1))
        return cropped_box, np.where(valid)[0]

    def _crop_image(self, img, crop):
        x1, y1, x2, y2 = crop
        return img[y1:y2, x1:x2, :]

    def _crop_segm(self, segm, crop):
        x1, y1, x2, y2 = crop
        return segm[:, y1:y2, x1:x2]
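
# Usage sketch (illustrative only, not part of the original pipeline code):
# a hypothetical caller could exercise RandomCrop directly on a sample dict.
#
#     crop_op = RandomCrop(aspect_ratio=[.5, 2.], scaling=[.3, 1.])
#     sample = {
#         'image': np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8),
#         'gt_bbox': np.array([[50., 60., 200., 220.]], dtype=np.float32),
#         'gt_class': np.array([[1]], dtype=np.int32),
#     }
#     sample = crop_op.apply(sample)  # image cropped, boxes shifted/filtered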


@register_op
class RandomScaledCrop(BaseOperator):
    """Resize image and bbox based on long side (with optional random scaling),
    then crop or pad image to target size.

    Args:
        target_dim (int): target size.
        scale_range (list): random scale range.
        interp (int): interpolation method, default to `cv2.INTER_LINEAR`.
    """

    def __init__(self,
                 target_dim=512,
                 scale_range=[.1, 2.],
                 interp=cv2.INTER_LINEAR):
        super(RandomScaledCrop, self).__init__()
        self.target_dim = target_dim
        self.scale_range = scale_range
        self.interp = interp

    def apply(self, sample, context=None):
        img = sample['image']
        h, w = img.shape[:2]
        random_scale = np.random.uniform(*self.scale_range)
        dim = self.target_dim
        random_dim = int(dim * random_scale)
        dim_max = max(h, w)
        scale = random_dim / dim_max
        # cv2.resize and array slicing require integer sizes
        resize_w = int(round(w * scale))
        resize_h = int(round(h * scale))
        offset_x = int(max(0, np.random.uniform(0., resize_w - dim)))
        offset_y = int(max(0, np.random.uniform(0., resize_h - dim)))

        img = cv2.resize(img, (resize_w, resize_h), interpolation=self.interp)
        img = np.array(img)
        canvas = np.zeros((dim, dim, 3), dtype=img.dtype)
        canvas[:min(dim, resize_h), :min(dim, resize_w), :] = img[
            offset_y:offset_y + dim, offset_x:offset_x + dim, :]
        sample['image'] = canvas
        sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
        scale_factor = sample['scale_factor']
        sample['scale_factor'] = np.asarray(
            [scale_factor[0] * scale, scale_factor[1] * scale],
            dtype=np.float32)

        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            scale_array = np.array([scale, scale] * 2, dtype=np.float32)
            shift_array = np.array([offset_x, offset_y] * 2, dtype=np.float32)
            boxes = sample['gt_bbox'] * scale_array - shift_array
            boxes = np.clip(boxes, 0, dim - 1)
            # filter boxes with no area
            area = np.prod(boxes[..., 2:] - boxes[..., :2], axis=1)
            valid = (area > 1.).nonzero()[0]
            sample['gt_bbox'] = boxes[valid]
            sample['gt_class'] = sample['gt_class'][valid]
        return sample
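
# Worked example of the scale math above (numbers are illustrative):
# with target_dim=512, an image of (h, w) = (480, 640) and random_scale=1.0,
# random_dim = 512 and scale = 512 / 640 = 0.8, so the image is resized to
# 384 x 512 (h x w). Since resize_w == dim, offset_x is 0; the resized image
# is pasted into a 512 x 512 zero canvas and the bottom 128 rows stay padding.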


@register_op
class Cutmix(BaseOperator):
    def __init__(self, alpha=1.5, beta=1.5):
        """
        CutMix: Regularization Strategy to Train Strong Classifiers with
        Localizable Features, see https://arxiv.org/abs/1905.04899
        Cutmix image and gt_bbox/gt_score
        Args:
            alpha (float): alpha parameter of beta distribution
            beta (float): beta parameter of beta distribution
        """
        super(Cutmix, self).__init__()
        self.alpha = alpha
        self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha should be positive in {}".format(self))
        if self.beta <= 0.0:
            raise ValueError("beta should be positive in {}".format(self))

    def apply_image(self, img1, img2, factor):
        """ _rand_bbox """
        h = max(img1.shape[0], img2.shape[0])
        w = max(img1.shape[1], img2.shape[1])
        cut_rat = np.sqrt(1. - factor)

        cut_w = np.int32(w * cut_rat)
        cut_h = np.int32(h * cut_rat)

        # uniform
        cx = np.random.randint(w)
        cy = np.random.randint(h)

        bbx1 = np.clip(cx - cut_w // 2, 0, w - 1)
        bby1 = np.clip(cy - cut_h // 2, 0, h - 1)
        bbx2 = np.clip(cx + cut_w // 2, 0, w - 1)
        bby2 = np.clip(cy + cut_h // 2, 0, h - 1)

        img_1_pad = np.zeros((h, w, img1.shape[2]), 'float32')
        img_1_pad[:img1.shape[0], :img1.shape[1], :] = \
            img1.astype('float32')
        img_2_pad = np.zeros((h, w, img2.shape[2]), 'float32')
        img_2_pad[:img2.shape[0], :img2.shape[1], :] = \
            img2.astype('float32')
        img_1_pad[bby1:bby2, bbx1:bbx2, :] = img_2_pad[bby1:bby2, bbx1:bbx2, :]
        return img_1_pad

    def __call__(self, sample, context=None):
        if not isinstance(sample, Sequence):
            return sample

        assert len(sample) == 2, 'cutmix need two samples'

        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        if factor >= 1.0:
            return sample[0]
        if factor <= 0.0:
            return sample[1]
        img1 = sample[0]['image']
        img2 = sample[1]['image']
        img = self.apply_image(img1, img2, factor)
        gt_bbox1 = sample[0]['gt_bbox']
        gt_bbox2 = sample[1]['gt_bbox']
        gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
        gt_class1 = sample[0]['gt_class']
        gt_class2 = sample[1]['gt_class']
        gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
        gt_score1 = np.ones_like(sample[0]['gt_class'])
        gt_score2 = np.ones_like(sample[1]['gt_class'])
        gt_score = np.concatenate(
            (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
        result = copy.deepcopy(sample[0])
        result['image'] = img
        result['gt_bbox'] = gt_bbox
        result['gt_score'] = gt_score
        result['gt_class'] = gt_class
        if 'is_crowd' in sample[0]:
            is_crowd1 = sample[0]['is_crowd']
            is_crowd2 = sample[1]['is_crowd']
            is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
            result['is_crowd'] = is_crowd
        if 'difficult' in sample[0]:
            is_difficult1 = sample[0]['difficult']
            is_difficult2 = sample[1]['difficult']
            is_difficult = np.concatenate(
                (is_difficult1, is_difficult2), axis=0)
            result['difficult'] = is_difficult
        return result
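
# Worked example of the CutMix geometry above: for factor = 0.75,
# cut_rat = sqrt(1 - 0.75) = 0.5, so the pasted patch spans half of each
# side, i.e. roughly 25% of the canvas area comes from the second image.
# Accordingly, boxes from image 1 keep gt_score 0.75 and boxes from
# image 2 get gt_score 0.25.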


@register_op
class Mixup(BaseOperator):
    def __init__(self, alpha=1.5, beta=1.5):
        """ Mixup image and gt_bbox/gt_score
        Args:
            alpha (float): alpha parameter of beta distribution
            beta (float): beta parameter of beta distribution
        """
        super(Mixup, self).__init__()
        self.alpha = alpha
        self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha should be positive in {}".format(self))
        if self.beta <= 0.0:
            raise ValueError("beta should be positive in {}".format(self))

    def apply_image(self, img1, img2, factor):
        h = max(img1.shape[0], img2.shape[0])
        w = max(img1.shape[1], img2.shape[1])
        img = np.zeros((h, w, img1.shape[2]), 'float32')
        img[:img1.shape[0], :img1.shape[1], :] = \
            img1.astype('float32') * factor
        img[:img2.shape[0], :img2.shape[1], :] += \
            img2.astype('float32') * (1.0 - factor)
        return img.astype('uint8')

    def __call__(self, sample, context=None):
        if not isinstance(sample, Sequence):
            return sample

        assert len(sample) == 2, 'mixup need two samples'

        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        if factor >= 1.0:
            return sample[0]
        if factor <= 0.0:
            return sample[1]
        im = self.apply_image(sample[0]['image'], sample[1]['image'], factor)
        result = copy.deepcopy(sample[0])
        result['image'] = im
        # apply bbox and score
        if 'gt_bbox' in sample[0]:
            gt_bbox1 = sample[0]['gt_bbox']
            gt_bbox2 = sample[1]['gt_bbox']
            gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
            result['gt_bbox'] = gt_bbox
        if 'gt_class' in sample[0]:
            gt_class1 = sample[0]['gt_class']
            gt_class2 = sample[1]['gt_class']
            gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
            result['gt_class'] = gt_class

            gt_score1 = np.ones_like(sample[0]['gt_class'])
            gt_score2 = np.ones_like(sample[1]['gt_class'])
            gt_score = np.concatenate(
                (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
            result['gt_score'] = gt_score.astype('float32')
        if 'is_crowd' in sample[0]:
            is_crowd1 = sample[0]['is_crowd']
            is_crowd2 = sample[1]['is_crowd']
            is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
            result['is_crowd'] = is_crowd
        if 'difficult' in sample[0]:
            is_difficult1 = sample[0]['difficult']
            is_difficult2 = sample[1]['difficult']
            is_difficult = np.concatenate(
                (is_difficult1, is_difficult2), axis=0)
            result['difficult'] = is_difficult
        if 'gt_ide' in sample[0]:
            gt_ide1 = sample[0]['gt_ide']
            gt_ide2 = sample[1]['gt_ide']
            gt_ide = np.concatenate((gt_ide1, gt_ide2), axis=0)
            result['gt_ide'] = gt_ide
        return result
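
# In contrast to Cutmix's spatial patch, Mixup blends whole images:
# out = img1 * factor + img2 * (1 - factor), with both inputs zero-padded to
# the larger (h, w). The boxes of both inputs are concatenated, and their
# gt_score is weighted by factor and (1 - factor) respectively.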


@register_op
class NormalizeBox(BaseOperator):
    """Transform the bounding box's coordinates to [0, 1]."""

    def __init__(self):
        super(NormalizeBox, self).__init__()

    def apply(self, sample, context):
        im = sample['image']
        gt_bbox = sample['gt_bbox']
        height, width, _ = im.shape
        for i in range(gt_bbox.shape[0]):
            gt_bbox[i][0] = gt_bbox[i][0] / width
            gt_bbox[i][1] = gt_bbox[i][1] / height
            gt_bbox[i][2] = gt_bbox[i][2] / width
            gt_bbox[i][3] = gt_bbox[i][3] / height
        sample['gt_bbox'] = gt_bbox

        if 'gt_keypoint' in sample.keys():
            gt_keypoint = sample['gt_keypoint']

            for i in range(gt_keypoint.shape[1]):
                if i % 2:
                    gt_keypoint[:, i] = gt_keypoint[:, i] / height
                else:
                    gt_keypoint[:, i] = gt_keypoint[:, i] / width
            sample['gt_keypoint'] = gt_keypoint

        return sample


@register_op
class BboxXYXY2XYWH(BaseOperator):
    """
    Convert bbox XYXY format to XYWH format.
    [x0, y0, x1, y1] -> [center_x, center_y, width, height]
    """

    def __init__(self):
        super(BboxXYXY2XYWH, self).__init__()

    def apply(self, sample, context=None):
        assert 'gt_bbox' in sample
        bbox = sample['gt_bbox']
        bbox[:, 2:4] = bbox[:, 2:4] - bbox[:, :2]
        bbox[:, :2] = bbox[:, :2] + bbox[:, 2:4] / 2.
        sample['gt_bbox'] = bbox
        return sample
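
# Worked example for the in-place conversion above:
# [x0, y0, x1, y1] = [10, 20, 50, 80]
#   -> width/height: [50 - 10, 80 - 20] = [40, 60]
#   -> center:       [10 + 40 / 2, 20 + 60 / 2] = [30, 50]
# so the box becomes [30, 50, 40, 60] in CXCYWH format.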


@register_op
class PadBox(BaseOperator):
    def __init__(self, num_max_boxes=50):
        """
        Pad zeros to bboxes if the number of bboxes is less than num_max_boxes.
        Args:
            num_max_boxes (int): the max number of bboxes
        """
        self.num_max_boxes = num_max_boxes
        super(PadBox, self).__init__()

    def apply(self, sample, context=None):
        assert 'gt_bbox' in sample
        bbox = sample['gt_bbox']
        gt_num = min(self.num_max_boxes, len(bbox))
        num_max = self.num_max_boxes
        # fields = context['fields'] if context else []
        pad_bbox = np.zeros((num_max, 4), dtype=np.float32)
        if gt_num > 0:
            pad_bbox[:gt_num, :] = bbox[:gt_num, :]
        sample['gt_bbox'] = pad_bbox
        if 'gt_class' in sample:
            pad_class = np.zeros((num_max, ), dtype=np.int32)
            if gt_num > 0:
                pad_class[:gt_num] = sample['gt_class'][:gt_num, 0]
            sample['gt_class'] = pad_class
        if 'gt_score' in sample:
            pad_score = np.zeros((num_max, ), dtype=np.float32)
            if gt_num > 0:
                pad_score[:gt_num] = sample['gt_score'][:gt_num, 0]
            sample['gt_score'] = pad_score
        # In training, ops such as ExpandImage expand gt_bbox and gt_class
        # but not difficult, so guard each field by its own presence.
        if 'difficult' in sample:
            pad_diff = np.zeros((num_max, ), dtype=np.int32)
            if gt_num > 0:
                pad_diff[:gt_num] = sample['difficult'][:gt_num, 0]
            sample['difficult'] = pad_diff
        if 'is_crowd' in sample:
            pad_crowd = np.zeros((num_max, ), dtype=np.int32)
            if gt_num > 0:
                pad_crowd[:gt_num] = sample['is_crowd'][:gt_num, 0]
            sample['is_crowd'] = pad_crowd
        if 'gt_ide' in sample:
            pad_ide = np.zeros((num_max, ), dtype=np.int32)
            if gt_num > 0:
                pad_ide[:gt_num] = sample['gt_ide'][:gt_num, 0]
            sample['gt_ide'] = pad_ide
        return sample
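
# Usage sketch (illustrative): with num_max_boxes=50 and a sample holding
# 3 boxes, PadBox returns gt_bbox of shape (50, 4) where rows 3..49 are
# zeros, and flattens gt_class/gt_score from (n, 1) columns into fixed
# (50,) vectors, giving every sample in a batch the same shape.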


@register_op
class DebugVisibleImage(BaseOperator):
    """
    In debug mode, visualize images according to `gt_bbox`.
    (Currently only supported when not cropping or flipping the image.)
    """

    def __init__(self, output_dir='output/debug', is_normalized=False):
        super(DebugVisibleImage, self).__init__()
        self.is_normalized = is_normalized
        self.output_dir = output_dir
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        if not isinstance(self.is_normalized, bool):
            raise TypeError("{}: input type is invalid.".format(self))

    def apply(self, sample, context=None):
        image = Image.fromarray(sample['image'].astype(np.uint8))
        out_file_name = '{:012d}.jpg'.format(sample['im_id'][0])
        width = sample['w']
        height = sample['h']
        gt_bbox = sample['gt_bbox']
        gt_class = sample['gt_class']
        draw = ImageDraw.Draw(image)
        for i in range(gt_bbox.shape[0]):
            if self.is_normalized:
                gt_bbox[i][0] = gt_bbox[i][0] * width
                gt_bbox[i][1] = gt_bbox[i][1] * height
                gt_bbox[i][2] = gt_bbox[i][2] * width
                gt_bbox[i][3] = gt_bbox[i][3] * height

            xmin, ymin, xmax, ymax = gt_bbox[i]
            draw.line(
                [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
                 (xmin, ymin)],
                width=2,
                fill='green')
            # draw label
            text = str(gt_class[i][0])
            tw, th = draw.textsize(text)
            draw.rectangle(
                [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill='green')
            draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))

        if 'gt_keypoint' in sample.keys():
            gt_keypoint = sample['gt_keypoint']
            if self.is_normalized:
                for i in range(gt_keypoint.shape[1]):
                    if i % 2:
                        gt_keypoint[:, i] = gt_keypoint[:, i] * height
                    else:
                        gt_keypoint[:, i] = gt_keypoint[:, i] * width
            for i in range(gt_keypoint.shape[0]):
                keypoint = gt_keypoint[i]
                for j in range(int(keypoint.shape[0] / 2)):
                    x1 = round(keypoint[2 * j]).astype(np.int32)
                    y1 = round(keypoint[2 * j + 1]).astype(np.int32)
                    draw.ellipse(
                        (x1, y1, x1 + 5, y1 + 5),
                        fill='green',
                        outline='green')
        save_path = os.path.join(self.output_dir, out_file_name)
        image.save(save_path, quality=95)
        return sample


@register_op
class Pad(BaseOperator):
    def __init__(self,
                 size=None,
                 size_divisor=32,
                 pad_mode=0,
                 offsets=None,
                 fill_value=(127.5, 127.5, 127.5)):
        """
        Pad image to a specified size or to a multiple of size_divisor.
        Args:
            size (int, Sequence): image target size; if None, pad to a multiple of size_divisor, default None
            size_divisor (int): size divisor, default 32
            pad_mode (int): pad mode, currently only supports four modes [-1, 0, 1, 2]. if -1, use specified offsets;
                if 0, only pad to right and bottom; if 1, pad according to center; if 2, only pad left and top
            offsets (list): [offset_x, offset_y], specify offsets while padding, only supported when pad_mode=-1
            fill_value (tuple): rgb value of the pad area, default (127.5, 127.5, 127.5)
        """
        super(Pad, self).__init__()

        # size=None is allowed and means "pad to a multiple of size_divisor"
        if size is not None and not isinstance(size, (int, Sequence)):
            raise TypeError(
                "Type of size is invalid. Must be int or Sequence, now is {}".
                format(type(size)))

        if isinstance(size, int):
            size = [size, size]

        assert pad_mode in [
            -1, 0, 1, 2
        ], 'currently only supports four modes [-1, 0, 1, 2]'
        if pad_mode == -1:
            assert offsets, 'if pad_mode is -1, offsets should not be None'

        self.size = size
        self.size_divisor = size_divisor
        self.pad_mode = pad_mode
        self.fill_value = fill_value
        self.offsets = offsets

    def apply_segm(self, segms, offsets, im_size, size):
        def _expand_poly(poly, x, y):
            expanded_poly = np.array(poly)
            expanded_poly[0::2] += x
            expanded_poly[1::2] += y
            return expanded_poly.tolist()

        def _expand_rle(rle, x, y, height, width, h, w):
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, height, width)
            mask = mask_util.decode(rle)
            expanded_mask = np.full((h, w), 0).astype(mask.dtype)
            expanded_mask[y:y + height, x:x + width] = mask
            rle = mask_util.encode(
                np.array(
                    expanded_mask, order='F', dtype=np.uint8))
            return rle

        x, y = offsets
        height, width = im_size
        h, w = size
        expanded_segms = []
        for segm in segms:
            if is_poly(segm):
                # Polygon format
                expanded_segms.append(
                    [_expand_poly(poly, x, y) for poly in segm])
            else:
                # RLE format
                import pycocotools.mask as mask_util
                expanded_segms.append(
                    _expand_rle(segm, x, y, height, width, h, w))
        return expanded_segms

    def apply_bbox(self, bbox, offsets):
        return bbox + np.array(offsets * 2, dtype=np.float32)

    def apply_keypoint(self, keypoints, offsets):
        n = len(keypoints[0]) // 2
        return keypoints + np.array(offsets * n, dtype=np.float32)

    def apply_image(self, image, offsets, im_size, size):
        x, y = offsets
        im_h, im_w = im_size
        h, w = size
        canvas = np.ones((h, w, 3), dtype=np.float32)
        canvas *= np.array(self.fill_value, dtype=np.float32)
        canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)
        return canvas

    def apply(self, sample, context=None):
        im = sample['image']
        im_h, im_w = im.shape[:2]
        if self.size:
            h, w = self.size
            assert (
                im_h <= h and im_w <= w
            ), '(h, w) of target size should be greater than (im_h, im_w)'
        else:
            h = int(np.ceil(im_h / self.size_divisor) * self.size_divisor)
            w = int(np.ceil(im_w / self.size_divisor) * self.size_divisor)

        if h == im_h and w == im_w:
            sample['image'] = im.astype(np.float32)
            return sample

        if self.pad_mode == -1:
            offset_x, offset_y = self.offsets
        elif self.pad_mode == 0:
            offset_y, offset_x = 0, 0
        elif self.pad_mode == 1:
            offset_y, offset_x = (h - im_h) // 2, (w - im_w) // 2
        else:
            offset_y, offset_x = h - im_h, w - im_w

        offsets, im_size, size = [offset_x, offset_y], [im_h, im_w], [h, w]

        sample['image'] = self.apply_image(im, offsets, im_size, size)

        if self.pad_mode == 0:
            return sample
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], offsets)

        if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
            sample['gt_poly'] = self.apply_segm(sample['gt_poly'], offsets,
                                                im_size, size)

        if 'gt_keypoint' in sample and len(sample['gt_keypoint']) > 0:
            sample['gt_keypoint'] = self.apply_keypoint(sample['gt_keypoint'],
                                                        offsets)

        return sample
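
# Worked example for the divisor branch above: an image of (im_h, im_w) =
# (375, 500) with size_divisor=32 is padded to (384, 512), since
# ceil(375 / 32) * 32 = 384 and ceil(500 / 32) * 32 = 512. With pad_mode=0
# the image sits at the top-left and only the right/bottom are filled, so
# the offsets are zero and the annotations need no shifting.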


@register_op
class Poly2Mask(BaseOperator):
    """
    gt poly to mask annotations
    """

    def __init__(self):
        super(Poly2Mask, self).__init__()
        import pycocotools.mask as maskUtils
        self.maskutils = maskUtils

    def _poly2mask(self, mask_ann, img_h, img_w):
        if isinstance(mask_ann, list):
            # polygon -- a single object might consist of multiple parts
            # we merge all parts into one mask rle code
            rles = self.maskutils.frPyObjects(mask_ann, img_h, img_w)
            rle = self.maskutils.merge(rles)
        elif isinstance(mask_ann['counts'], list):
            # uncompressed RLE
            rle = self.maskutils.frPyObjects(mask_ann, img_h, img_w)
        else:
            # rle
            rle = mask_ann
        mask = self.maskutils.decode(rle)
        return mask

    def apply(self, sample, context=None):
        assert 'gt_poly' in sample
        im_h = sample['h']
        im_w = sample['w']
        masks = [
            self._poly2mask(gt_poly, im_h, im_w)
            for gt_poly in sample['gt_poly']
        ]
        sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
        return sample
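
# Minimal sketch of what _poly2mask does for the polygon case, using only
# the pycocotools calls that appear above (shapes are illustrative):
#
#     import pycocotools.mask as maskUtils
#     poly = [[10., 10., 40., 10., 40., 30., 10., 30.]]  # one rectangle part
#     rles = maskUtils.frPyObjects(poly, 100, 100)       # img_h, img_w
#     mask = maskUtils.decode(maskUtils.merge(rles))     # (100, 100) uint8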


@register_op
class Rbox2Poly(BaseOperator):
    """
    Convert rbbox format to poly format.
    """

    def __init__(self):
        super(Rbox2Poly, self).__init__()

    def apply(self, sample, context=None):
        assert 'gt_rbox' in sample
        assert sample['gt_rbox'].shape[1] == 5
        rrects = sample['gt_rbox']
        x_ctr = rrects[:, 0]
        y_ctr = rrects[:, 1]
        width = rrects[:, 2]
        height = rrects[:, 3]
        x1 = x_ctr - width / 2.0
        y1 = y_ctr - height / 2.0
        x2 = x_ctr + width / 2.0
        y2 = y_ctr + height / 2.0
        sample['gt_bbox'] = np.stack([x1, y1, x2, y2], axis=1)
        polys = bbox_utils.rbox2poly_np(rrects)
        sample['gt_rbox2poly'] = polys
        return sample


@register_op
class AugmentHSV(BaseOperator):
    """
    Augment the S and V channels of image data (and the H channel as well
    when hgain/sgain/vgain are set).

    Args:
        fraction (float): the fraction for augment. Default: 0.5.
        is_bgr (bool): whether the image is BGR mode. Default: True.
        hgain (float): H channel gains
        sgain (float): S channel gains
        vgain (float): V channel gains
    """

    def __init__(self,
                 fraction=0.50,
                 is_bgr=True,
                 hgain=None,
                 sgain=None,
                 vgain=None):
        super(AugmentHSV, self).__init__()
        self.fraction = fraction
        self.is_bgr = is_bgr
        self.hgain = hgain
        self.sgain = sgain
        self.vgain = vgain
        self.use_hsvgain = False if hgain is None else True

    def apply(self, sample, context=None):
        img = sample['image']
        if self.is_bgr:
            img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        else:
            img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)

        if self.use_hsvgain:
            hsv_augs = np.random.uniform(
                -1, 1, 3) * [self.hgain, self.sgain, self.vgain]
            # random selection of h, s, v
            hsv_augs *= np.random.randint(0, 2, 3)
            img_hsv[..., 0] = (img_hsv[..., 0] + hsv_augs[0]) % 180
            img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_augs[1], 0, 255)
            img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_augs[2], 0, 255)
        else:
            S = img_hsv[:, :, 1].astype(np.float32)
            V = img_hsv[:, :, 2].astype(np.float32)

            a = (random.random() * 2 - 1) * self.fraction + 1
            S *= a
            if a > 1:
                np.clip(S, a_min=0, a_max=255, out=S)

            a = (random.random() * 2 - 1) * self.fraction + 1
            V *= a
            if a > 1:
                np.clip(V, a_min=0, a_max=255, out=V)

            img_hsv[:, :, 1] = S.astype(np.uint8)
            img_hsv[:, :, 2] = V.astype(np.uint8)

        if self.is_bgr:
            cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
        else:
            cv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB, dst=img)

        sample['image'] = img.astype(np.float32)
        return sample
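
# Worked example for the fraction branch above: with fraction=0.5 the gain
# a is drawn from [0.5, 1.5]; S and V are multiplied by independent draws
# and clipped back to [0, 255] only when a > 1 (a gain below 1 cannot leave
# the valid range, so no clip is needed there).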


@register_op
class Norm2PixelBbox(BaseOperator):
    """
    Transform the bounding box's coordinates from normalized [0, 1] values
    to pixels.
    """

    def __init__(self):
        super(Norm2PixelBbox, self).__init__()

    def apply(self, sample, context=None):
        assert 'gt_bbox' in sample
        bbox = sample['gt_bbox']
        height, width = sample['image'].shape[:2]
        bbox[:, 0::2] = bbox[:, 0::2] * width
        bbox[:, 1::2] = bbox[:, 1::2] * height
        sample['gt_bbox'] = bbox
        return sample


@register_op
class BboxCXCYWH2XYXY(BaseOperator):
    """
    Convert bbox CXCYWH format to XYXY format.
    [center_x, center_y, width, height] -> [x0, y0, x1, y1]
    """

    def __init__(self):
        super(BboxCXCYWH2XYXY, self).__init__()

    def apply(self, sample, context=None):
        assert 'gt_bbox' in sample
        bbox0 = sample['gt_bbox']
        bbox = bbox0.copy()
        bbox[:, :2] = bbox0[:, :2] - bbox0[:, 2:4] / 2.
        bbox[:, 2:4] = bbox0[:, :2] + bbox0[:, 2:4] / 2.
        sample['gt_bbox'] = bbox
        return sample
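
# This inverts BboxXYXY2XYWH above; with the same numbers,
# [30, 50, 40, 60] -> [30 - 20, 50 - 30, 30 + 20, 50 + 30] = [10, 20, 50, 80].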


@register_op
class RandomResizeCrop(BaseOperator):
    """Random resize and crop image and bboxes.

    Args:
        resizes (list): resize image to one of resizes. if keep_ratio is True and mode is
            'long', resize the image's long side to the maximum of target_size, if keep_ratio is
            True and mode is 'short', resize the image's short side to the minimum of target_size.
        cropsizes (list): crop sizes after resize, [(min_crop_1, max_crop_1), ...]
        mode (str): resize mode, `long` or `short`. Details see resizes.
        prob (float): probability of this op.
        keep_ratio (bool): whether to keep the aspect ratio, default True.
        interp (int): the interpolation method
        thresholds (list): iou thresholds for deciding a valid bbox crop.
        num_attempts (int): number of tries before giving up.
        allow_no_crop (bool): allow returning the sample without actually cropping.
        cover_all_box (bool): ensure all bboxes are covered in the final crop.
        is_mask_crop (bool): whether to crop the segmentation.
    """

    def __init__(
            self,
            resizes,
            cropsizes,
            prob=0.5,
            mode='short',
            keep_ratio=True,
            interp=cv2.INTER_LINEAR,
            num_attempts=3,
            cover_all_box=False,
            allow_no_crop=False,
            thresholds=[0.3, 0.5, 0.7],
            is_mask_crop=False, ):
        super(RandomResizeCrop, self).__init__()

        self.resizes = resizes
        self.cropsizes = cropsizes
        self.prob = prob
        self.mode = mode

        self.resizer = Resize(0, keep_ratio=keep_ratio, interp=interp)
        self.croper = RandomCrop(
            num_attempts=num_attempts,
            cover_all_box=cover_all_box,
            thresholds=thresholds,
            allow_no_crop=allow_no_crop,
            is_mask_crop=is_mask_crop)

    def _format_size(self, size):
        if isinstance(size, Integral):
            size = (size, size)
        return size

    def apply(self, sample, context=None):
        if random.random() < self.prob:
            _resize = self._format_size(random.choice(self.resizes))
            _cropsize = self._format_size(random.choice(self.cropsizes))
            sample = self._resize(
                self.resizer,
                sample,
                size=_resize,
                mode=self.mode,
                context=context)
            sample = self._random_crop(
                self.croper, sample, size=_cropsize, context=context)
        return sample

    @staticmethod
    def _random_crop(croper, sample, size, context=None):
        if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
            return sample

        self = croper
        h, w = sample['image'].shape[:2]
        gt_bbox = sample['gt_bbox']
        cropsize = size
        min_crop = min(cropsize)
        max_crop = max(cropsize)

        thresholds = list(self.thresholds)
        np.random.shuffle(thresholds)

        for thresh in thresholds:
            found = False
            for _ in range(self.num_attempts):
                crop_h = random.randint(min_crop, min(h, max_crop))
                crop_w = random.randint(min_crop, min(w, max_crop))

                crop_y = random.randint(0, h - crop_h)
                crop_x = random.randint(0, w - crop_w)

                crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
                iou = self._iou_matrix(
                    gt_bbox, np.array(
                        [crop_box], dtype=np.float32))
                if iou.max() < thresh:
                    continue

                if self.cover_all_box and iou.min() < thresh:
                    continue

                cropped_box, valid_ids = self._crop_box_with_center_constraint(
                    gt_bbox, np.array(
                        crop_box, dtype=np.float32))
                if valid_ids.size > 0:
                    found = True
                    break

            if found:
                if self.is_mask_crop and 'gt_poly' in sample and len(sample[
                        'gt_poly']) > 0:
                    crop_polys = self.crop_segms(
                        sample['gt_poly'],
                        valid_ids,
                        np.array(
                            crop_box, dtype=np.int64),
                        h,
                        w)
                    if [] in crop_polys:
                        delete_id = list()
                        valid_polys = list()
                        for id, crop_poly in enumerate(crop_polys):
                            if crop_poly == []:
                                delete_id.append(id)
                            else:
                                valid_polys.append(crop_poly)
                        valid_ids = np.delete(valid_ids, delete_id)
                        if len(valid_polys) == 0:
                            return sample
                        sample['gt_poly'] = valid_polys
                    else:
                        sample['gt_poly'] = crop_polys

                if 'gt_segm' in sample:
                    sample['gt_segm'] = self._crop_segm(sample['gt_segm'],
                                                        crop_box)
                    sample['gt_segm'] = np.take(
                        sample['gt_segm'], valid_ids, axis=0)

                sample['image'] = self._crop_image(sample['image'], crop_box)
                sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
                sample['gt_class'] = np.take(
                    sample['gt_class'], valid_ids, axis=0)
                if 'gt_score' in sample:
                    sample['gt_score'] = np.take(
                        sample['gt_score'], valid_ids, axis=0)

                if 'is_crowd' in sample:
                    sample['is_crowd'] = np.take(
                        sample['is_crowd'], valid_ids, axis=0)
                return sample

        return sample

    @staticmethod
    def _resize(resizer, sample, size, mode='short', context=None):
        self = resizer
        im = sample['image']
        target_size = size

        if not isinstance(im, np.ndarray):
            raise TypeError("{}: image type is not numpy.".format(self))
        if len(im.shape) != 3:
            raise ImageError('{}: image is not 3-dimensional.'.format(self))

        # apply image
        im_shape = im.shape
        if self.keep_ratio:
            im_size_min = np.min(im_shape[0:2])
            im_size_max = np.max(im_shape[0:2])

            target_size_min = np.min(target_size)
            target_size_max = np.max(target_size)

            if mode == 'long':
                im_scale = min(target_size_min / im_size_min,
                               target_size_max / im_size_max)
            else:
                im_scale = max(target_size_min / im_size_min,
                               target_size_max / im_size_max)

            resize_h = im_scale * float(im_shape[0])
            resize_w = im_scale * float(im_shape[1])

            im_scale_x = im_scale
            im_scale_y = im_scale
        else:
            resize_h, resize_w = target_size
            im_scale_y = resize_h / im_shape[0]
            im_scale_x = resize_w / im_shape[1]

        im = self.apply_image(sample['image'], [im_scale_x, im_scale_y])
        sample['image'] = im
        sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
        if 'scale_factor' in sample:
            scale_factor = sample['scale_factor']
            sample['scale_factor'] = np.asarray(
                [scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
                dtype=np.float32)
        else:
            sample['scale_factor'] = np.asarray(
                [im_scale_y, im_scale_x], dtype=np.float32)

        # apply bbox
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'],
                                                [im_scale_x, im_scale_y],
                                                [resize_w, resize_h])

        # apply rbox
        if 'gt_rbox2poly' in sample:
            if np.array(sample['gt_rbox2poly']).shape[1] != 8:
                logger.warn(
                    "gt_rbox2poly's length should be 8, but actually is {}".
                    format(len(sample['gt_rbox2poly'])))
            sample['gt_rbox2poly'] = self.apply_bbox(sample['gt_rbox2poly'],
                                                     [im_scale_x, im_scale_y],
                                                     [resize_w, resize_h])

        # apply polygon
        if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
            sample['gt_poly'] = self.apply_segm(
                sample['gt_poly'], im_shape[:2], [im_scale_x, im_scale_y])

        # apply semantic
        if 'semantic' in sample and sample['semantic']:
            semantic = sample['semantic']
            semantic = cv2.resize(
                semantic.astype('float32'),
                None,
                None,
                fx=im_scale_x,
                fy=im_scale_y,
                interpolation=self.interp)
            semantic = np.asarray(semantic).astype('int32')
            semantic = np.expand_dims(semantic, 0)
            sample['semantic'] = semantic

        # apply gt_segm
        if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
            masks = [
                cv2.resize(
                    gt_segm,
                    None,
                    None,
                    fx=im_scale_x,
                    fy=im_scale_y,
                    interpolation=cv2.INTER_NEAREST)
                for gt_segm in sample['gt_segm']
            ]
            sample['gt_segm'] = np.asarray(masks).astype(np.uint8)

        return sample


@register_op
class RandomSelect(BaseOperator):
    """
    Randomly choose a transformation between transforms1 and transforms2,
    and the probability of choosing transforms1 is p.

    The code is based on https://github.com/facebookresearch/detr/blob/main/datasets/transforms.py
    """

    def __init__(self, transforms1, transforms2, p=0.5):
        super(RandomSelect, self).__init__()
        self.transforms1 = Compose(transforms1)
        self.transforms2 = Compose(transforms2)
        self.p = p

    def apply(self, sample, context=None):
        if random.random() < self.p:
            return self.transforms1(sample)
        return self.transforms2(sample)
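
# Usage sketch (illustrative, mirroring the DETR-style config this op comes
# from): pick between a plain multi-scale resize and a resize+crop+resize
# chain. The exact config entries below are placeholders for ops registered
# in this module, not a verbatim recipe.
#
#     select_op = RandomSelect(
#         transforms1=[{'RandomShortSideResize': {'short_side_sizes': [480, 512, 544]}}],
#         transforms2=[{'RandomShortSideResize': {'short_side_sizes': [400, 500]}},
#                      {'RandomSizeCrop': {'min_size': 384, 'max_size': 600}}],
#         p=0.5)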


@register_op
class RandomShortSideResize(BaseOperator):
    def __init__(self,
                 short_side_sizes,
                 max_size=None,
                 interp=cv2.INTER_LINEAR,
                 random_interp=False):
        """
        Resize the image randomly according to the short side. If max_size is
        not None, the long side is capped at max_size. The whole process keeps
        the aspect ratio.
        Args:
            short_side_sizes (list|tuple): Image target short side size.
            max_size (int): The size of the longest side of image after resize.
            interp (int): The interpolation method.
            random_interp (bool): Whether to randomly select the interpolation method.
        """
        super(RandomShortSideResize, self).__init__()

        assert isinstance(short_side_sizes,
                          Sequence), "short_side_sizes must be List or Tuple"

        self.short_side_sizes = short_side_sizes
        self.max_size = max_size
        self.interp = interp
        self.random_interp = random_interp
        self.interps = [
            cv2.INTER_NEAREST,
            cv2.INTER_LINEAR,
            cv2.INTER_AREA,
            cv2.INTER_CUBIC,
            cv2.INTER_LANCZOS4,
        ]

    def get_size_with_aspect_ratio(self, image_shape, size, max_size=None):
        h, w = image_shape
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * size > max_size:
                size = int(
                    round(max_size * min_original_size / max_original_size))

        if (w <= h and w == size) or (h <= w and h == size):
            return (w, h)

        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)

        return (ow, oh)

    def resize(self,
               sample,
               target_size,
               max_size=None,
               interp=cv2.INTER_LINEAR):
        im = sample['image']
        if not isinstance(im, np.ndarray):
            raise TypeError("{}: image type is not numpy.".format(self))
        if len(im.shape) != 3:
            raise ImageError('{}: image is not 3-dimensional.'.format(self))

        target_size = self.get_size_with_aspect_ratio(im.shape[:2],
                                                      target_size, max_size)
        im_scale_y, im_scale_x = target_size[1] / im.shape[0], target_size[
            0] / im.shape[1]

        sample['image'] = cv2.resize(im, target_size, interpolation=interp)
        sample['im_shape'] = np.asarray(target_size[::-1], dtype=np.float32)
        if 'scale_factor' in sample:
            scale_factor = sample['scale_factor']
            sample['scale_factor'] = np.asarray(
                [scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
                dtype=np.float32)
        else:
            sample['scale_factor'] = np.asarray(
                [im_scale_y, im_scale_x], dtype=np.float32)

        # apply bbox
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            sample['gt_bbox'] = self.apply_bbox(
                sample['gt_bbox'], [im_scale_x, im_scale_y], target_size)
        # apply polygon
        if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
            sample['gt_poly'] = self.apply_segm(
                sample['gt_poly'], im.shape[:2], [im_scale_x, im_scale_y])
        # apply semantic
        if 'semantic' in sample and sample['semantic']:
            semantic = sample['semantic']
            semantic = cv2.resize(
                semantic.astype('float32'),
                target_size,
                interpolation=self.interp)
            semantic = np.asarray(semantic).astype('int32')
            semantic = np.expand_dims(semantic, 0)
            sample['semantic'] = semantic
        # apply gt_segm
        if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
            masks = [
                cv2.resize(
                    gt_segm, target_size, interpolation=cv2.INTER_NEAREST)
                for gt_segm in sample['gt_segm']
            ]
            sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
        return sample

    def apply_bbox(self, bbox, scale, size):
        im_scale_x, im_scale_y = scale
        resize_w, resize_h = size
        bbox[:, 0::2] *= im_scale_x
        bbox[:, 1::2] *= im_scale_y
        bbox[:, 0::2] = np.clip(bbox[:, 0::2], 0, resize_w)
        bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, resize_h)
        return bbox.astype('float32')

    def apply_segm(self, segms, im_size, scale):
        def _resize_poly(poly, im_scale_x, im_scale_y):
            resized_poly = np.array(poly).astype('float32')
            resized_poly[0::2] *= im_scale_x
            resized_poly[1::2] *= im_scale_y
            return resized_poly.tolist()

        def _resize_rle(rle, im_h, im_w, im_scale_x, im_scale_y):
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, im_h, im_w)

            mask = mask_util.decode(rle)
            mask = cv2.resize(
                mask,
                None,
                None,
                fx=im_scale_x,
                fy=im_scale_y,
                interpolation=self.interp)
            rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
            return rle

        im_h, im_w = im_size
        im_scale_x, im_scale_y = scale
        resized_segms = []
        for segm in segms:
            if is_poly(segm):
                # Polygon format
                resized_segms.append([
                    _resize_poly(poly, im_scale_x, im_scale_y) for poly in segm
                ])
            else:
                # RLE format
                import pycocotools.mask as mask_util
                resized_segms.append(
                    _resize_rle(segm, im_h, im_w, im_scale_x, im_scale_y))

        return resized_segms

    def apply(self, sample, context=None):
        target_size = random.choice(self.short_side_sizes)
        interp = random.choice(
            self.interps) if self.random_interp else self.interp

        return self.resize(sample, target_size, self.max_size, interp)
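
# Worked example for get_size_with_aspect_ratio above: for an image with
# (h, w) = (480, 640), size=800 and max_size=1333, the cap check is
# 640 / 480 * 800 = 1066.7 <= 1333, so size stays 800; the short side 480 is
# scaled to 800 and the method returns (ow, oh) = (1066, 800).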


@register_op
class RandomSizeCrop(BaseOperator):
    """
    Crop the image randomly according to `min_size` and `max_size`
    """

    def __init__(self, min_size, max_size):
        super(RandomSizeCrop, self).__init__()
        self.min_size = min_size
        self.max_size = max_size

        from paddle.vision.transforms.functional import crop as paddle_crop
        self.paddle_crop = paddle_crop

    @staticmethod
    def get_crop_params(img_shape, output_size):
        """Get parameters for ``crop`` for a random crop.
        Args:
            img_shape (list|tuple): Image's height and width.
            output_size (list|tuple): Expected output size of the crop.
        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
        """
        h, w = img_shape
        th, tw = output_size

        if h + 1 < th or w + 1 < tw:
            raise ValueError(
                "Required crop size {} is larger than input image size {}".
                format((th, tw), (h, w)))

        if w == tw and h == th:
            return 0, 0, h, w

        # random.randint is inclusive on both ends, so the upper bounds are
        # h - th and w - tw to keep the crop inside the image
        i = random.randint(0, h - th)
        j = random.randint(0, w - tw)
        return i, j, th, tw

    def crop(self, sample, region):
        image_shape = sample['image'].shape[:2]
        sample['image'] = self.paddle_crop(sample['image'], *region)

        keep_index = None
        # apply bbox
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], region)
            bbox = sample['gt_bbox'].reshape([-1, 2, 2])
            area = (bbox[:, 1, :] - bbox[:, 0, :]).prod(axis=1)
            keep_index = np.where(area > 0)[0]
            sample['gt_bbox'] = sample['gt_bbox'][keep_index] if len(
                keep_index) > 0 else np.zeros(
                    [0, 4], dtype=np.float32)
            sample['gt_class'] = sample['gt_class'][keep_index] if len(
                keep_index) > 0 else np.zeros(
                    [0, 1], dtype=np.float32)
            if 'gt_score' in sample:
                sample['gt_score'] = sample['gt_score'][keep_index] if len(
                    keep_index) > 0 else np.zeros(
                        [0, 1], dtype=np.float32)
            if 'is_crowd' in sample:
                sample['is_crowd'] = sample['is_crowd'][keep_index] if len(
                    keep_index) > 0 else np.zeros(
                        [0, 1], dtype=np.float32)

        # apply polygon
        if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
            sample['gt_poly'] = self.apply_segm(sample['gt_poly'], region,
                                                image_shape)
            if keep_index is not None:
                # gt_poly is a python list, so index it element by element
                sample['gt_poly'] = [sample['gt_poly'][i] for i in keep_index]
        # apply gt_segm
        if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
            i, j, h, w = region
            sample['gt_segm'] = sample['gt_segm'][:, i:i + h, j:j + w]
            if keep_index is not None:
                sample['gt_segm'] = sample['gt_segm'][keep_index]

        return sample

    def apply_bbox(self, bbox, region):
        i, j, h, w = region
        region_size = np.asarray([w, h])
        crop_bbox = bbox - np.asarray([j, i, j, i])
        crop_bbox = np.minimum(crop_bbox.reshape([-1, 2, 2]), region_size)
        crop_bbox = crop_bbox.clip(min=0)
        return crop_bbox.reshape([-1, 4]).astype('float32')

    def apply_segm(self, segms, region, image_shape):
        def _crop_poly(segm, crop):
            xmin, ymin, xmax, ymax = crop
            crop_coord = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]
            crop_p = np.array(crop_coord).reshape(4, 2)
            crop_p = Polygon(crop_p)

            crop_segm = list()
            for poly in segm:
                poly = np.array(poly).reshape(len(poly) // 2, 2)
                polygon = Polygon(poly)
                if not polygon.is_valid:
                    exterior = polygon.exterior
                    multi_lines = exterior.intersection(exterior)
                    polygons = shapely.ops.polygonize(multi_lines)
                    polygon = MultiPolygon(polygons)
                multi_polygon = list()
                if isinstance(polygon, MultiPolygon):
                    multi_polygon = copy.deepcopy(polygon)
                else:
                    multi_polygon.append(copy.deepcopy(polygon))
                for per_polygon in multi_polygon:
                    inter = per_polygon.intersection(crop_p)
                    if not inter:
                        continue
                    if isinstance(inter, (MultiPolygon, GeometryCollection)):
                        for part in inter:
                            if not isinstance(part, Polygon):
                                continue
                            part = np.squeeze(
                                np.array(part.exterior.coords[:-1]).reshape(
                                    1, -1))
                            part[0::2] -= xmin
                            part[1::2] -= ymin
                            crop_segm.append(part.tolist())
                    elif isinstance(inter, Polygon):
                        crop_poly = np.squeeze(
                            np.array(inter.exterior.coords[:-1]).reshape(
                                1, -1))
                        crop_poly[0::2] -= xmin
                        crop_poly[1::2] -= ymin
                        crop_segm.append(crop_poly.tolist())
                    else:
                        continue
            return crop_segm

        def _crop_rle(rle, crop, height, width):
            if 'counts' in rle and type(rle['counts']) == list:
                rle = mask_util.frPyObjects(rle, height, width)
            mask = mask_util.decode(rle)
            mask = mask[crop[1]:crop[3], crop[0]:crop[2]]
            rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
            return rle

        i, j, h, w = region
        crop = [j, i, j + w, i + h]
        height, width = image_shape
        crop_segms = []
        for segm in segms:
            if is_poly(segm):
                import copy
                import shapely.ops
                from shapely.geometry import Polygon, MultiPolygon, GeometryCollection
                # Polygon format
                crop_segms.append(_crop_poly(segm, crop))
            else:
                # RLE format
                import pycocotools.mask as mask_util
                crop_segms.append(_crop_rle(segm, crop, height, width))
        return crop_segms

    def apply(self, sample, context=None):
        h = random.randint(self.min_size,
                           min(sample['image'].shape[0], self.max_size))
        w = random.randint(self.min_size,
                           min(sample['image'].shape[1], self.max_size))
        region = self.get_crop_params(sample['image'].shape[:2], [h, w])
        return self.crop(sample, region)
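
# Usage sketch (illustrative): crop a region whose height and width are drawn
# independently from [min_size, min(image side, max_size)].
#
#     crop_op = RandomSizeCrop(min_size=384, max_size=600)
#     sample = crop_op.apply(sample)  # boxes/polygons/masks cropped to match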


@register_op
class WarpAffine(BaseOperator):
    def __init__(self,
                 keep_res=False,
                 pad=31,
                 input_h=512,
                 input_w=512,
                 scale=0.4,
                 shift=0.1):
        """WarpAffine
        Warp affine the image

        The code is based on https://github.com/xingyizhou/CenterNet/blob/master/src/lib/datasets/sample/ctdet.py
        """
        super(WarpAffine, self).__init__()
        self.keep_res = keep_res
        self.pad = pad
        self.input_h = input_h
        self.input_w = input_w
        self.scale = scale
        self.shift = shift

    def apply(self, sample, context=None):
        img = sample['image']
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
            return sample

        h, w = img.shape[:2]

        if self.keep_res:
            input_h = (h | self.pad) + 1
            input_w = (w | self.pad) + 1
            s = np.array([input_w, input_h], dtype=np.float32)
            c = np.array([w // 2, h // 2], dtype=np.float32)
        else:
            s = max(h, w) * 1.0
            input_h, input_w = self.input_h, self.input_w
            c = np.array([w / 2., h / 2.], dtype=np.float32)

        trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
        img = cv2.resize(img, (w, h))
        inp = cv2.warpAffine(
            img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
        sample['image'] = inp
        return sample
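
# Note on the keep_res branch above: (h | self.pad) + 1 with pad=31 rounds a
# side up to the next multiple of 32, e.g. 480 -> 512, 500 -> 512 (and an
# exact 512 -> 544), which keeps feature map strides aligned for CenterNet.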

@register_op
class FlipWarpAffine(BaseOperator):
    def __init__(self,
                 keep_res=False,
                 pad=31,
                 input_h=512,
                 input_w=512,
                 not_rand_crop=False,
                 scale=0.4,
                 shift=0.1,
                 flip=0.5,
                 is_scale=True,
                 use_random=True):
        """FlipWarpAffine
        1. Random Crop
        2. Flip the image horizontally
        3. Warp affine the image
        """
        super(FlipWarpAffine, self).__init__()
        self.keep_res = keep_res
        self.pad = pad
        self.input_h = input_h
        self.input_w = input_w
        self.not_rand_crop = not_rand_crop
        self.scale = scale
        self.shift = shift
        self.flip = flip
        self.is_scale = is_scale
        self.use_random = use_random

    def apply(self, sample, context=None):
        img = sample['image']
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
            return sample

        h, w = img.shape[:2]

        if self.keep_res:
            input_h = (h | self.pad) + 1
            input_w = (w | self.pad) + 1
            s = np.array([input_w, input_h], dtype=np.float32)
            c = np.array([w // 2, h // 2], dtype=np.float32)
        else:
            s = max(h, w) * 1.0
            input_h, input_w = self.input_h, self.input_w
            c = np.array([w / 2., h / 2.], dtype=np.float32)

        if self.use_random:
            gt_bbox = sample['gt_bbox']
            if not self.not_rand_crop:
                # random crop: jitter the scale and pick a center away from borders
                s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
                w_border = get_border(128, w)
                h_border = get_border(128, h)
                c[0] = np.random.randint(low=w_border, high=w - w_border)
                c[1] = np.random.randint(low=h_border, high=h - h_border)
            else:
                sf = self.scale
                cf = self.shift
                c[0] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
                c[1] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
                s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)

            if np.random.random() < self.flip:
                # flip the image, the crop center and the boxes horizontally
                img = img[:, ::-1, :]
                c[0] = w - c[0] - 1
                oldx1 = gt_bbox[:, 0].copy()
                oldx2 = gt_bbox[:, 2].copy()
                gt_bbox[:, 0] = w - oldx2 - 1
                gt_bbox[:, 2] = w - oldx1 - 1
            sample['gt_bbox'] = gt_bbox

        trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
        if not self.use_random:
            img = cv2.resize(img, (w, h))
        inp = cv2.warpAffine(
            img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
        if self.is_scale:
            inp = (inp.astype(np.float32) / 255.)
        sample['image'] = inp
        sample['center'] = c
        sample['scale'] = s
        return sample
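
# Illustrative usage sketch for FlipWarpAffine (added comment; shapes and box
# values are assumptions). With use_random=True a non-empty 'gt_bbox' is
# required, and 'center'/'scale' are added to the sample:
#
#   op = FlipWarpAffine(input_h=512, input_w=512)
#   sample = {
#       'image': np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8),
#       'gt_bbox': np.array([[10., 20., 200., 220.]], dtype=np.float32),
#   }
#   out = op.apply(sample)  # out['image'] is float32 in [0, 1] when is_scale=True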

@register_op
class CenterRandColor(BaseOperator):
    """Random color jitter for CenterNet series models.
    Args:
        saturation (float): maximum saturation jitter; the blend factor is
            drawn from [1 - saturation, 1 + saturation].
        contrast (float): maximum contrast jitter, same convention.
        brightness (float): maximum brightness jitter, same convention.
    """

    def __init__(self, saturation=0.4, contrast=0.4, brightness=0.4):
        super(CenterRandColor, self).__init__()
        self.saturation = saturation
        self.contrast = contrast
        self.brightness = brightness

    def apply_saturation(self, img, img_gray):
        alpha = 1. + np.random.uniform(
            low=-self.saturation, high=self.saturation)
        self._blend(alpha, img, img_gray[:, :, None])
        return img

    def apply_contrast(self, img, img_gray):
        alpha = 1. + np.random.uniform(low=-self.contrast, high=self.contrast)
        img_mean = img_gray.mean()
        self._blend(alpha, img, img_mean)
        return img

    def apply_brightness(self, img, img_gray):
        # img_gray is unused here; the argument is kept so all three
        # transforms share one signature for the random permutation below
        alpha = 1 + np.random.uniform(
            low=-self.brightness, high=self.brightness)
        img *= alpha
        return img

    def _blend(self, alpha, img, img_mean):
        # in-place blend: img = alpha * img + (1 - alpha) * img_mean,
        # so img is expected to be a float array
        img *= alpha
        img_mean *= (1 - alpha)
        img += img_mean

    def __call__(self, sample, context=None):
        img = sample['image']
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        functions = [
            self.apply_brightness,
            self.apply_contrast,
            self.apply_saturation,
        ]
        distortions = np.random.permutation(functions)
        for func in distortions:
            img = func(img, img_gray)
        sample['image'] = img
        return sample
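
# Illustrative usage sketch for CenterRandColor (added comment; shapes are
# assumptions). The in-place blends expect a float image, so this op is
# typically run after the image has been converted to float:
#
#   op = CenterRandColor()
#   sample = {'image': np.random.rand(480, 640, 3).astype(np.float32)}
#   out = op(sample)  # same shape, jittered brightness/contrast/saturation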

@register_op
class Mosaic(BaseOperator):
    """ Mosaic operator for image and gt_bboxes
    The code is based on https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/data/datasets/mosaicdetection.py

    1. get mosaic coords
    2. clip bbox and get mosaic_labels
    3. random_affine augment
    4. Mixup augment as copypaste (optional), not used in tiny/nano

    Args:
        prob (float): probability of using Mosaic, 1.0 as default
        input_dim (list[int]): input shape
        degrees (list[2]): the rotate range to apply, transform range is [min, max]
        translate (list[2]): the translate range to apply, transform range is [min, max]
        scale (list[2]): the scale range to apply, transform range is [min, max]
        shear (list[2]): the shear range to apply, transform range is [min, max]
        enable_mixup (bool): whether to enable Mixup or not
        mixup_prob (float): probability of using Mixup, 1.0 as default
        mixup_scale (list[float]): scale range of Mixup
        remove_outside_box (bool): whether to remove outside boxes, False as
            default in COCO dataset, True in MOT dataset
    """

    def __init__(self,
                 prob=1.0,
                 input_dim=[640, 640],
                 degrees=[-10, 10],
                 translate=[-0.1, 0.1],
                 scale=[0.1, 2],
                 shear=[-2, 2],
                 enable_mixup=True,
                 mixup_prob=1.0,
                 mixup_scale=[0.5, 1.5],
                 remove_outside_box=False):
        super(Mosaic, self).__init__()
        self.prob = prob
        if isinstance(input_dim, Integral):
            input_dim = [input_dim, input_dim]
        self.input_dim = input_dim
        self.degrees = degrees
        self.translate = translate
        self.scale = scale
        self.shear = shear
        self.enable_mixup = enable_mixup
        self.mixup_prob = mixup_prob
        self.mixup_scale = mixup_scale
        self.remove_outside_box = remove_outside_box

    def get_mosaic_coords(self, mosaic_idx, xc, yc, w, h, input_h, input_w):
        # (x1, y1, x2, y2) means coords in large image,
        # small_coords means coords in small image in mosaic aug.
        if mosaic_idx == 0:
            # top left
            x1, y1, x2, y2 = max(xc - w, 0), max(yc - h, 0), xc, yc
            small_coords = w - (x2 - x1), h - (y2 - y1), w, h
        elif mosaic_idx == 1:
            # top right
            x1, y1, x2, y2 = xc, max(yc - h, 0), min(xc + w, input_w * 2), yc
            small_coords = 0, h - (y2 - y1), min(w, x2 - x1), h
        elif mosaic_idx == 2:
            # bottom left
            x1, y1, x2, y2 = max(xc - w, 0), yc, xc, min(input_h * 2, yc + h)
            small_coords = w - (x2 - x1), 0, w, min(y2 - y1, h)
        elif mosaic_idx == 3:
            # bottom right
            x1, y1, x2, y2 = xc, yc, min(xc + w, input_w * 2), min(input_h * 2,
                                                                   yc + h)
            small_coords = 0, 0, min(w, x2 - x1), min(y2 - y1, h)

        return (x1, y1, x2, y2), small_coords
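
    # Quadrant layout produced by get_mosaic_coords on the
    # (2 * input_h, 2 * input_w) canvas, centered at (xc, yc) (illustrative):
    #
    #                   xc
    #          idx 0     |     idx 1
    #        (top left)  |  (top right)
    #       -------------+-------------  yc
    #          idx 2     |     idx 3
    #       (bot. left)  |  (bot. right)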

    def random_affine_augment(self,
                              img,
                              labels=[],
                              input_dim=[640, 640],
                              degrees=[-10, 10],
                              scales=[0.1, 2],
                              shears=[-2, 2],
                              translates=[-0.1, 0.1]):
        # random rotation and scale
        degree = random.uniform(degrees[0], degrees[1])
        scale = random.uniform(scales[0], scales[1])
        assert scale > 0, "Argument scale should be positive."
        R = cv2.getRotationMatrix2D(angle=degree, center=(0, 0), scale=scale)
        M = np.ones([2, 3])

        # random shear
        shear = random.uniform(shears[0], shears[1])
        shear_x = math.tan(shear * math.pi / 180)
        shear_y = math.tan(shear * math.pi / 180)
        M[0] = R[0] + shear_y * R[1]
        M[1] = R[1] + shear_x * R[0]

        # random translation
        translate = random.uniform(translates[0], translates[1])
        translation_x = translate * input_dim[0]
        translation_y = translate * input_dim[1]
        M[0, 2] = translation_x
        M[1, 2] = translation_y

        # warpAffine
        img = cv2.warpAffine(
            img, M, dsize=tuple(input_dim), borderValue=(114, 114, 114))

        num_gts = len(labels)
        if num_gts > 0:
            # warp corner points
            corner_points = np.ones((4 * num_gts, 3))
            corner_points[:, :2] = labels[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
                4 * num_gts, 2)  # x1y1, x2y2, x1y2, x2y1
            # apply affine transform
            corner_points = corner_points @ M.T
            corner_points = corner_points.reshape(num_gts, 8)

            # create new boxes
            corner_xs = corner_points[:, 0::2]
            corner_ys = corner_points[:, 1::2]
            new_bboxes = np.concatenate((corner_xs.min(1), corner_ys.min(1),
                                         corner_xs.max(1), corner_ys.max(1)))
            new_bboxes = new_bboxes.reshape(4, num_gts).T

            # clip boxes
            new_bboxes[:, 0::2] = np.clip(new_bboxes[:, 0::2], 0, input_dim[0])
            new_bboxes[:, 1::2] = np.clip(new_bboxes[:, 1::2], 0, input_dim[1])
            labels[:, :4] = new_bboxes

        return img, labels
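
    # Note on random_affine_augment (added comment): M stacks rotation+scale
    # (from R), shear mixed across the rotated rows, and a translation in the
    # last column, so each box corner (x, y, 1) maps to M @ (x, y, 1)^T; the
    # new axis-aligned box is the min/max over the four warped corners.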

    def __call__(self, sample, context=None):
        if not isinstance(sample, Sequence):
            return sample

        assert len(sample) == 5, (
            "Mosaic needs 5 samples, 4 for mosaic and 1 for mixup.")
        if np.random.uniform(0., 1.) > self.prob:
            return sample[0]

        mosaic_gt_bbox, mosaic_gt_class, mosaic_is_crowd = [], [], []
        input_h, input_w = self.input_dim
        yc = int(random.uniform(0.5 * input_h, 1.5 * input_h))
        xc = int(random.uniform(0.5 * input_w, 1.5 * input_w))
        mosaic_img = np.full(
            (input_h * 2, input_w * 2, 3), 114, dtype=np.uint8)

        # 1. get mosaic coords
        for mosaic_idx, sp in enumerate(sample[:4]):
            img = sp['image']
            gt_bbox = sp['gt_bbox']
            h0, w0 = img.shape[:2]
            scale = min(1. * input_h / h0, 1. * input_w / w0)
            img = cv2.resize(
                img, (int(w0 * scale), int(h0 * scale)),
                interpolation=cv2.INTER_LINEAR)
            (h, w, c) = img.shape[:3]

            # suffix l means large image, while s means small image in mosaic aug.
            (l_x1, l_y1, l_x2, l_y2), (
                s_x1, s_y1, s_x2, s_y2) = self.get_mosaic_coords(
                    mosaic_idx, xc, yc, w, h, input_h, input_w)

            mosaic_img[l_y1:l_y2, l_x1:l_x2] = img[s_y1:s_y2, s_x1:s_x2]
            padw, padh = l_x1 - s_x1, l_y1 - s_y1

            # Normalized xywh to pixel xyxy format
            _gt_bbox = gt_bbox.copy()
            if len(gt_bbox) > 0:
                _gt_bbox[:, 0] = scale * gt_bbox[:, 0] + padw
                _gt_bbox[:, 1] = scale * gt_bbox[:, 1] + padh
                _gt_bbox[:, 2] = scale * gt_bbox[:, 2] + padw
                _gt_bbox[:, 3] = scale * gt_bbox[:, 3] + padh

            is_crowd = sp['is_crowd'] if 'is_crowd' in sp else np.zeros(
                (len(_gt_bbox), 1), dtype=np.int32)
            mosaic_gt_bbox.append(_gt_bbox)
            mosaic_gt_class.append(sp['gt_class'])
            mosaic_is_crowd.append(is_crowd)

        # 2. clip bbox and get mosaic_labels([gt_bbox, gt_class, is_crowd])
        if len(mosaic_gt_bbox):
            mosaic_gt_bbox = np.concatenate(mosaic_gt_bbox, 0)
            mosaic_gt_class = np.concatenate(mosaic_gt_class, 0)
            mosaic_is_crowd = np.concatenate(mosaic_is_crowd, 0)
            mosaic_labels = np.concatenate([
                mosaic_gt_bbox, mosaic_gt_class.astype(mosaic_gt_bbox.dtype),
                mosaic_is_crowd.astype(mosaic_gt_bbox.dtype)
            ], 1)
            if self.remove_outside_box:
                # for MOT dataset
                flag1 = mosaic_gt_bbox[:, 0] < 2 * input_w
                flag2 = mosaic_gt_bbox[:, 2] > 0
                flag3 = mosaic_gt_bbox[:, 1] < 2 * input_h
                flag4 = mosaic_gt_bbox[:, 3] > 0
                flag_all = flag1 * flag2 * flag3 * flag4
                mosaic_labels = mosaic_labels[flag_all]
            else:
                mosaic_labels[:, 0] = np.clip(mosaic_labels[:, 0], 0,
                                              2 * input_w)
                mosaic_labels[:, 1] = np.clip(mosaic_labels[:, 1], 0,
                                              2 * input_h)
                mosaic_labels[:, 2] = np.clip(mosaic_labels[:, 2], 0,
                                              2 * input_w)
                mosaic_labels[:, 3] = np.clip(mosaic_labels[:, 3], 0,
                                              2 * input_h)
        else:
            mosaic_labels = np.zeros((1, 6))

        # 3. random_affine augment
        mosaic_img, mosaic_labels = self.random_affine_augment(
            mosaic_img,
            mosaic_labels,
            input_dim=self.input_dim,
            degrees=self.degrees,
            translates=self.translate,
            scales=self.scale,
            shears=self.shear)

        # 4. Mixup augment as copypaste, https://arxiv.org/abs/2012.07177
        # optional, not used (enable_mixup=False) in tiny/nano
        if (self.enable_mixup and not len(mosaic_labels) == 0 and
                random.random() < self.mixup_prob):
            sample_mixup = sample[4]
            mixup_img = sample_mixup['image']
            cp_labels = np.concatenate([
                sample_mixup['gt_bbox'],
                sample_mixup['gt_class'].astype(mosaic_labels.dtype),
                sample_mixup['is_crowd'].astype(mosaic_labels.dtype)
            ], 1)
            mosaic_img, mosaic_labels = self.mixup_augment(
                mosaic_img, mosaic_labels, self.input_dim, cp_labels,
                mixup_img)

        sample0 = sample[0]
        sample0['image'] = mosaic_img.astype(np.uint8)  # can not be float32
        sample0['h'] = float(mosaic_img.shape[0])
        sample0['w'] = float(mosaic_img.shape[1])
        sample0['im_shape'][0] = sample0['h']
        sample0['im_shape'][1] = sample0['w']
        sample0['gt_bbox'] = mosaic_labels[:, :4].astype(np.float32)
        sample0['gt_class'] = mosaic_labels[:, 4:5].astype(np.float32)
        sample0['is_crowd'] = mosaic_labels[:, 5:6].astype(np.float32)
        return sample0

    def mixup_augment(self, origin_img, origin_labels, input_dim, cp_labels,
                      img):
        jit_factor = random.uniform(*self.mixup_scale)
        FLIP = random.uniform(0, 1) > 0.5
        if len(img.shape) == 3:
            cp_img = np.ones(
                (input_dim[0], input_dim[1], 3), dtype=np.uint8) * 114
        else:
            cp_img = np.ones(input_dim, dtype=np.uint8) * 114

        cp_scale_ratio = min(input_dim[0] / img.shape[0],
                             input_dim[1] / img.shape[1])
        resized_img = cv2.resize(
            img, (int(img.shape[1] * cp_scale_ratio),
                  int(img.shape[0] * cp_scale_ratio)),
            interpolation=cv2.INTER_LINEAR)
        cp_img[:int(img.shape[0] * cp_scale_ratio), :int(img.shape[
            1] * cp_scale_ratio)] = resized_img

        cp_img = cv2.resize(cp_img, (int(cp_img.shape[1] * jit_factor),
                                     int(cp_img.shape[0] * jit_factor)))
        cp_scale_ratio *= jit_factor
        if FLIP:
            cp_img = cp_img[:, ::-1, :]

        origin_h, origin_w = cp_img.shape[:2]
        target_h, target_w = origin_img.shape[:2]
        padded_img = np.zeros(
            (max(origin_h, target_h), max(origin_w, target_w), 3),
            dtype=np.uint8)
        padded_img[:origin_h, :origin_w] = cp_img

        x_offset, y_offset = 0, 0
        if padded_img.shape[0] > target_h:
            y_offset = random.randint(0, padded_img.shape[0] - target_h - 1)
        if padded_img.shape[1] > target_w:
            x_offset = random.randint(0, padded_img.shape[1] - target_w - 1)
        padded_cropped_img = padded_img[y_offset:y_offset + target_h,
                                        x_offset:x_offset + target_w]

        # adjust boxes
        cp_bboxes_origin_np = cp_labels[:, :4].copy()
        cp_bboxes_origin_np[:, 0::2] = np.clip(
            cp_bboxes_origin_np[:, 0::2] * cp_scale_ratio, 0, origin_w)
        cp_bboxes_origin_np[:, 1::2] = np.clip(
            cp_bboxes_origin_np[:, 1::2] * cp_scale_ratio, 0, origin_h)

        if FLIP:
            cp_bboxes_origin_np[:, 0::2] = (
                origin_w - cp_bboxes_origin_np[:, 0::2][:, ::-1])
        cp_bboxes_transformed_np = cp_bboxes_origin_np.copy()
        if self.remove_outside_box:
            # for MOT dataset
            cp_bboxes_transformed_np[:, 0::2] -= x_offset
            cp_bboxes_transformed_np[:, 1::2] -= y_offset
        else:
            cp_bboxes_transformed_np[:, 0::2] = np.clip(
                cp_bboxes_transformed_np[:, 0::2] - x_offset, 0, target_w)
            cp_bboxes_transformed_np[:, 1::2] = np.clip(
                cp_bboxes_transformed_np[:, 1::2] - y_offset, 0, target_h)

        cls_labels = cp_labels[:, 4:5].copy()
        crd_labels = cp_labels[:, 5:6].copy()
        box_labels = cp_bboxes_transformed_np
        labels = np.hstack((box_labels, cls_labels, crd_labels))
        if self.remove_outside_box:
            labels = labels[labels[:, 0] < target_w]
            labels = labels[labels[:, 2] > 0]
            labels = labels[labels[:, 1] < target_h]
            labels = labels[labels[:, 3] > 0]

        origin_labels = np.vstack((origin_labels, labels))
        origin_img = origin_img.astype(np.float32)
        origin_img = 0.5 * origin_img + 0.5 * padded_cropped_img.astype(
            np.float32)
        return origin_img.astype(np.uint8), origin_labels
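
# Illustrative usage sketch for Mosaic (added comment; all sample fields and
# shapes below are assumptions). The operator consumes a list of 5 samples,
# 4 for the mosaic and 1 for mixup, and returns a single merged sample:
#
#   mosaic = Mosaic(input_dim=[640, 640], enable_mixup=True)
#   def _fake_sample():
#       return {
#           'image': np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8),
#           'gt_bbox': np.array([[10., 20., 100., 120.]], dtype=np.float32),
#           'gt_class': np.array([[0]], dtype=np.int32),
#           'is_crowd': np.array([[0]], dtype=np.int32),
#           'im_shape': np.array([480., 640.], dtype=np.float32),
#       }
#   out = mosaic([_fake_sample() for _ in range(5)])
#   # out['image'] is a 640x640 uint8 mosaic; out['gt_bbox'] holds merged boxes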

@register_op
class PadResize(BaseOperator):
    """ PadResize for image and gt_bbox
    Args:
        target_size (list[int]): input shape
        fill_value (float): pixel value of padded image
    """

    def __init__(self, target_size, fill_value=114):
        super(PadResize, self).__init__()
        if isinstance(target_size, Integral):
            target_size = [target_size, target_size]
        self.target_size = target_size
        self.fill_value = fill_value

    def _resize(self, img, bboxes, labels):
        ratio = min(self.target_size[0] / img.shape[0],
                    self.target_size[1] / img.shape[1])
        w, h = int(img.shape[1] * ratio), int(img.shape[0] * ratio)
        resized_img = cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
        if len(bboxes) > 0:
            bboxes *= ratio
            # drop boxes whose shorter side shrinks below one pixel
            mask = np.minimum(bboxes[:, 2] - bboxes[:, 0],
                              bboxes[:, 3] - bboxes[:, 1]) > 1
            bboxes = bboxes[mask]
            labels = labels[mask]
        return resized_img, bboxes, labels

    def _pad(self, img):
        h, w, _ = img.shape
        if h == self.target_size[0] and w == self.target_size[1]:
            return img
        padded_img = np.full(
            (self.target_size[0], self.target_size[1], 3),
            self.fill_value,
            dtype=np.uint8)
        padded_img[:h, :w] = img
        return padded_img

    def apply(self, sample, context=None):
        image = sample['image']
        bboxes = sample['gt_bbox']
        labels = sample['gt_class']
        image, bboxes, labels = self._resize(image, bboxes, labels)
        sample['image'] = self._pad(image).astype(np.float32)
        sample['gt_bbox'] = bboxes
        sample['gt_class'] = labels
        return sample
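
# Illustrative usage sketch for PadResize (added comment; shapes are
# assumptions). The image is resized with a preserved aspect ratio, then
# letterboxed with fill_value up to the square target:
#
#   op = PadResize(target_size=640, fill_value=114)
#   sample = {
#       'image': np.random.randint(0, 255, (320, 640, 3), dtype=np.uint8),
#       'gt_bbox': np.array([[10., 20., 100., 120.]], dtype=np.float32),
#       'gt_class': np.array([[0]], dtype=np.int32),
#   }
#   out = op.apply(sample)  # out['image'] is float32, shape (640, 640, 3)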