# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
    from collections.abc import Sequence
except Exception:
    from collections import Sequence
import random
import os.path as osp
import numpy as np
import cv2
from PIL import Image, ImageEnhance
from .ops import *
from .box_utils import *


class Compose:
    """Apply a list of preprocessing/augmentation operators to the input data.

    The input image of every operator has shape [H, W, C], where H is the image
    height, W the width and C the number of channels.

    Args:
        transforms (list): List of preprocessing/augmentation operators.

    Raises:
        TypeError: Invalid type of input arguments.
        ValueError: Mismatched data length.
    """

    def __init__(self, transforms):
        if not isinstance(transforms, list):
            raise TypeError('The transforms must be a list!')
        if len(transforms) < 1:
            raise ValueError('The length of transforms ' + \
                             'must be equal or larger than 1!')
        self.transforms = transforms
        self.use_mixup = False
        for t in self.transforms:
            if t.__class__.__name__ == 'MixupImage':
                self.use_mixup = True

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (str/np.ndarray): Image path or image data as np.ndarray.
            im_info (dict): Image-related information with the following fields:
                - im_id (np.ndarray): Image id, shape (1,).
                - image_shape (np.ndarray): Original image size, shape (2,);
                  image_shape[0] is the height and image_shape[1] the width.
                - mixup (list): [im, im_info, label_info] of the image to be mixed
                  up with the current one; the field is absent when no mixup is
                  performed in the current epoch.
            label_info (dict): Annotation-related information with the following fields:
                - gt_bbox (np.ndarray): Ground-truth boxes [x1, y1, x2, y2], shape (n, 4),
                  where n is the number of ground-truth boxes.
                - gt_class (np.ndarray): Class index of each ground-truth box, shape (n, 1).
                - gt_score (np.ndarray): Mixup score of each ground-truth box, shape (n, 1).
                - gt_poly (list): Polygon segmentation of each ground-truth box, given as
                  x/y coordinates; the list has length n.
                - is_crowd (np.ndarray): Whether each ground-truth box marks a crowd of
                  objects, shape (n, 1).
                - difficult (np.ndarray): Whether each ground-truth object is hard to
                  recognize, shape (n, 1).

        Returns:
            tuple: Fields required by the network; the exact fields are decided by the
                last operator in transforms.
        """

        def decode_image(im_file, im_info, label_info):
            if im_info is None:
                im_info = dict()
            if isinstance(im_file, np.ndarray):
                # The image has already been decoded by the caller.
                im = im_file
            else:
                try:
                    im = cv2.imread(im_file).astype('float32')
                except Exception:
                    raise TypeError(
                        'Can\'t read the image file {}!'.format(im_file))
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            # make default im_resize_info with [h, w, 1.]
            im_info['im_resize_info'] = np.array(
                [im.shape[0], im.shape[1], 1.], dtype=np.float32)
            if not self.use_mixup:
                if 'mixup' in im_info:
                    del im_info['mixup']
            # decode mixup image
            if 'mixup' in im_info:
                im_info['mixup'] = \
                    decode_image(im_info['mixup'][0],
                                 im_info['mixup'][1],
                                 im_info['mixup'][2])
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)

        outputs = decode_image(im, im_info, label_info)
        im = outputs[0]
        im_info = outputs[1]
        if len(outputs) == 3:
            label_info = outputs[2]
        for op in self.transforms:
            if im is None:
                return None
            outputs = op(im, im_info, label_info)
            im = outputs[0]
        return outputs
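

# Illustrative usage sketch (not part of the original API): it wires a few of the
# operators below into a test-time pipeline on a synthetic image. The field names
# follow the docstrings above, but the image and im_info values are made up, and
# the result still depends on the helpers imported from .ops.
def _example_compose_usage():
    im = np.random.randint(0, 255, (300, 400, 3)).astype('float32')
    im_info = {'image_shape': np.array([300, 400]).astype('int32')}
    transforms = Compose([
        ResizeByShort(short_size=800, max_size=1333),
        Normalize(),
        Padding(coarsest_stride=32),
        ArrangeFasterRCNN(mode='test')
    ])
    # The last operator runs in 'test' mode, so this returns (im, im_resize_info, im_shape).
    return transforms(im, im_info=im_info)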


class ResizeByShort:
    """Resize the image according to its short side.

    1. Get the lengths of the long and short sides of the image.
    2. Compute the target length of the long side from the ratio between the short
       side and short_size; the resize scale for both height and width is
       short_size / original short side.
    3. If max_size > 0 and the target long side would exceed max_size, adjust the
       scale to max_size / original long side.
    4. Resize the image with the resulting scale.

    Args:
        short_size (int): Target length of the short side. Default: 800.
        max_size (int): Upper limit of the long side. Default: 1333.

    Raises:
        TypeError: Invalid type of input arguments.
    """

    def __init__(self, short_size=800, max_size=1333):
        self.max_size = int(max_size)
        if not isinstance(short_size, int):
            raise TypeError(
                "Type of short_size is invalid. Must be Integer, now is {}".
                format(type(short_size)))
        self.short_size = short_size
        if not (isinstance(self.max_size, int)):
            raise TypeError("max_size: input type is invalid.")

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info), i.e. the image
                np.ndarray and the dict of image-related information; otherwise
                returns (im, im_info, label_info).
                The updated field of im_info is:
                - im_resize_info (np.ndarray): [resized height, resized width,
                  scale relative to the original image], shape (3,).

        Raises:
            TypeError: Invalid type of input arguments.
            ValueError: Mismatched data length.
        """
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("ResizeByShort: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('ResizeByShort: image is not 3-dimensional.')
        im_short_size = min(im.shape[0], im.shape[1])
        im_long_size = max(im.shape[0], im.shape[1])
        scale = float(self.short_size) / im_short_size
        if self.max_size > 0 and np.round(
                scale * im_long_size) > self.max_size:
            scale = float(self.max_size) / float(im_long_size)
        resized_width = int(round(im.shape[1] * scale))
        resized_height = int(round(im.shape[0] * scale))
        im_resize_info = [resized_height, resized_width, scale]
        im = cv2.resize(
            im, (resized_width, resized_height),
            interpolation=cv2.INTER_LINEAR)
        im_info['im_resize_info'] = np.array(im_resize_info).astype(np.float32)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
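

# A minimal sketch of the scale rule described above, using made-up sizes: with
# short_size=800 and max_size=1333, a 600x1200 image is first scaled by 800/600,
# but because the long side would then exceed 1333 the scale is clamped to 1333/1200.
def _example_resize_by_short_scale(height=600, width=1200,
                                   short_size=800, max_size=1333):
    scale = float(short_size) / min(height, width)
    if max_size > 0 and round(scale * max(height, width)) > max_size:
        scale = float(max_size) / max(height, width)
    return int(round(width * scale)), int(round(height * scale))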


class Padding:
    """Pad the height and width of the image to a multiple of coarsest_stride.

    For example, for an input image of [300, 640] and `coarsest_stride` 32, since
    300 is not a multiple of 32 the image is zero-padded on the right and bottom,
    giving an output of [320, 640].

    1. If coarsest_stride is 1 and target_size is None, return directly.
    2. Get the height H and width W of the image.
    3. Compute the padded height H_new and width W_new.
    4. Build an all-zero np.ndarray of shape (H_new, W_new, 3) and paste the
       original image into its top-left corner.

    Args:
        coarsest_stride (int): The padded height and width are multiples of this
            value. Default: 1.
        target_size (int|list): Padded height and width. Default: None.
    """

    def __init__(self, coarsest_stride=1, target_size=None):
        self.coarsest_stride = coarsest_stride
        self.target_size = target_size

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info); otherwise
                returns (im, im_info, label_info).

        Raises:
            TypeError: Invalid type of input arguments.
            ValueError: Mismatched data length.
            ValueError: Exactly one of coarsest_stride and target_size must take
                effect; coarsest_stride has the higher priority.
            ValueError: target_size is smaller than the original image size.
        """
        if self.coarsest_stride == 1 and self.target_size is None:
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("Padding: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('Padding: image is not 3-dimensional.')
        im_h, im_w, im_c = im.shape[:]
        if self.coarsest_stride > 1:
            padding_im_h = int(
                np.ceil(im_h / self.coarsest_stride) * self.coarsest_stride)
            padding_im_w = int(
                np.ceil(im_w / self.coarsest_stride) * self.coarsest_stride)
        elif isinstance(self.target_size, int):
            padding_im_h = self.target_size
            padding_im_w = self.target_size
        elif isinstance(self.target_size, list):
            padding_im_w = self.target_size[0]
            padding_im_h = self.target_size[1]
        else:
            raise ValueError(
                "coarsest_stride (>1) or target_size (list|int) needs to be set in Padding transform"
            )
        pad_height = padding_im_h - im_h
        pad_width = padding_im_w - im_w
        if pad_height < 0 or pad_width < 0:
            raise ValueError(
                'the size of the image should be less than target_size, but the image size ({}, {}) is larger than target_size ({}, {})'
                .format(im_w, im_h, padding_im_w, padding_im_h))
        padding_im = np.zeros((padding_im_h, padding_im_w, im_c),
                              dtype=np.float32)
        padding_im[:im_h, :im_w, :] = im
        if label_info is None:
            return (padding_im, im_info)
        else:
            return (padding_im, im_info, label_info)
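

# A minimal sketch of the padding rule with the sizes from the class docstring:
# with coarsest_stride=32, a 300x640 image is padded to 320x640.
def _example_padding_shape(im_h=300, im_w=640, coarsest_stride=32):
    padded_h = int(np.ceil(im_h / coarsest_stride) * coarsest_stride)
    padded_w = int(np.ceil(im_w / coarsest_stride) * coarsest_stride)
    return padded_h, padded_w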


class Resize:
    """Resize the image.

    - When target_size is an int, resize the image to [target_size, target_size]
      with the given interpolation method.
    - When target_size is a list or tuple, resize the image to target_size with
      the given interpolation method.
    Note: when the interpolation method is "RANDOM", one of the methods is chosen
    at random for each resize.

    Args:
        target_size (int/list/tuple): Target size of the resized image. Default: 608.
        interp (str): Interpolation method, matching the OpenCV interpolation flags;
            one of ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'].
            Default: "LINEAR".

    Raises:
        TypeError: Invalid type of input arguments.
        ValueError: Interpolation method is not in ['NEAREST', 'LINEAR', 'CUBIC',
            'AREA', 'LANCZOS4', 'RANDOM'].
    """
    # The interpolation mode
    interp_dict = {
        'NEAREST': cv2.INTER_NEAREST,
        'LINEAR': cv2.INTER_LINEAR,
        'CUBIC': cv2.INTER_CUBIC,
        'AREA': cv2.INTER_AREA,
        'LANCZOS4': cv2.INTER_LANCZOS4
    }

    def __init__(self, target_size=608, interp='LINEAR'):
        self.interp = interp
        if not (interp == "RANDOM" or interp in self.interp_dict):
            raise ValueError("interp should be one of {}".format(
                self.interp_dict.keys()))
        if isinstance(target_size, list) or isinstance(target_size, tuple):
            if len(target_size) != 2:
                raise TypeError(
                    'when target_size is a list or tuple, it should include 2 elements, but it is {}'
                    .format(target_size))
        elif not isinstance(target_size, int):
            raise TypeError(
                "Type of target_size is invalid. Must be Integer or List or tuple, now is {}"
                .format(type(target_size)))
        self.target_size = target_size

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info); otherwise
                returns (im, im_info, label_info).

        Raises:
            TypeError: Invalid type of input arguments.
            ValueError: Mismatched data length.
        """
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("Resize: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('Resize: image is not 3-dimensional.')
        if self.interp == "RANDOM":
            interp = random.choice(list(self.interp_dict.keys()))
        else:
            interp = self.interp
        im = resize(im, self.target_size, self.interp_dict[interp])
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)


class RandomHorizontalFlip:
    """Randomly flip the image, boxes and segmentations; a data augmentation
    operation for model training.

    1. Sample a random number in [0, 1]; if it is smaller than the flip
       probability, run steps 2-4, otherwise return directly.
    2. Flip the image horizontally.
    3. Compute the coordinates of the flipped ground-truth boxes and update
       gt_bbox in label_info.
    4. Compute the coordinates of the flipped segmentation regions and update
       gt_poly in label_info.

    Args:
        prob (float): Probability of the horizontal flip. Default: 0.5.

    Raises:
        TypeError: Invalid type of input arguments.
    """

    def __init__(self, prob=0.5):
        self.prob = prob
        if not isinstance(self.prob, float):
            raise TypeError("RandomHorizontalFlip: input type is invalid.")

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info); otherwise
                returns (im, im_info, label_info).
                The updated fields of label_info are:
                - gt_bbox (np.ndarray): Flipped box coordinates [x1, y1, x2, y2],
                  shape (n, 4), where n is the number of ground-truth boxes.
                - gt_poly (list): x/y coordinates of the flipped segmentation
                  polygons; the list has length n.

        Raises:
            TypeError: Invalid type of input arguments.
            ValueError: Mismatched data length.
        """
        if not isinstance(im, np.ndarray):
            raise TypeError(
                "RandomHorizontalFlip: image is not a numpy array.")
        if len(im.shape) != 3:
            raise ValueError(
                "RandomHorizontalFlip: image is not 3-dimensional.")
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomHorizontalFlip! ' +
                'Because the im_info and label_info can not be None!')
        if 'gt_bbox' not in label_info:
            raise TypeError('Cannot do RandomHorizontalFlip! ' + \
                            'Because gt_bbox is not in label_info!')
        image_shape = im_info['image_shape']
        gt_bbox = label_info['gt_bbox']
        height = image_shape[0]
        width = image_shape[1]
        if np.random.uniform(0, 1) < self.prob:
            im = horizontal_flip(im)
            if gt_bbox.shape[0] == 0:
                if label_info is None:
                    return (im, im_info)
                else:
                    return (im, im_info, label_info)
            label_info['gt_bbox'] = box_horizontal_flip(gt_bbox, width)
            if 'gt_poly' in label_info and \
                    len(label_info['gt_poly']) != 0:
                label_info['gt_poly'] = segms_horizontal_flip(
                    label_info['gt_poly'], height, width)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
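

# A minimal sketch of how box coordinates mirror under a horizontal flip. The
# actual helper box_horizontal_flip lives in box_utils and may use a slightly
# different pixel-indexing convention, so treat this only as an illustration.
def _example_box_flip(gt_bbox, width):
    flipped = gt_bbox.copy()
    flipped[:, 0] = width - gt_bbox[:, 2]  # new x1 mirrors the old x2
    flipped[:, 2] = width - gt_bbox[:, 0]  # new x2 mirrors the old x1
    return flipped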


class Normalize:
    """Normalize the image.

    1. Scale the pixel values of the image to the range [0.0, 1.0].
    2. Subtract the mean and divide by the standard deviation.

    Args:
        mean (list): Mean of the dataset images. Default: [0.485, 0.456, 0.406].
        std (list): Standard deviation of the dataset images.
            Default: [0.229, 0.224, 0.225].

    Raises:
        TypeError: Invalid type of input arguments.
    """

    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        self.mean = mean
        self.std = std
        if not (isinstance(self.mean, list) and isinstance(self.std, list)):
            raise TypeError("NormalizeImage: input type is invalid.")
        from functools import reduce
        if reduce(lambda x, y: x * y, self.std) == 0:
            raise TypeError('NormalizeImage: std is invalid!')

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info); otherwise
                returns (im, im_info, label_info).
        """
        mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
        std = np.array(self.std)[np.newaxis, np.newaxis, :]
        im = normalize(im, mean, std)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
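

# A minimal sketch of the normalization described in the class docstring,
# assuming the `normalize` helper from .ops scales pixels to [0, 1] before
# standardizing; the real helper may differ in detail.
def _example_normalize(im, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    im = im.astype(np.float32) / 255.0
    im -= np.array(mean)[np.newaxis, np.newaxis, :]
    im /= np.array(std)[np.newaxis, np.newaxis, :]
    return im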


class RandomDistort:
    """Randomly distort the pixel content of the image with some probability;
    a data augmentation operation for model training.

    1. Shuffle the order of the distortion operations.
    2. Following the order from step 1, apply each distortion with its own
       probability; the hue factor is drawn from [-hue_range, hue_range], while the
       brightness/contrast/saturation factors are drawn from [1 - range, 1 + range].

    Args:
        brightness_range (float): Range of the brightness factor. Default: 0.5.
        brightness_prob (float): Probability of adjusting the brightness. Default: 0.5.
        contrast_range (float): Range of the contrast factor. Default: 0.5.
        contrast_prob (float): Probability of adjusting the contrast. Default: 0.5.
        saturation_range (float): Range of the saturation factor. Default: 0.5.
        saturation_prob (float): Probability of adjusting the saturation. Default: 0.5.
        hue_range (int): Range of the hue factor. Default: 18.
        hue_prob (float): Probability of adjusting the hue. Default: 0.5.
    """

    def __init__(self,
                 brightness_range=0.5,
                 brightness_prob=0.5,
                 contrast_range=0.5,
                 contrast_prob=0.5,
                 saturation_range=0.5,
                 saturation_prob=0.5,
                 hue_range=18,
                 hue_prob=0.5):
        self.brightness_range = brightness_range
        self.brightness_prob = brightness_prob
        self.contrast_range = contrast_range
        self.contrast_prob = contrast_prob
        self.saturation_range = saturation_range
        self.saturation_prob = saturation_prob
        self.hue_range = hue_range
        self.hue_prob = hue_prob

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info); otherwise
                returns (im, im_info, label_info).
        """
        brightness_lower = 1 - self.brightness_range
        brightness_upper = 1 + self.brightness_range
        contrast_lower = 1 - self.contrast_range
        contrast_upper = 1 + self.contrast_range
        saturation_lower = 1 - self.saturation_range
        saturation_upper = 1 + self.saturation_range
        hue_lower = -self.hue_range
        hue_upper = self.hue_range
        ops = [brightness, contrast, saturation, hue]
        random.shuffle(ops)
        params_dict = {
            'brightness': {
                'brightness_lower': brightness_lower,
                'brightness_upper': brightness_upper
            },
            'contrast': {
                'contrast_lower': contrast_lower,
                'contrast_upper': contrast_upper
            },
            'saturation': {
                'saturation_lower': saturation_lower,
                'saturation_upper': saturation_upper
            },
            'hue': {
                'hue_lower': hue_lower,
                'hue_upper': hue_upper
            }
        }
        prob_dict = {
            'brightness': self.brightness_prob,
            'contrast': self.contrast_prob,
            'saturation': self.saturation_prob,
            'hue': self.hue_prob
        }
        for id in range(4):
            params = params_dict[ops[id].__name__]
            prob = prob_dict[ops[id].__name__]
            params['im'] = im
            if np.random.uniform(0, 1) < prob:
                im = ops[id](**params)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
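

# A minimal sketch of one distortion step, assuming the `brightness` helper in
# .ops behaves like PIL's ImageEnhance.Brightness with a factor drawn uniformly
# from [brightness_lower, brightness_upper]; the real helper may differ in detail.
def _example_brightness(im, brightness_lower=0.5, brightness_upper=1.5):
    factor = np.random.uniform(brightness_lower, brightness_upper)
    pil_im = Image.fromarray(im.astype('uint8'))
    return np.asarray(ImageEnhance.Brightness(pil_im).enhance(factor))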


class MixupImage:
    """Apply mixup to the image; a data augmentation operation for model training.
    Currently only the YOLOv3 model supports this transform.

    When im_info contains no 'mixup' field, return directly; otherwise:
    1. Draw a random factor from a beta distribution.
    2.
       - If factor >= 1.0, remove the 'mixup' field from im_info and return.
       - If factor <= 0.0, return the 'mixup' field of im_info and remove it
         from im_info.
       - Otherwise:
         (1) Multiply the original image by factor and the mixup image by
             (1 - factor), then add the two results.
         (2) Concatenate the boxes of the original and mixup images.
         (3) Concatenate the box classes of the original and mixup images.
         (4) Multiply the box scores of the original image by factor and those of
             the mixup image by (1 - factor), then concatenate the two results.
    3. Update the image_shape field of im_info.

    Args:
        alpha (float): First shape parameter of the beta distribution used to
            sample the mixup factor. Default: 1.5.
        beta (float): Second shape parameter of the beta distribution used to
            sample the mixup factor. Default: 1.5.
        mixup_epoch (int): Apply mixup only during the first mixup_epoch epochs;
            when set to -1, the strategy never takes effect. Default: -1.

    Raises:
        ValueError: Mismatched data length.
    """

    def __init__(self, alpha=1.5, beta=1.5, mixup_epoch=-1):
        self.alpha = alpha
        self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha should be positive in MixupImage")
        if self.beta <= 0.0:
            raise ValueError("beta should be positive in MixupImage")
        self.mixup_epoch = mixup_epoch

    def _mixup_img(self, img1, img2, factor):
        h = max(img1.shape[0], img2.shape[0])
        w = max(img1.shape[1], img2.shape[1])
        img = np.zeros((h, w, img1.shape[2]), 'float32')
        img[:img1.shape[0], :img1.shape[1], :] = \
            img1.astype('float32') * factor
        img[:img2.shape[0], :img2.shape[1], :] += \
            img2.astype('float32') * (1.0 - factor)
        return img.astype('float32')

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info); otherwise
                returns (im, im_info, label_info).
                The updated field of im_info is:
                - image_shape (np.ndarray): Height and width of the mixed-up image,
                  shape (2,).
                The removed field of im_info is:
                - mixup (list): Information of the image mixed up with the current one.
                The updated fields of label_info are:
                - gt_bbox (np.ndarray): Box coordinates after mixup, shape (n, 4),
                  where n is the number of ground-truth boxes.
                - gt_class (np.ndarray): Class index of each box after mixup, shape (n, 1).
                - gt_score (np.ndarray): Mixup score of each box after mixup, shape (n, 1).

        Raises:
            TypeError: Invalid type of input arguments.
        """
        if im_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Because the im_info can not be None!')
        if 'mixup' not in im_info:
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        if im_info['epoch'] > self.mixup_epoch \
                or factor >= 1.0:
            im_info.pop('mixup')
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        if factor <= 0.0:
            return im_info.pop('mixup')
        im = self._mixup_img(im, im_info['mixup'][0], factor)
        if label_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Because the label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info or \
                'gt_score' not in label_info:
            raise TypeError('Cannot do MixupImage! ' + \
                            'Because gt_bbox/gt_class/gt_score is not in label_info!')
        gt_bbox1 = label_info['gt_bbox']
        gt_bbox2 = im_info['mixup'][2]['gt_bbox']
        gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
        gt_class1 = label_info['gt_class']
        gt_class2 = im_info['mixup'][2]['gt_class']
        gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
        gt_score1 = label_info['gt_score']
        gt_score2 = im_info['mixup'][2]['gt_score']
        gt_score = np.concatenate(
            (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
        if 'gt_poly' in label_info:
            gt_poly1 = label_info['gt_poly']
            gt_poly2 = im_info['mixup'][2]['gt_poly']
            label_info['gt_poly'] = gt_poly1 + gt_poly2
        is_crowd1 = label_info['is_crowd']
        is_crowd2 = im_info['mixup'][2]['is_crowd']
        is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
        label_info['gt_bbox'] = gt_bbox
        label_info['gt_score'] = gt_score
        label_info['gt_class'] = gt_class
        label_info['is_crowd'] = is_crowd
        im_info['image_shape'] = np.array([im.shape[0],
                                           im.shape[1]]).astype('int32')
        im_info.pop('mixup')
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
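

# A minimal sketch of the mixup blend described above: the factor comes from a
# Beta(alpha, beta) draw, and for images of identical size the blend reduces to
# a weighted sum; _mixup_img above additionally pads both images onto a common
# canvas and the box scores are scaled by the same weights.
def _example_mixup_blend(im1, im2, alpha=1.5, beta=1.5):
    factor = max(0.0, min(1.0, np.random.beta(alpha, beta)))
    blended = im1.astype('float32') * factor + im2.astype('float32') * (1.0 - factor)
    return blended, factor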


class RandomExpand:
    """Randomly expand the image; a data augmentation operation for model training.

    1. Randomly pick an expansion ratio (expansion is performed only when the
       ratio is larger than 1).
    2. Compute the size of the expanded image.
    3. Initialize an image filled with fill_value and paste the original image at
       a random position on it.
    4. Convert the ground-truth box coordinates according to the paste position
       (see the sketch after this class).
    5. Convert the ground-truth segmentation coordinates according to the paste
       position.

    Args:
        ratio (float): Maximum expansion ratio of the image. Default: 4.0.
        prob (float): Probability of the random expansion. Default: 0.5.
        fill_value (list): Initial fill value of the expanded image (0-255).
            Default: [123.675, 116.28, 103.53].
    """

    def __init__(self,
                 ratio=4.,
                 prob=0.5,
                 fill_value=[123.675, 116.28, 103.53]):
        super(RandomExpand, self).__init__()
        assert ratio > 1.01, "expand ratio must be larger than 1.01"
        self.ratio = ratio
        self.prob = prob
        assert isinstance(fill_value, Sequence), \
            "fill value must be sequence"
        if not isinstance(fill_value, tuple):
            fill_value = tuple(fill_value)
        self.fill_value = fill_value

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info); otherwise
                returns (im, im_info, label_info).
                The updated field of im_info is:
                - image_shape (np.ndarray): Height and width of the expanded image,
                  shape (2,).
                The updated fields of label_info are:
                - gt_bbox (np.ndarray): Box coordinates after the expansion, shape (n, 4),
                  where n is the number of ground-truth boxes.
                - gt_class (np.ndarray): Class index of each box after the expansion,
                  shape (n, 1).

        Raises:
            TypeError: Invalid type of input arguments.
        """
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomExpand! ' +
                'Because the im_info and label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info:
            raise TypeError('Cannot do RandomExpand! ' + \
                            'Because gt_bbox/gt_class is not in label_info!')
        if np.random.uniform(0., 1.) < self.prob:
            return (im, im_info, label_info)
        image_shape = im_info['image_shape']
        height = int(image_shape[0])
        width = int(image_shape[1])
        expand_ratio = np.random.uniform(1., self.ratio)
        h = int(height * expand_ratio)
        w = int(width * expand_ratio)
        if not h > height or not w > width:
            return (im, im_info, label_info)
        y = np.random.randint(0, h - height)
        x = np.random.randint(0, w - width)
        canvas = np.ones((h, w, 3), dtype=np.float32)
        canvas *= np.array(self.fill_value, dtype=np.float32)
        canvas[y:y + height, x:x + width, :] = im
        im_info['image_shape'] = np.array([h, w]).astype('int32')
        if 'gt_bbox' in label_info and len(label_info['gt_bbox']) > 0:
            label_info['gt_bbox'] += np.array([x, y] * 2, dtype=np.float32)
        if 'gt_poly' in label_info and len(label_info['gt_poly']) > 0:
            label_info['gt_poly'] = expand_segms(label_info['gt_poly'], x, y,
                                                 height, width, expand_ratio)
        return (canvas, im_info, label_info)
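

# A minimal sketch of step 4 above: after the original image is pasted at
# (x, y) on the expanded canvas, every box is shifted by the same offset.
def _example_expand_boxes(gt_bbox, x, y):
    return gt_bbox + np.array([x, y, x, y], dtype=np.float32)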


class RandomCrop:
    """Randomly crop the image.

    1. If allow_no_crop is True, append 'no_crop' to thresholds.
    2. Randomly shuffle thresholds.
    3. For each element of thresholds:
       (1) If the current thresh is 'no_crop', return the original image and
           annotations.
       (2) Randomly sample from aspect_ratio and scaling to compute the height,
           width and top-left corner of a candidate crop region.
       (3) Compute the IoU between the ground-truth boxes and the candidate region;
           if the IoU of every ground-truth box is smaller than thresh, continue
           with step 3.
       (4) If cover_all_box is True and some ground-truth box has an IoU smaller
           than thresh, continue with step 3.
       (5) Keep the ground-truth boxes whose centers lie inside the candidate
           region; if no valid box remains, continue with step 3, otherwise go to
           step 4.
    4. Convert the valid ground-truth box coordinates relative to the crop region.
    5. Convert the valid segmentation coordinates relative to the crop region.

    Args:
        aspect_ratio (list): Range [min, max] of the aspect ratio of the crop
            region. Default: [.5, 2.].
        thresholds (list): IoU thresholds used to decide whether a candidate crop
            region is valid. Default: [.0, .1, .3, .5, .7, .9].
        scaling (list): Range [min, max] of the crop area relative to the original
            area. Default: [.3, 1.].
        num_attempts (int): Number of attempts before giving up on finding a valid
            crop region. Default: 50.
        allow_no_crop (bool): Whether not cropping at all is allowed. Default: True.
        cover_all_box (bool): Whether all ground-truth boxes must be inside the
            crop region. Default: False.
    """

    def __init__(self,
                 aspect_ratio=[.5, 2.],
                 thresholds=[.0, .1, .3, .5, .7, .9],
                 scaling=[.3, 1.],
                 num_attempts=50,
                 allow_no_crop=True,
                 cover_all_box=False):
        self.aspect_ratio = aspect_ratio
        self.thresholds = thresholds
        self.scaling = scaling
        self.num_attempts = num_attempts
        self.allow_no_crop = allow_no_crop
        self.cover_all_box = cover_all_box

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info); otherwise
                returns (im, im_info, label_info).
                The updated field of im_info is:
                - image_shape (np.ndarray): Height and width of the cropped image,
                  shape (2,).
                The updated fields of label_info are:
                - gt_bbox (np.ndarray): Box coordinates after the crop, shape (n, 4),
                  where n is the number of ground-truth boxes.
                - gt_class (np.ndarray): Class index of each box after the crop,
                  shape (n, 1).
                - gt_score (np.ndarray): Mixup score of each box after the crop,
                  shape (n, 1).

        Raises:
            TypeError: Invalid type of input arguments.
        """
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomCrop! ' +
                'Because the im_info and label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info:
            raise TypeError('Cannot do RandomCrop! ' + \
                            'Because gt_bbox/gt_class is not in label_info!')
        if len(label_info['gt_bbox']) == 0:
            return (im, im_info, label_info)
        image_shape = im_info['image_shape']
        w = image_shape[1]
        h = image_shape[0]
        gt_bbox = label_info['gt_bbox']
        thresholds = list(self.thresholds)
        if self.allow_no_crop:
            thresholds.append('no_crop')
        np.random.shuffle(thresholds)
        for thresh in thresholds:
            if thresh == 'no_crop':
                return (im, im_info, label_info)
            found = False
            for i in range(self.num_attempts):
                scale = np.random.uniform(*self.scaling)
                min_ar, max_ar = self.aspect_ratio
                aspect_ratio = np.random.uniform(
                    max(min_ar, scale**2), min(max_ar, scale**-2))
                crop_h = int(h * scale / np.sqrt(aspect_ratio))
                crop_w = int(w * scale * np.sqrt(aspect_ratio))
                crop_y = np.random.randint(0, h - crop_h)
                crop_x = np.random.randint(0, w - crop_w)
                crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
                iou = iou_matrix(gt_bbox, np.array([crop_box],
                                                   dtype=np.float32))
                if iou.max() < thresh:
                    continue
                if self.cover_all_box and iou.min() < thresh:
                    continue
                cropped_box, valid_ids = crop_box_with_center_constraint(
                    gt_bbox, np.array(crop_box, dtype=np.float32))
                if valid_ids.size > 0:
                    found = True
                    break
            if found:
                if 'gt_poly' in label_info and len(label_info['gt_poly']) > 0:
                    crop_polys = crop_segms(label_info['gt_poly'], valid_ids,
                                            np.array(crop_box, dtype=np.int64),
                                            h, w)
                    if [] in crop_polys:
                        delete_id = list()
                        valid_polys = list()
                        for id, crop_poly in enumerate(crop_polys):
                            if crop_poly == []:
                                delete_id.append(id)
                            else:
                                valid_polys.append(crop_poly)
                        valid_ids = np.delete(valid_ids, delete_id)
                        if len(valid_polys) == 0:
                            return (im, im_info, label_info)
                        label_info['gt_poly'] = valid_polys
                    else:
                        label_info['gt_poly'] = crop_polys
                im = crop_image(im, crop_box)
                label_info['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
                label_info['gt_class'] = np.take(
                    label_info['gt_class'], valid_ids, axis=0)
                im_info['image_shape'] = np.array(
                    [crop_box[3] - crop_box[1],
                     crop_box[2] - crop_box[0]]).astype('int32')
                if 'gt_score' in label_info:
                    label_info['gt_score'] = np.take(
                        label_info['gt_score'], valid_ids, axis=0)
                if 'is_crowd' in label_info:
                    label_info['is_crowd'] = np.take(
                        label_info['is_crowd'], valid_ids, axis=0)
                return (im, im_info, label_info)
        return (im, im_info, label_info)
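

# A minimal sketch of the IoU test in step 3(3) of the class docstring; the real
# computation uses iou_matrix from box_utils, and this standalone single-box
# version is only for illustration.
def _example_iou(box_a, box_b):
    ixmin = max(box_a[0], box_b[0])
    iymin = max(box_a[1], box_b[1])
    ixmax = min(box_a[2], box_b[2])
    iymax = min(box_a[3], box_b[3])
    inter = max(0.0, ixmax - ixmin) * max(0.0, iymax - iymin)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)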


class ArrangeFasterRCNN:
    """Gather the fields required by the FasterRCNN model for training,
    evaluation and prediction.

    Args:
        mode (str): Intended usage of the data; one of
            ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When mode is 'train', returns (im, im_resize_info, gt_bbox,
                gt_class, is_crowd), i.e. the image np.ndarray, the resize
                information relative to the original image, the ground-truth boxes,
                their classes and whether each box marks a crowd of objects.
                When mode is 'eval', returns (im, im_resize_info, im_id, im_shape,
                gt_bbox, gt_class, is_difficult), i.e. the image np.ndarray, the
                resize information relative to the original image, the image id,
                the image size, the ground-truth boxes, their classes and whether
                each object is hard to recognize.
                When mode is 'test' or 'quant', returns (im, im_resize_info,
                im_shape), i.e. the image np.ndarray, the resize information
                relative to the original image and the image size.

        Raises:
            TypeError: Invalid type of input arguments.
            ValueError: Mismatched data length.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeFasterRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_resize_info = im_info['im_resize_info']
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_crowd = label_info['is_crowd']
            outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd)
        elif self.mode == 'eval':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeFasterRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_id = im_info['im_id']
            im_shape = np.array(
                (im_info['image_shape'][0], im_info['image_shape'][1], 1),
                dtype=np.float32)
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_difficult = label_info['difficult']
            outputs = (im, im_resize_info, im_id, im_shape, gt_bbox, gt_class,
                       is_difficult)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeFasterRCNN! ' +
                                'Because the im_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_shape = np.array(
                (im_info['image_shape'][0], im_info['image_shape'][1], 1),
                dtype=np.float32)
            outputs = (im, im_resize_info, im_shape)
        return outputs


class ArrangeMaskRCNN:
    """Gather the fields required by the MaskRCNN model for training,
    evaluation and prediction.

    Args:
        mode (str): Intended usage of the data; one of
            ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When mode is 'train', returns (im, im_resize_info, gt_bbox,
                gt_class, is_crowd, gt_masks), i.e. the image np.ndarray, the resize
                information relative to the original image, the ground-truth boxes,
                their classes, whether each box marks a crowd of objects and the
                ground-truth segmentation regions.
                When mode is 'eval', returns (im, im_resize_info, im_id, im_shape),
                i.e. the image np.ndarray, the resize information relative to the
                original image, the image id and the image size.
                When mode is 'test' or 'quant', returns (im, im_resize_info,
                im_shape), i.e. the image np.ndarray, the resize information
                relative to the original image and the image size.

        Raises:
            TypeError: Invalid type of input arguments.
            ValueError: Mismatched data length.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeMaskRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_resize_info = im_info['im_resize_info']
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_crowd = label_info['is_crowd']
            assert 'gt_poly' in label_info
            segms = label_info['gt_poly']
            if len(segms) != 0:
                assert len(segms) == is_crowd.shape[0]
            gt_masks = []
            valid = True
            for i in range(len(segms)):
                segm = segms[i]
                gt_segm = []
                if is_crowd[i]:
                    gt_segm.append([[0, 0]])
                else:
                    for poly in segm:
                        if len(poly) == 0:
                            valid = False
                            break
                        gt_segm.append(np.array(poly).reshape(-1, 2))
                if (not valid) or len(gt_segm) == 0:
                    break
                gt_masks.append(gt_segm)
            outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd,
                       gt_masks)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeMaskRCNN! ' +
                                'Because the im_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_shape = np.array(
                (im_info['image_shape'][0], im_info['image_shape'][1], 1),
                dtype=np.float32)
            if self.mode == 'eval':
                im_id = im_info['im_id']
                outputs = (im, im_resize_info, im_id, im_shape)
            else:
                outputs = (im, im_resize_info, im_shape)
        return outputs


class ArrangeYOLOv3:
    """Gather the fields required by the YOLOv3 model for training,
    evaluation and prediction.

    Args:
        mode (str): Intended usage of the data; one of
            ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When mode is 'train', returns (im, gt_bbox, gt_class, gt_score,
                im_shape), i.e. the image np.ndarray, the ground-truth boxes, their
                classes, their mixup scores and the image size.
                When mode is 'eval', returns (im, im_shape, im_id, gt_bbox,
                gt_class, difficult), i.e. the image np.ndarray, the image size,
                the image id, the ground-truth boxes, their classes and whether
                each object is hard to recognize.
                When mode is 'test' or 'quant', returns (im, im_shape), i.e. the
                image np.ndarray and the image size.

        Raises:
            TypeError: Invalid type of input arguments.
            ValueError: Mismatched data length.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeYOLOv3! ' +
                    'Because the im_info and label_info can not be None!')
            im_shape = im_info['image_shape']
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            if len(label_info['gt_bbox']) != len(label_info['gt_score']):
                raise ValueError("gt num mismatch: bbox and score.")
            gt_bbox = np.zeros((50, 4), dtype=im.dtype)
            gt_class = np.zeros((50, ), dtype=np.int32)
            gt_score = np.zeros((50, ), dtype=im.dtype)
            gt_num = min(50, len(label_info['gt_bbox']))
            if gt_num > 0:
                label_info['gt_class'][:gt_num, 0] = label_info[
                    'gt_class'][:gt_num, 0] - 1
                gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
                gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
                gt_score[:gt_num] = label_info['gt_score'][:gt_num, 0]
            # convert boxes from [x1, y1, x2, y2] to [center_x, center_y, w, h]
            gt_bbox[:, 2:4] = gt_bbox[:, 2:4] - gt_bbox[:, :2]
            gt_bbox[:, :2] = gt_bbox[:, :2] + gt_bbox[:, 2:4] / 2.
            outputs = (im, gt_bbox, gt_class, gt_score, im_shape)
        elif self.mode == 'eval':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeYOLOv3! ' +
                    'Because the im_info and label_info can not be None!')
            im_shape = im_info['image_shape']
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_id = im_info['im_id']
            gt_bbox = np.zeros((50, 4), dtype=im.dtype)
            gt_class = np.zeros((50, ), dtype=np.int32)
            difficult = np.zeros((50, ), dtype=np.int32)
            gt_num = min(50, len(label_info['gt_bbox']))
            if gt_num > 0:
                label_info['gt_class'][:gt_num, 0] = label_info[
                    'gt_class'][:gt_num, 0] - 1
                gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
                gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
                difficult[:gt_num] = label_info['difficult'][:gt_num, 0]
            outputs = (im, im_shape, im_id, gt_bbox, gt_class, difficult)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeYOLOv3! ' +
                                'Because the im_info can not be None!')
            im_shape = im_info['image_shape']
            outputs = (im, im_shape)
        return outputs
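

# A minimal sketch of the box conversion performed in ArrangeYOLOv3's 'train'
# branch: [x1, y1, x2, y2] becomes [center_x, center_y, w, h].
def _example_xyxy_to_cxcywh(gt_bbox):
    boxes = gt_bbox.astype('float32').copy()
    boxes[:, 2:4] = boxes[:, 2:4] - boxes[:, :2]        # width, height
    boxes[:, :2] = boxes[:, :2] + boxes[:, 2:4] / 2.0   # center_x, center_y
    return boxes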