# det_transforms.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .ops import *
from .box_utils import *
import math
import random
import os.path as osp
import numpy as np
from PIL import Image, ImageEnhance
import cv2


class Compose:
    """Apply a list of preprocessing/augmentation operators to the input data.

    The input image of every operator is expected to have shape [H, W, C],
    where H is the image height, W the image width and C the number of channels.

    Args:
        transforms (list): List of preprocessing/augmentation operators.

    Raises:
        TypeError: If an argument has an invalid type.
        ValueError: If the data lengths do not match.
    """

    def __init__(self, transforms):
        if not isinstance(transforms, list):
            raise TypeError('The transforms must be a list!')
        if len(transforms) < 1:
            raise ValueError('The length of transforms ' + \
                             'must be equal or larger than 1!')
        self.transforms = transforms
        self.use_mixup = False
        for t in self.transforms:
            if t.__class__.__name__ == 'MixupImage':
                self.use_mixup = True

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (str/np.ndarray): Image path or image data as np.ndarray.
            im_info (dict): Image-related information with the following fields:
                - im_id (np.ndarray): Image id, with shape (1,).
                - origin_shape (np.ndarray): Original image size, with shape (2,);
                  origin_shape[0] is the height and origin_shape[1] the width.
                - mixup (list): A list [im, im_info, label_info] holding the image
                  data, image information and annotation information of the image
                  to be mixed up with the current one. The field is absent if no
                  mixup is performed in the current epoch.
            label_info (dict): Annotation-related information with the following fields:
                - gt_bbox (np.ndarray): Ground-truth box coordinates [x1, y1, x2, y2],
                  with shape (n, 4), where n is the number of ground-truth boxes.
                - gt_class (np.ndarray): Class index of each ground-truth box,
                  with shape (n, 1).
                - gt_score (np.ndarray): Mixup score of each ground-truth box,
                  with shape (n, 1).
                - gt_poly (list): Polygon segmentation of each ground-truth box,
                  given as x, y coordinates; the list has length n.
                - is_crowd (np.ndarray): Whether each ground-truth box is a crowd
                  of objects, with shape (n, 1).
                - difficult (np.ndarray): Whether each ground-truth box is hard to
                  recognize, with shape (n, 1).

        Returns:
            tuple: The fields required by the network; which fields are returned is
                determined by the last operator in transforms.
        """

        def decode_image(im_file, im_info, label_info):
            if im_info is None:
                im_info = dict()
            try:
                im = cv2.imread(im_file).astype('float32')
            except:
                raise TypeError(
                    'Can\'t read the image file {}!'.format(im_file))
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            # make default im_info with [h, w, 1]
            im_info['im_resize_info'] = np.array(
                [im.shape[0], im.shape[1], 1.], dtype=np.float32)
            # copy augment_shape from origin_shape
            im_info['augment_shape'] = np.array([im.shape[0],
                                                 im.shape[1]]).astype('int32')
            if not self.use_mixup:
                if 'mixup' in im_info:
                    del im_info['mixup']
            # decode mixup image
            if 'mixup' in im_info:
                im_info['mixup'] = \
                    decode_image(im_info['mixup'][0],
                                 im_info['mixup'][1],
                                 im_info['mixup'][2])
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)

        outputs = decode_image(im, im_info, label_info)
        im = outputs[0]
        im_info = outputs[1]
        if len(outputs) == 3:
            label_info = outputs[2]
        for op in self.transforms:
            if im is None:
                return None
            outputs = op(im, im_info, label_info)
            im = outputs[0]
        return outputs
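

# Illustrative sketch (an assumption, not part of the original API): a typical way to
# chain the operators in this module for Faster R-CNN evaluation. The referenced
# classes are all defined further below; the exact operator order is an assumption
# and should be adapted to the model being used.
def _example_faster_rcnn_eval_transforms():
    return Compose([
        Normalize(),
        ResizeByShort(short_size=800, max_size=1333),
        Padding(coarsest_stride=32),
        ArrangeFasterRCNN(mode='eval'),
    ])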


class ResizeByShort:
    """Resize the image according to its short side.

    1. Get the lengths of the long and short sides of the image.
    2. Compute the target length of the long side from the ratio between the short
       side and short_size; the resize scale for both height and width is
       short_size / (short side of the original image).
    3. If max_size > 0, adjust the scale: if the target length of the long side
       exceeds max_size, the scale becomes max_size / (long side of the original image).
    4. Resize the image with the resulting scale.

    Args:
        short_size (int): Target length of the short side. Defaults to 800.
        max_size (int): Upper limit of the target length of the long side.
            Defaults to 1333.

    Raises:
        TypeError: If an argument has an invalid type.
    """

    def __init__(self, short_size=800, max_size=1333):
        self.max_size = int(max_size)
        if not isinstance(short_size, int):
            raise TypeError(
                "Type of short_size is invalid. Must be Integer, now is {}".
                format(type(short_size)))
        self.short_size = short_size
        if not (isinstance(self.max_size, int)):
            raise TypeError("max_size: input type is invalid.")

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, the tuple is (im, im_info), i.e. the image
                data and the dict of image-related information; otherwise the tuple is
                (im, im_info, label_info), additionally including the dict of
                annotation-related information.
                The updated field of im_info is:
                - im_resize_info (np.ndarray): Resized height, resized width and the
                  scale relative to the original image, with shape (3,).

        Raises:
            TypeError: If an argument has an invalid type.
            ValueError: If the data lengths do not match.
        """
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("ResizeByShort: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('ResizeByShort: image is not 3-dimensional.')
        im_short_size = min(im.shape[0], im.shape[1])
        im_long_size = max(im.shape[0], im.shape[1])
        scale = float(self.short_size) / im_short_size
        if self.max_size > 0 and np.round(
                scale * im_long_size) > self.max_size:
            scale = float(self.max_size) / float(im_long_size)
        resized_width = int(round(im.shape[1] * scale))
        resized_height = int(round(im.shape[0] * scale))
        im_resize_info = [resized_height, resized_width, scale]
        im = cv2.resize(
            im, (resized_width, resized_height),
            interpolation=cv2.INTER_LINEAR)
        im_info['im_resize_info'] = np.array(im_resize_info).astype(np.float32)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
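

# Worked example (illustrative, not used by the module): with the defaults
# short_size=800, max_size=1333, a 720x1280 image would first get scale 800/720,
# but 1280 * 800/720 exceeds 1333, so the scale is clipped to 1333/1280 and the
# output becomes 750x1333.
def _example_resize_by_short_shape(h=720, w=1280, short_size=800, max_size=1333):
    scale = float(short_size) / min(h, w)
    if max_size > 0 and round(scale * max(h, w)) > max_size:
        scale = float(max_size) / max(h, w)
    return int(round(h * scale)), int(round(w * scale))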


class Padding:
    """Pad the height and width of the image to a multiple of coarsest_stride.
    For example, given a [300, 640] input and coarsest_stride=32, 300 is not a
    multiple of 32, so the image is zero-padded on the right and at the bottom
    and the output becomes [320, 640].

    1. If coarsest_stride is 1, return directly.
    2. Get the image height H and width W.
    3. Compute the padded height H_new and width W_new.
    4. Build an all-zero np.ndarray of shape (H_new, W_new, 3) and paste the
       original image into its top-left corner.

    Args:
        coarsest_stride (int): Height and width of the padded image are multiples
            of this value. Defaults to 1.
    """

    def __init__(self, coarsest_stride=1):
        self.coarsest_stride = coarsest_stride

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, the tuple is (im, im_info), i.e. the image
                data and the dict of image-related information; otherwise the tuple is
                (im, im_info, label_info), additionally including the dict of
                annotation-related information.

        Raises:
            TypeError: If an argument has an invalid type.
            ValueError: If the data lengths do not match.
        """
        if self.coarsest_stride == 1:
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("Padding: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('Padding: image is not 3-dimensional.')
        im_h, im_w, im_c = im.shape[:]
        if self.coarsest_stride > 1:
            padding_im_h = int(
                np.ceil(im_h / self.coarsest_stride) * self.coarsest_stride)
            padding_im_w = int(
                np.ceil(im_w / self.coarsest_stride) * self.coarsest_stride)
        padding_im = np.zeros((padding_im_h, padding_im_w, im_c),
                              dtype=np.float32)
        padding_im[:im_h, :im_w, :] = im
        if label_info is None:
            return (padding_im, im_info)
        else:
            return (padding_im, im_info, label_info)
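

# Worked example (illustrative, not used by the module): with coarsest_stride=32,
# a 300x640 image is padded to ceil(300/32)*32 = 320 rows and ceil(640/32)*32 = 640
# columns, i.e. a 320x640 output whose extra bottom rows are zero-filled.
def _example_padded_shape(h=300, w=640, coarsest_stride=32):
    return (int(np.ceil(float(h) / coarsest_stride) * coarsest_stride),
            int(np.ceil(float(w) / coarsest_stride) * coarsest_stride))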


class Resize:
    """Resize the image.

    - When target_size is an int, the image is resized to
      [target_size, target_size] with the given interpolation method.
    - When target_size is a list or tuple, the image is resized to target_size
      with the given interpolation method.

    Note: when the interpolation method is "RANDOM", one of the available
    interpolation methods is picked at random for each call.

    Args:
        target_size (int/list/tuple): Target size of the resized image. Defaults to 608.
        interp (str): Interpolation method, matching the OpenCV interpolation flags;
            one of ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'].
            Defaults to "LINEAR".

    Raises:
        TypeError: If an argument has an invalid type.
        ValueError: If the interpolation method is not in
            ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'].
    """

    # The interpolation mode
    interp_dict = {
        'NEAREST': cv2.INTER_NEAREST,
        'LINEAR': cv2.INTER_LINEAR,
        'CUBIC': cv2.INTER_CUBIC,
        'AREA': cv2.INTER_AREA,
        'LANCZOS4': cv2.INTER_LANCZOS4
    }

    def __init__(self, target_size=608, interp='LINEAR'):
        self.interp = interp
        if not (interp == "RANDOM" or interp in self.interp_dict):
            raise ValueError("interp should be one of {}".format(
                self.interp_dict.keys()))
        if isinstance(target_size, list) or isinstance(target_size, tuple):
            if len(target_size) != 2:
                raise TypeError(
                    'when target is list or tuple, it should include 2 elements, but it is {}'
                    .format(target_size))
        elif not isinstance(target_size, int):
            raise TypeError(
                "Type of target_size is invalid. Must be Integer or List or tuple, now is {}"
                .format(type(target_size)))
        self.target_size = target_size

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, the tuple is (im, im_info), i.e. the image
                data and the dict of image-related information; otherwise the tuple is
                (im, im_info, label_info), additionally including the dict of
                annotation-related information.

        Raises:
            TypeError: If an argument has an invalid type.
            ValueError: If the data lengths do not match.
        """
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("Resize: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('Resize: image is not 3-dimensional.')
        if self.interp == "RANDOM":
            interp = random.choice(list(self.interp_dict.keys()))
        else:
            interp = self.interp
        im = resize(im, self.target_size, self.interp_dict[interp])
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)


class RandomHorizontalFlip:
    """Randomly flip the image, bounding boxes and segmentation masks;
    a data augmentation operator used during training.

    1. Sample a random number in [0, 1]; if it is smaller than the flip
       probability, perform steps 2-4, otherwise return directly.
    2. Flip the image horizontally.
    3. Compute the coordinates of the flipped ground-truth boxes and update
       gt_bbox in label_info.
    4. Compute the coordinates of the flipped segmentation polygons and update
       gt_poly in label_info.

    Args:
        prob (float): Probability of flipping horizontally. Defaults to 0.5.

    Raises:
        TypeError: If an argument has an invalid type.
    """

    def __init__(self, prob=0.5):
        self.prob = prob
        if not isinstance(self.prob, float):
            raise TypeError("RandomHorizontalFlip: input type is invalid.")

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, the tuple is (im, im_info), i.e. the image
                data and the dict of image-related information; otherwise the tuple is
                (im, im_info, label_info), additionally including the dict of
                annotation-related information.
                The updated fields of label_info are:
                - gt_bbox (np.ndarray): Flipped box coordinates [x1, y1, x2, y2],
                  with shape (n, 4), where n is the number of ground-truth boxes.
                - gt_poly (list): Flipped x, y coordinates of the segmentation
                  polygons; the list has length n.

        Raises:
            TypeError: If an argument has an invalid type.
            ValueError: If the data lengths do not match.
        """
        if not isinstance(im, np.ndarray):
            raise TypeError(
                "RandomHorizontalFlip: image is not a numpy array.")
        if len(im.shape) != 3:
            raise ValueError(
                "RandomHorizontalFlip: image is not 3-dimensional.")
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomHorizontalFlip! ' +
                'Because the im_info and label_info can not be None!')
        if 'augment_shape' not in im_info:
            raise TypeError('Cannot do RandomHorizontalFlip! ' + \
                            'Because augment_shape is not in im_info!')
        if 'gt_bbox' not in label_info:
            raise TypeError('Cannot do RandomHorizontalFlip! ' + \
                            'Because gt_bbox is not in label_info!')
        augment_shape = im_info['augment_shape']
        gt_bbox = label_info['gt_bbox']
        height = augment_shape[0]
        width = augment_shape[1]
        if np.random.uniform(0, 1) < self.prob:
            im = horizontal_flip(im)
            if gt_bbox.shape[0] == 0:
                if label_info is None:
                    return (im, im_info)
                else:
                    return (im, im_info, label_info)
            label_info['gt_bbox'] = box_horizontal_flip(gt_bbox, width)
            if 'gt_poly' in label_info and \
                    len(label_info['gt_poly']) != 0:
                label_info['gt_poly'] = segms_horizontal_flip(
                    label_info['gt_poly'], height, width)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)


class Normalize:
    """Normalize the image.

    1. Scale the pixel values of the image to the range [0.0, 1.0].
    2. Subtract the mean and divide by the standard deviation.

    Args:
        mean (list): Mean of the dataset. Defaults to [0.485, 0.456, 0.406].
        std (list): Standard deviation of the dataset. Defaults to [0.229, 0.224, 0.225].

    Raises:
        TypeError: If an argument has an invalid type.
    """

    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        self.mean = mean
        self.std = std
        if not (isinstance(self.mean, list) and isinstance(self.std, list)):
            raise TypeError("NormalizeImage: input type is invalid.")
        from functools import reduce
        if reduce(lambda x, y: x * y, self.std) == 0:
            raise TypeError('NormalizeImage: std is invalid!')

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, the tuple is (im, im_info), i.e. the image
                data and the dict of image-related information; otherwise the tuple is
                (im, im_info, label_info), additionally including the dict of
                annotation-related information.
        """
        mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
        std = np.array(self.std)[np.newaxis, np.newaxis, :]
        im = normalize(im, mean, std)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
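

# Worked example (illustrative, not used by the module; assumes the imported
# `normalize` helper scales pixels to [0, 1] before standardising, as the class
# docstring describes): a mid-grey pixel (127.5, 127.5, 127.5) maps to roughly
# (0.066, 0.196, 0.418) with the default ImageNet statistics.
def _example_normalized_pixel(pixel=(127.5, 127.5, 127.5),
                              mean=(0.485, 0.456, 0.406),
                              std=(0.229, 0.224, 0.225)):
    return tuple((p / 255.0 - m) / s for p, m, s in zip(pixel, mean, std))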


class RandomDistort:
    """Randomly distort the pixel values of the image with a given probability;
    a data augmentation operator used during training.

    1. Shuffle the order of the distortion operations.
    2. In that order, randomly distort the image within [-range, range] with the
       corresponding probability for each operation.

    Args:
        brightness_range (float): Range of the brightness factor. Defaults to 0.5.
        brightness_prob (float): Probability of adjusting brightness. Defaults to 0.5.
        contrast_range (float): Range of the contrast factor. Defaults to 0.5.
        contrast_prob (float): Probability of adjusting contrast. Defaults to 0.5.
        saturation_range (float): Range of the saturation factor. Defaults to 0.5.
        saturation_prob (float): Probability of adjusting saturation. Defaults to 0.5.
        hue_range (int): Range of the hue factor. Defaults to 18.
        hue_prob (float): Probability of adjusting hue. Defaults to 0.5.
    """

    def __init__(self,
                 brightness_range=0.5,
                 brightness_prob=0.5,
                 contrast_range=0.5,
                 contrast_prob=0.5,
                 saturation_range=0.5,
                 saturation_prob=0.5,
                 hue_range=18,
                 hue_prob=0.5):
        self.brightness_range = brightness_range
        self.brightness_prob = brightness_prob
        self.contrast_range = contrast_range
        self.contrast_prob = contrast_prob
        self.saturation_range = saturation_range
        self.saturation_prob = saturation_prob
        self.hue_range = hue_range
        self.hue_prob = hue_prob

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, the tuple is (im, im_info), i.e. the image
                data and the dict of image-related information; otherwise the tuple is
                (im, im_info, label_info), additionally including the dict of
                annotation-related information.
        """
        brightness_lower = 1 - self.brightness_range
        brightness_upper = 1 + self.brightness_range
        contrast_lower = 1 - self.contrast_range
        contrast_upper = 1 + self.contrast_range
        saturation_lower = 1 - self.saturation_range
        saturation_upper = 1 + self.saturation_range
        hue_lower = -self.hue_range
        hue_upper = self.hue_range
        ops = [brightness, contrast, saturation, hue]
        random.shuffle(ops)
        params_dict = {
            'brightness': {
                'brightness_lower': brightness_lower,
                'brightness_upper': brightness_upper
            },
            'contrast': {
                'contrast_lower': contrast_lower,
                'contrast_upper': contrast_upper
            },
            'saturation': {
                'saturation_lower': saturation_lower,
                'saturation_upper': saturation_upper
            },
            'hue': {
                'hue_lower': hue_lower,
                'hue_upper': hue_upper
            }
        }
        prob_dict = {
            'brightness': self.brightness_prob,
            'contrast': self.contrast_prob,
            'saturation': self.saturation_prob,
            'hue': self.hue_prob
        }
        im = im.astype('uint8')
        im = Image.fromarray(im)
        for id in range(4):
            params = params_dict[ops[id].__name__]
            prob = prob_dict[ops[id].__name__]
            params['im'] = im
            if np.random.uniform(0, 1) < prob:
                im = ops[id](**params)
        im = np.asarray(im).astype('float32')
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
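

# Worked example (illustrative, not used by the module): with the default
# brightness_range=0.5, the brightness factor is drawn from [1 - 0.5, 1 + 0.5],
# i.e. the image may become up to 50% darker or brighter; analogous intervals
# apply to contrast and saturation, while hue is shifted within [-18, 18].
def _example_distort_bounds(value_range=0.5):
    return 1 - value_range, 1 + value_range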


class MixupImage:
    """Apply mixup to the image; a data augmentation operator used during training.
    Currently only the YOLOv3 model supports this transform.

    If there is no mixup field in im_info, return directly; otherwise:
    1. Sample a random factor from a beta distribution.
    2.
       - If factor >= 1.0, remove the mixup field from im_info and return.
       - If factor <= 0.0, return the mixup field of im_info and remove it
         from im_info.
       - Otherwise:
         (1) Multiply the original image by factor, the mixup image by (1 - factor),
             and add the two results.
         (2) Concatenate the ground-truth boxes of the two images.
         (3) Concatenate the ground-truth classes of the two images.
         (4) Multiply the mixup scores of the original boxes by factor, those of
             the mixup boxes by (1 - factor), and concatenate the two results.
    3. Update augment_shape in im_info.

    Args:
        alpha (float): First parameter of the beta distribution used to sample the
            mixing factor. Defaults to 1.5.
        beta (float): Second parameter of the beta distribution used to sample the
            mixing factor. Defaults to 1.5.
        mixup_epoch (int): Apply mixup during the first mixup_epoch epochs; a value
            of -1 disables the strategy. Defaults to -1.

    Raises:
        ValueError: If the data lengths do not match.
    """

    def __init__(self, alpha=1.5, beta=1.5, mixup_epoch=-1):
        self.alpha = alpha
        self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha should be positive in MixupImage")
        if self.beta <= 0.0:
            raise ValueError("beta should be positive in MixupImage")
        self.mixup_epoch = mixup_epoch

    def _mixup_img(self, img1, img2, factor):
        h = max(img1.shape[0], img2.shape[0])
        w = max(img1.shape[1], img2.shape[1])
        img = np.zeros((h, w, img1.shape[2]), 'float32')
        img[:img1.shape[0], :img1.shape[1], :] = \
            img1.astype('float32') * factor
        img[:img2.shape[0], :img2.shape[1], :] += \
            img2.astype('float32') * (1.0 - factor)
        return img.astype('uint8')

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, the tuple is (im, im_info), i.e. the image
                data and the dict of image-related information; otherwise the tuple is
                (im, im_info, label_info), additionally including the dict of
                annotation-related information.
                The updated field of im_info is:
                - augment_shape (np.ndarray): Height and width of the mixed-up image,
                  with shape (2,).
                The removed field of im_info is:
                - mixup (list): Information of the image mixed up with the current one.
                The updated fields of label_info are:
                - gt_bbox (np.ndarray): Box coordinates after mixup, with shape (n, 4),
                  where n is the number of ground-truth boxes.
                - gt_class (np.ndarray): Class index of each box after mixup,
                  with shape (n, 1).
                - gt_score (np.ndarray): Mixup score of each box after mixup,
                  with shape (n, 1).

        Raises:
            TypeError: If an argument has an invalid type.
        """
        if im_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Because the im_info can not be None!')
        if 'mixup' not in im_info:
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        if im_info['epoch'] > self.mixup_epoch \
                or factor >= 1.0:
            im_info.pop('mixup')
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        if factor <= 0.0:
            return im_info.pop('mixup')
        im = self._mixup_img(im, im_info['mixup'][0], factor)
        if label_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Because the label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info or \
                'gt_score' not in label_info:
            raise TypeError('Cannot do MixupImage! ' + \
                            'Because gt_bbox/gt_class/gt_score is not in label_info!')
        gt_bbox1 = label_info['gt_bbox']
        gt_bbox2 = im_info['mixup'][2]['gt_bbox']
        gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
        gt_class1 = label_info['gt_class']
        gt_class2 = im_info['mixup'][2]['gt_class']
        gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
        gt_score1 = label_info['gt_score']
        gt_score2 = im_info['mixup'][2]['gt_score']
        gt_score = np.concatenate(
            (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
        label_info['gt_bbox'] = gt_bbox
        label_info['gt_score'] = gt_score
        label_info['gt_class'] = gt_class
        im_info['augment_shape'] = np.array([im.shape[0],
                                             im.shape[1]]).astype('int32')
        im_info.pop('mixup')
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
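

# Worked example (illustrative, not used by the module): with factor = 0.6 the
# blended image is 0.6 * img1 + 0.4 * img2 on the region where the two inputs
# overlap on a zero-initialised canvas large enough for both, mirroring
# MixupImage._mixup_img.
def _example_mixup_blend(factor=0.6):
    img1 = np.full((2, 2, 3), 200, dtype='float32')
    img2 = np.full((3, 2, 3), 100, dtype='float32')
    h = max(img1.shape[0], img2.shape[0])
    w = max(img1.shape[1], img2.shape[1])
    out = np.zeros((h, w, 3), dtype='float32')
    out[:img1.shape[0], :img1.shape[1], :] += img1 * factor
    out[:img2.shape[0], :img2.shape[1], :] += img2 * (1.0 - factor)
    return out  # rows covered by both inputs hold 0.6 * 200 + 0.4 * 100 = 160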


class RandomExpand:
    """Randomly expand the image; a data augmentation operator used during training.

    1. Randomly pick an expansion ratio (expansion only happens when the ratio is
       greater than 1).
    2. Compute the size of the expanded image.
    3. Create an image filled with the dataset mean and paste the original image
       onto it at a random position.
    4. Convert the ground-truth box coordinates according to the paste position.

    Args:
        max_ratio (float): Maximum expansion ratio. Defaults to 4.0.
        prob (float): Probability of expanding. Defaults to 0.5.
        mean (list): Mean of the dataset in the 0-255 range.
            Defaults to [127.5, 127.5, 127.5].
    """

    def __init__(self, max_ratio=4., prob=0.5, mean=[127.5, 127.5, 127.5]):
        self.max_ratio = max_ratio
        self.mean = mean
        self.prob = prob

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, the tuple is (im, im_info), i.e. the image
                data and the dict of image-related information; otherwise the tuple is
                (im, im_info, label_info), additionally including the dict of
                annotation-related information.
                The updated field of im_info is:
                - augment_shape (np.ndarray): Height and width of the expanded image,
                  with shape (2,).
                The updated fields of label_info are:
                - gt_bbox (np.ndarray): Box coordinates after expansion, with shape
                  (n, 4), where n is the number of ground-truth boxes.
                - gt_class (np.ndarray): Class index of each box after expansion,
                  with shape (n, 1).

        Raises:
            TypeError: If an argument has an invalid type.
        """
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomExpand! ' +
                'Because the im_info and label_info can not be None!')
        if 'augment_shape' not in im_info:
            raise TypeError('Cannot do RandomExpand! ' + \
                            'Because augment_shape is not in im_info!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info:
            raise TypeError('Cannot do RandomExpand! ' + \
                            'Because gt_bbox/gt_class is not in label_info!')
        prob = np.random.uniform(0, 1)
        augment_shape = im_info['augment_shape']
        im_width = augment_shape[1]
        im_height = augment_shape[0]
        gt_bbox = label_info['gt_bbox']
        gt_class = label_info['gt_class']
        if prob < self.prob:
            if self.max_ratio - 1 >= 0.01:
                expand_ratio = np.random.uniform(1, self.max_ratio)
                height = int(im_height * expand_ratio)
                width = int(im_width * expand_ratio)
                h_off = math.floor(np.random.uniform(0, height - im_height))
                w_off = math.floor(np.random.uniform(0, width - im_width))
                expand_bbox = [
                    -w_off / im_width, -h_off / im_height,
                    (width - w_off) / im_width, (height - h_off) / im_height
                ]
                expand_im = np.ones((height, width, 3))
                expand_im = np.uint8(expand_im * np.squeeze(self.mean))
                expand_im = Image.fromarray(expand_im)
                im = im.astype('uint8')
                im = Image.fromarray(im)
                expand_im.paste(im, (int(w_off), int(h_off)))
                expand_im = np.asarray(expand_im)
                for i in range(gt_bbox.shape[0]):
                    gt_bbox[i][0] = gt_bbox[i][0] / im_width
                    gt_bbox[i][1] = gt_bbox[i][1] / im_height
                    gt_bbox[i][2] = gt_bbox[i][2] / im_width
                    gt_bbox[i][3] = gt_bbox[i][3] / im_height
                gt_bbox, gt_class, _ = filter_and_process(
                    expand_bbox, gt_bbox, gt_class)
                for i in range(gt_bbox.shape[0]):
                    gt_bbox[i][0] = gt_bbox[i][0] * width
                    gt_bbox[i][1] = gt_bbox[i][1] * height
                    gt_bbox[i][2] = gt_bbox[i][2] * width
                    gt_bbox[i][3] = gt_bbox[i][3] * height
                im = expand_im.astype('float32')
                label_info['gt_bbox'] = gt_bbox
                label_info['gt_class'] = gt_class
                im_info['augment_shape'] = np.array([height,
                                                     width]).astype('int32')
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
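

# Worked example (illustrative, not used by the module; assumes filter_and_process
# re-expresses boxes relative to the expand window, as its use above implies): after
# pasting the original image at offset (w_off, h_off) on the expanded canvas, a
# ground-truth box simply shifts by that offset, e.g. (10, 10, 50, 50) with
# w_off=40, h_off=30 becomes (50, 40, 90, 80).
def _example_expanded_box(box=(10, 10, 50, 50), w_off=40, h_off=30):
    x1, y1, x2, y2 = box
    return (x1 + w_off, y1 + h_off, x2 + w_off, y2 + h_off)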


class RandomCrop:
    """Randomly crop the image.

    1. Compute candidate crop regions according to batch_sampler:
       (1) Compute the height and width of a random crop from min scale, max scale,
           min aspect ratio and max aspect ratio.
       (2) Randomly pick the top-left corner of the crop given that height and width.
       (3) Filter the candidate crop regions:
           - When satisfy_all is True, a candidate is kept only if every ground-truth
             box overlaps it sufficiently.
           - When satisfy_all is False, a candidate is kept as soon as one
             ground-truth box overlaps it sufficiently.
    2. Iterate over the candidate crop regions:
       (1) Drop a ground-truth box if it does not overlap the candidate region or
           its centre is not inside the region.
       (2) Compute the box coordinates relative to the candidate region and select
           the corresponding classes and mixup scores.
       (3) If avoid_no_bbox is False, return the cropped result immediately;
           otherwise keep searching until a crop containing at least one
           ground-truth box is found before returning.

    Args:
        batch_sampler (list): Combinations of random-crop parameters; each
            combination holds 8 values (see the sketch after this class):
            - max sample (int): Maximum number of crop regions that satisfy this
              combination.
            - max trial (int): Number of attempts to satisfy this combination.
            - min scale (float): Lower bound on the per-edge shrink ratio of the
              crop relative to the original image.
            - max scale (float): Upper bound on the per-edge shrink ratio of the
              crop relative to the original image.
            - min aspect ratio (float): Lower bound on the aspect ratio of the crop.
            - max aspect ratio (float): Upper bound on the aspect ratio of the crop.
            - min overlap (float): Lower bound on the overlap between a ground-truth
              box and the crop.
            - max overlap (float): Upper bound on the overlap between a ground-truth
              box and the crop.
            Defaults to None, in which case the following settings are used:
            [[1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 1.0],
             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 1.0],
             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 1.0],
             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 1.0],
             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 1.0],
             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0]]
        satisfy_all (bool): Whether every ground-truth box must satisfy the
            constraints for a candidate to be kept. Defaults to False.
        avoid_no_bbox (bool): Whether to keep searching for a crop that still
            contains ground-truth boxes. Defaults to True.
    """

    def __init__(self,
                 batch_sampler=None,
                 satisfy_all=False,
                 avoid_no_bbox=True):
        if batch_sampler is None:
            batch_sampler = [[1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
                             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 1.0],
                             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 1.0],
                             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 1.0],
                             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 1.0],
                             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 1.0],
                             [1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0]]
        self.batch_sampler = batch_sampler
        self.satisfy_all = satisfy_all
        self.avoid_no_bbox = avoid_no_bbox

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, the tuple is (im, im_info), i.e. the image
                data and the dict of image-related information; otherwise the tuple is
                (im, im_info, label_info), additionally including the dict of
                annotation-related information.
                The updated fields of label_info are:
                - gt_bbox (np.ndarray): Box coordinates after cropping, with shape
                  (n, 4), where n is the number of ground-truth boxes.
                - gt_class (np.ndarray): Class index of each box after cropping,
                  with shape (n, 1).
                - gt_score (np.ndarray): Mixup score of each box after cropping,
                  with shape (n, 1).

        Raises:
            TypeError: If an argument has an invalid type.
        """
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomCrop! ' +
                'Because the im_info and label_info can not be None!')
        if 'augment_shape' not in im_info:
            raise TypeError('Cannot do RandomCrop! ' + \
                            'Because augment_shape is not in im_info!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info:
            raise TypeError('Cannot do RandomCrop! ' + \
                            'Because gt_bbox/gt_class is not in label_info!')
        augment_shape = im_info['augment_shape']
        im_width = augment_shape[1]
        im_height = augment_shape[0]
        gt_bbox = label_info['gt_bbox']
        gt_bbox_tmp = gt_bbox.copy()
        for i in range(gt_bbox_tmp.shape[0]):
            gt_bbox_tmp[i][0] = gt_bbox[i][0] / im_width
            gt_bbox_tmp[i][1] = gt_bbox[i][1] / im_height
            gt_bbox_tmp[i][2] = gt_bbox[i][2] / im_width
            gt_bbox_tmp[i][3] = gt_bbox[i][3] / im_height
        gt_class = label_info['gt_class']
        gt_score = None
        if 'gt_score' in label_info:
            gt_score = label_info['gt_score']
        sampled_bbox = []
        gt_bbox_tmp = gt_bbox_tmp.tolist()
        for sampler in self.batch_sampler:
            found = 0
            for i in range(sampler[1]):
                if found >= sampler[0]:
                    break
                sample_bbox = generate_sample_bbox(sampler)
                if satisfy_sample_constraint(sampler, sample_bbox, gt_bbox_tmp,
                                             self.satisfy_all):
                    sampled_bbox.append(sample_bbox)
                    found = found + 1
        im = np.array(im)
        while sampled_bbox:
            idx = int(np.random.uniform(0, len(sampled_bbox)))
            sample_bbox = sampled_bbox.pop(idx)
            sample_bbox = clip_bbox(sample_bbox)
            crop_bbox, crop_class, crop_score = \
                filter_and_process(sample_bbox, gt_bbox_tmp, gt_class, gt_score)
            if self.avoid_no_bbox:
                if len(crop_bbox) < 1:
                    continue
            xmin = int(sample_bbox[0] * im_width)
            xmax = int(sample_bbox[2] * im_width)
            ymin = int(sample_bbox[1] * im_height)
            ymax = int(sample_bbox[3] * im_height)
            im = im[ymin:ymax, xmin:xmax]
            for i in range(crop_bbox.shape[0]):
                crop_bbox[i][0] = crop_bbox[i][0] * (xmax - xmin)
                crop_bbox[i][1] = crop_bbox[i][1] * (ymax - ymin)
                crop_bbox[i][2] = crop_bbox[i][2] * (xmax - xmin)
                crop_bbox[i][3] = crop_bbox[i][3] * (ymax - ymin)
            label_info['gt_bbox'] = crop_bbox
            label_info['gt_class'] = crop_class
            label_info['gt_score'] = crop_score
            im_info['augment_shape'] = np.array([ymax - ymin,
                                                 xmax - xmin]).astype('int32')
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
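

# Illustrative helper (an assumption, not part of the original API): names the eight
# positional fields of one batch_sampler entry documented in RandomCrop.
def _example_unpack_sampler(sampler=(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 1.0)):
    keys = ('max_sample', 'max_trial', 'min_scale', 'max_scale',
            'min_aspect_ratio', 'max_aspect_ratio', 'min_overlap', 'max_overlap')
    return dict(zip(keys, sampler))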


class ArrangeFasterRCNN:
    """Gather the fields required by FasterRCNN for training/evaluation/prediction.

    Args:
        mode (str): Purpose of the data; one of ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: If mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When mode is 'train', returns (im, im_resize_info, gt_bbox, gt_class,
                is_crowd), i.e. the image data, the resize information relative to the
                original image, the ground-truth boxes, their classes and whether each
                box is a crowd of objects. When mode is 'eval', returns
                (im, im_resize_info, im_id, im_shape, gt_bbox, gt_class, is_difficult),
                i.e. the image data, the resize information, the image id, the image
                size, the ground-truth boxes, their classes and whether each box is
                hard to recognize. When mode is 'test' or 'quant', returns
                (im, im_resize_info, im_shape), i.e. the image data, the resize
                information and the image size.

        Raises:
            TypeError: If an argument has an invalid type.
            ValueError: If the data lengths do not match.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeFasterRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_resize_info = im_info['im_resize_info']
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_crowd = label_info['is_crowd']
            outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd)
        elif self.mode == 'eval':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeFasterRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_id = im_info['im_id']
            im_shape = np.array(
                (im_info['augment_shape'][0], im_info['augment_shape'][1], 1),
                dtype=np.float32)
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_difficult = label_info['difficult']
            outputs = (im, im_resize_info, im_id, im_shape, gt_bbox, gt_class,
                       is_difficult)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeFasterRCNN! ' +
                                'Because the im_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_shape = np.array(
                (im_info['augment_shape'][0], im_info['augment_shape'][1], 1),
                dtype=np.float32)
            outputs = (im, im_resize_info, im_shape)
        return outputs


class ArrangeMaskRCNN:
    """Gather the fields required by MaskRCNN for training/evaluation/prediction.

    Args:
        mode (str): Purpose of the data; one of ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: If mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When mode is 'train', returns (im, im_resize_info, gt_bbox, gt_class,
                is_crowd, gt_masks), i.e. the image data, the resize information
                relative to the original image, the ground-truth boxes, their classes,
                whether each box is a crowd of objects, and the ground-truth masks.
                When mode is 'eval', returns (im, im_resize_info, im_id, im_shape),
                i.e. the image data, the resize information, the image id and the image
                size. When mode is 'test' or 'quant', returns
                (im, im_resize_info, im_shape), i.e. the image data, the resize
                information and the image size.

        Raises:
            TypeError: If an argument has an invalid type.
            ValueError: If the data lengths do not match.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeTrainMaskRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_resize_info = im_info['im_resize_info']
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_crowd = label_info['is_crowd']
            assert 'gt_poly' in label_info
            segms = label_info['gt_poly']
            if len(segms) != 0:
                assert len(segms) == is_crowd.shape[0]
            gt_masks = []
            valid = True
            for i in range(len(segms)):
                segm = segms[i]
                gt_segm = []
                if is_crowd[i]:
                    gt_segm.append([[0, 0]])
                else:
                    for poly in segm:
                        if len(poly) == 0:
                            valid = False
                            break
                        gt_segm.append(np.array(poly).reshape(-1, 2))
                if (not valid) or len(gt_segm) == 0:
                    break
                gt_masks.append(gt_segm)
            outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd,
                       gt_masks)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeMaskRCNN! ' +
                                'Because the im_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_shape = np.array(
                (im_info['augment_shape'][0], im_info['augment_shape'][1], 1),
                dtype=np.float32)
            if self.mode == 'eval':
                im_id = im_info['im_id']
                outputs = (im, im_resize_info, im_id, im_shape)
            else:
                outputs = (im, im_resize_info, im_shape)
        return outputs


class ArrangeYOLOv3:
    """Gather the fields required by YOLOv3 for training/evaluation/prediction.

    Args:
        mode (str): Purpose of the data; one of ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: If mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When mode is 'train', returns (im, gt_bbox, gt_class, gt_score,
                im_shape), i.e. the image data, the ground-truth boxes, their classes,
                their mixup scores and the image size. When mode is 'eval', returns
                (im, im_shape, im_id, gt_bbox, gt_class, difficult), i.e. the image
                data, the image size, the image id, the ground-truth boxes, their
                classes and whether each box is hard to recognize. When mode is 'test'
                or 'quant', returns (im, im_shape), i.e. the image data and the image
                size.

        Raises:
            TypeError: If an argument has an invalid type.
            ValueError: If the data lengths do not match.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeYolov3! ' +
                    'Because the im_info and label_info can not be None!')
            im_shape = im_info['augment_shape']
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            if len(label_info['gt_bbox']) != len(label_info['gt_score']):
                raise ValueError("gt num mismatch: bbox and score.")
            gt_bbox = np.zeros((50, 4), dtype=im.dtype)
            gt_class = np.zeros((50, ), dtype=np.int32)
            gt_score = np.zeros((50, ), dtype=im.dtype)
            gt_num = min(50, len(label_info['gt_bbox']))
            if gt_num > 0:
                label_info['gt_class'][:gt_num, 0] = label_info[
                    'gt_class'][:gt_num, 0] - 1
                gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
                gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
                gt_score[:gt_num] = label_info['gt_score'][:gt_num, 0]
            # parse [x1, y1, x2, y2] to [x, y, w, h]
            gt_bbox[:, 2:4] = gt_bbox[:, 2:4] - gt_bbox[:, :2]
            gt_bbox[:, :2] = gt_bbox[:, :2] + gt_bbox[:, 2:4] / 2.
            outputs = (im, gt_bbox, gt_class, gt_score, im_shape)
        elif self.mode == 'eval':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeYolov3! ' +
                    'Because the im_info and label_info can not be None!')
            im_shape = im_info['augment_shape']
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_id = im_info['im_id']
            gt_bbox = np.zeros((50, 4), dtype=im.dtype)
            gt_class = np.zeros((50, ), dtype=np.int32)
            difficult = np.zeros((50, ), dtype=np.int32)
            gt_num = min(50, len(label_info['gt_bbox']))
            if gt_num > 0:
                label_info['gt_class'][:gt_num, 0] = label_info[
                    'gt_class'][:gt_num, 0] - 1
                gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
                gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
                difficult[:gt_num] = label_info['difficult'][:gt_num, 0]
            outputs = (im, im_shape, im_id, gt_bbox, gt_class, difficult)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeYolov3! ' +
                                'Because the im_info can not be None!')
            im_shape = im_info['augment_shape']
            outputs = (im, im_shape)
        return outputs
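

# Illustrative sketch (not part of the original API): a YOLOv3 training pipeline
# assembled from the operators in this module, ending with ArrangeYOLOv3 so the
# returned tuple matches what the network expects. The chosen operators, their
# order and the mixup_epoch value are assumptions.
def _example_yolov3_train_transforms():
    return Compose([
        MixupImage(alpha=1.5, beta=1.5, mixup_epoch=250),
        RandomDistort(),
        RandomExpand(),
        RandomCrop(),
        Resize(target_size=608, interp='RANDOM'),
        RandomHorizontalFlip(),
        Normalize(),
        ArrangeYOLOv3(mode='train'),
    ])


# Worked example (illustrative): the [x1, y1, x2, y2] -> [cx, cy, w, h] conversion
# performed in ArrangeYOLOv3 turns a box (10, 20, 50, 80) into width/height (40, 60)
# with centre (30, 50).
def _example_xyxy_to_cxcywh(box=(10., 20., 50., 80.)):
    x1, y1, x2, y2 = box
    w, h = x2 - x1, y2 - y1
    return (x1 + w / 2., y1 + h / 2., w, h)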