crop_image_regions.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
from typing import List, Tuple

import cv2
import numpy as np
from numpy.linalg import norm
from shapely.geometry import Polygon

from .base_operator import BaseOperator
from .seal_det_warp import AutoRectifier


class CropByBoxes(BaseOperator):
    """Crop Image by Boxes"""

    entities = "CropByBoxes"

    def __init__(self) -> None:
        """Initializes the class."""
        super().__init__()

    def __call__(self, img: np.ndarray, boxes: List[dict]) -> List[dict]:
        """
        Process the input image and bounding boxes to produce a list of cropped images
        with their corresponding bounding box coordinates and labels.

        Args:
            img (np.ndarray): The input image as a NumPy array.
            boxes (list[dict]): A list of dictionaries, each containing bounding box
                information including 'cls_id' (class ID), 'coordinate' (bounding box
                coordinates as a list or tuple: left, top, right, bottom),
                and optionally 'label' (label text).

        Returns:
            list[dict]: A list of dictionaries, each containing a cropped image ('img'),
                the original bounding box coordinates ('box'), and the label ('label').
        """
        output_list = []
        for bbox_info in boxes:
            label_id = bbox_info["cls_id"]
            box = bbox_info["coordinate"]
            label = bbox_info.get("label", label_id)
            xmin, ymin, xmax, ymax = [int(i) for i in box]
            img_crop = img[ymin:ymax, xmin:xmax].copy()
            output_list.append({"img": img_crop, "box": box, "label": label})
        return output_list
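
    # Usage sketch (illustrative, not part of the original module): the box dicts
    # use the keys documented in `__call__` above; the image and coordinates are
    # made-up values for demonstration only.
    #
    #     cropper = CropByBoxes()
    #     crops = cropper(image, [{"cls_id": 0, "coordinate": [10, 20, 110, 80], "label": "table"}])
    #     crops[0]["img"]    # the (60, 100, ...) sub-image
    #     crops[0]["label"]  # "table"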


class CropByPolys(BaseOperator):
    """Crop Image by Polys"""

    entities = "CropByPolys"

    def __init__(self, det_box_type: str = "quad") -> None:
        """
        Initializes the operator with a default detection box type.

        Args:
            det_box_type (str, optional): The type of detection box, 'quad' or 'poly'. Defaults to "quad".
        """
        super().__init__()
        self.det_box_type = det_box_type

    def __call__(self, img: np.ndarray, dt_polys: List[list]) -> List[dict]:
        """
        Call method to crop images based on detection boxes.

        Args:
            img (np.ndarray): The input image.
            dt_polys (list[list]): List of detection polygons.

        Returns:
            list[dict]: A list of dictionaries containing cropped images and their sizes.

        Raises:
            NotImplementedError: If det_box_type is not 'quad' or 'poly'.
        """
        if self.det_box_type == "quad":
            dt_boxes = np.array(dt_polys)
            output_list = []
            for bno in range(len(dt_boxes)):
                tmp_box = copy.deepcopy(dt_boxes[bno])
                img_crop = self.get_minarea_rect_crop(img, tmp_box)
                output_list.append(img_crop)
        elif self.det_box_type == "poly":
            output_list = []
            dt_boxes = dt_polys
            for bno in range(len(dt_boxes)):
                tmp_box = copy.deepcopy(dt_boxes[bno])
                img_crop = self.get_poly_rect_crop(img.copy(), tmp_box)
                output_list.append(img_crop)
        else:
            raise NotImplementedError
        return output_list

    def get_minarea_rect_crop(self, img: np.ndarray, points: np.ndarray) -> np.ndarray:
        """
        Get the minimum area rectangle crop from the given image and points.

        Args:
            img (np.ndarray): The input image.
            points (np.ndarray): A list of points defining the shape to be cropped.

        Returns:
            np.ndarray: The cropped image with the minimum area rectangle.
        """
        bounding_box = cv2.minAreaRect(np.array(points).astype(np.int32))
        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])

        index_a, index_b, index_c, index_d = 0, 1, 2, 3
        if points[1][1] > points[0][1]:
            index_a = 0
            index_d = 1
        else:
            index_a = 1
            index_d = 0
        if points[3][1] > points[2][1]:
            index_b = 2
            index_c = 3
        else:
            index_b = 3
            index_c = 2

        box = [points[index_a], points[index_b], points[index_c], points[index_d]]
        crop_img = self.get_rotate_crop_image(img, np.array(box))
        return crop_img

    def get_rotate_crop_image(self, img: np.ndarray, points: list) -> np.ndarray:
        """
        Crop and rotate the input image based on the given four points to form a perspective-transformed image.

        Args:
            img (np.ndarray): The input image array.
            points (list): A list of four 2D points defining the crop region in the image.

        Returns:
            np.ndarray: The transformed image array.
        """
        assert len(points) == 4, "shape of points must be 4*2"
        img_crop_width = int(
            max(
                np.linalg.norm(points[0] - points[1]),
                np.linalg.norm(points[2] - points[3]),
            )
        )
        img_crop_height = int(
            max(
                np.linalg.norm(points[0] - points[3]),
                np.linalg.norm(points[1] - points[2]),
            )
        )
        pts_std = np.float32(
            [
                [0, 0],
                [img_crop_width, 0],
                [img_crop_width, img_crop_height],
                [0, img_crop_height],
            ]
        )
        M = cv2.getPerspectiveTransform(points, pts_std)
        dst_img = cv2.warpPerspective(
            img,
            M,
            (img_crop_width, img_crop_height),
            borderMode=cv2.BORDER_REPLICATE,
            flags=cv2.INTER_CUBIC,
        )
        dst_img_height, dst_img_width = dst_img.shape[0:2]
        if dst_img_height * 1.0 / dst_img_width >= 1.5:
            dst_img = np.rot90(dst_img)
        return dst_img
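
    # Note on the method above (a reading of the code, not an extra spec): the
    # four source corners, ordered top-left, top-right, bottom-right, bottom-left
    # by the callers, are mapped onto an upright rectangle of size
    # (img_crop_width, img_crop_height) via cv2.getPerspectiveTransform /
    # cv2.warpPerspective, so a slanted text quad becomes an axis-aligned crop.
    # Crops taller than 1.5x their width are then rotated 90 degrees so the text
    # reads roughly horizontally. cv2.getPerspectiveTransform expects float32
    # corners, which holds for the callers in this module because they pass the
    # float32 output of cv2.boxPoints.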

    def reorder_poly_edge(
        self, points: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Get the respective points composing head edge, tail edge, top
        sideline and bottom sideline.

        Args:
            points (ndarray): The points composing a text polygon.

        Returns:
            head_edge (ndarray): The two points composing the head edge of text
                polygon.
            tail_edge (ndarray): The two points composing the tail edge of text
                polygon.
            top_sideline (ndarray): The points composing top curved sideline of
                text polygon.
            bot_sideline (ndarray): The points composing bottom curved sideline
                of text polygon.
        """
        assert points.ndim == 2
        assert points.shape[0] >= 4
        assert points.shape[1] == 2

        orientation_thr = 2.0  # an empirical hyperparameter
        head_inds, tail_inds = self.find_head_tail(points, orientation_thr)
        head_edge, tail_edge = points[head_inds], points[tail_inds]

        pad_points = np.vstack([points, points])
        if tail_inds[1] < 1:
            tail_inds[1] = len(points)
        sideline1 = pad_points[head_inds[1] : tail_inds[1]]
        sideline2 = pad_points[tail_inds[1] : (head_inds[1] + len(points))]
        return head_edge, tail_edge, sideline1, sideline2

    def vector_slope(self, vec: list) -> float:
        """
        Calculate the slope of a vector in 2D space.

        Args:
            vec (list): A list of two elements representing the coordinates of the vector.

        Returns:
            float: The slope of the vector.

        Raises:
            AssertionError: If the length of the vector is not equal to 2.
        """
        assert len(vec) == 2
        return abs(vec[1] / (vec[0] + 1e-8))

    def find_head_tail(
        self, points: np.ndarray, orientation_thr: float
    ) -> Tuple[list, list]:
        """Find the head edge and tail edge of a text polygon.

        Args:
            points (ndarray): The points composing a text polygon.
            orientation_thr (float): The threshold for distinguishing between
                head edge and tail edge among the horizontal and vertical edges
                of a quadrangle.

        Returns:
            head_inds (list): The indexes of two points composing head edge.
            tail_inds (list): The indexes of two points composing tail edge.
        """
        assert points.ndim == 2
        assert points.shape[0] >= 4
        assert points.shape[1] == 2
        assert isinstance(orientation_thr, float)

        if len(points) > 4:
            pad_points = np.vstack([points, points[0]])
            edge_vec = pad_points[1:] - pad_points[:-1]

            theta_sum = []
            adjacent_vec_theta = []
            for i, edge_vec1 in enumerate(edge_vec):
                adjacent_ind = [x % len(edge_vec) for x in [i - 1, i + 1]]
                adjacent_edge_vec = edge_vec[adjacent_ind]
                temp_theta_sum = np.sum(self.vector_angle(edge_vec1, adjacent_edge_vec))
                temp_adjacent_theta = self.vector_angle(
                    adjacent_edge_vec[0], adjacent_edge_vec[1]
                )
                theta_sum.append(temp_theta_sum)
                adjacent_vec_theta.append(temp_adjacent_theta)
            theta_sum_score = np.array(theta_sum) / np.pi
            adjacent_theta_score = np.array(adjacent_vec_theta) / np.pi
            poly_center = np.mean(points, axis=0)
            edge_dist = np.maximum(
                norm(pad_points[1:] - poly_center, axis=-1),
                norm(pad_points[:-1] - poly_center, axis=-1),
            )
            dist_score = edge_dist / np.max(edge_dist)
            position_score = np.zeros(len(edge_vec))
            score = 0.5 * theta_sum_score + 0.15 * adjacent_theta_score
            score += 0.35 * dist_score
            if len(points) % 2 == 0:
                position_score[(len(score) // 2 - 1)] += 1
                position_score[-1] += 1
            score += 0.1 * position_score
            pad_score = np.concatenate([score, score])
            score_matrix = np.zeros((len(score), len(score) - 3))
            x = np.arange(len(score) - 3) / float(len(score) - 4)
            gaussian = (
                1.0
                / (np.sqrt(2.0 * np.pi) * 0.5)
                * np.exp(-np.power((x - 0.5) / 0.5, 2.0) / 2)
            )
            gaussian = gaussian / np.max(gaussian)
            for i in range(len(score)):
                score_matrix[i, :] = (
                    score[i]
                    + pad_score[(i + 2) : (i + len(score) - 1)] * gaussian * 0.3
                )

            head_start, tail_increment = np.unravel_index(
                score_matrix.argmax(), score_matrix.shape
            )
            tail_start = (head_start + tail_increment + 2) % len(points)
            head_end = (head_start + 1) % len(points)
            tail_end = (tail_start + 1) % len(points)

            if head_end > tail_end:
                head_start, tail_start = tail_start, head_start
                head_end, tail_end = tail_end, head_end
            head_inds = [head_start, head_end]
            tail_inds = [tail_start, tail_end]
        else:
            if self.vector_slope(points[1] - points[0]) + self.vector_slope(
                points[3] - points[2]
            ) < self.vector_slope(points[2] - points[1]) + self.vector_slope(
                points[0] - points[3]
            ):
                horizontal_edge_inds = [[0, 1], [2, 3]]
                vertical_edge_inds = [[3, 0], [1, 2]]
            else:
                horizontal_edge_inds = [[3, 0], [1, 2]]
                vertical_edge_inds = [[0, 1], [2, 3]]

            vertical_len_sum = norm(
                points[vertical_edge_inds[0][0]] - points[vertical_edge_inds[0][1]]
            ) + norm(
                points[vertical_edge_inds[1][0]] - points[vertical_edge_inds[1][1]]
            )
            horizontal_len_sum = norm(
                points[horizontal_edge_inds[0][0]] - points[horizontal_edge_inds[0][1]]
            ) + norm(
                points[horizontal_edge_inds[1][0]] - points[horizontal_edge_inds[1][1]]
            )

            if vertical_len_sum > horizontal_len_sum * orientation_thr:
                head_inds = horizontal_edge_inds[0]
                tail_inds = horizontal_edge_inds[1]
            else:
                head_inds = vertical_edge_inds[0]
                tail_inds = vertical_edge_inds[1]
        return head_inds, tail_inds
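
    # Summary of the heuristic above (a reading of the code, not an extra spec):
    # for polygons with more than 4 points, each edge is scored as
    #   0.5  * (sum of angles to its two neighbouring edges) / pi
    # + 0.15 * (angle between the two neighbouring edges) / pi
    # + 0.35 * (normalized distance of the edge from the polygon center)
    # + 0.1  * (position bonus for the middle and last edges when the polygon
    #           has an even number of points),
    # and the head/tail pair is the pair of edges (at least two steps apart)
    # maximizing the Gaussian-weighted score matrix. For plain quadrangles, the
    # shorter edge pair, chosen via the slope test and orientation_thr, is taken
    # as head and tail.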

    def vector_angle(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
        """
        Calculate the angle between two vectors.

        Args:
            vec1 (ndarray): The first vector.
            vec2 (ndarray): The second vector.

        Returns:
            float: The angle between the two vectors in radians.
        """
        if vec1.ndim > 1:
            unit_vec1 = vec1 / (norm(vec1, axis=-1) + 1e-8).reshape((-1, 1))
        else:
            unit_vec1 = vec1 / (norm(vec1, axis=-1) + 1e-8)
        if vec2.ndim > 1:
            unit_vec2 = vec2 / (norm(vec2, axis=-1) + 1e-8).reshape((-1, 1))
        else:
            unit_vec2 = vec2 / (norm(vec2, axis=-1) + 1e-8)
        return np.arccos(np.clip(np.sum(unit_vec1 * unit_vec2, axis=-1), -1.0, 1.0))
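
    # Worked example (illustrative values, not from the original source):
    # vector_angle(np.array([1.0, 0.0]), np.array([0.0, 1.0])) computes
    # arccos(clip(1*0 + 0*1, -1, 1)) = arccos(0) = pi/2, i.e. 90 degrees.
    # With 2D arrays of vectors, the angles are computed row-wise.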

    def get_minarea_rect(
        self, img: np.ndarray, points: np.ndarray
    ) -> Tuple[np.ndarray, list]:
        """
        Get the minimum area rectangle for the given points and crop the image accordingly.

        Args:
            img (np.ndarray): The input image.
            points (np.ndarray): The points to compute the minimum area rectangle for.

        Returns:
            tuple[np.ndarray, list]: The cropped image,
                and the list of points in the order of the bounding box.
        """
        bounding_box = cv2.minAreaRect(points)
        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])

        index_a, index_b, index_c, index_d = 0, 1, 2, 3
        if points[1][1] > points[0][1]:
            index_a = 0
            index_d = 1
        else:
            index_a = 1
            index_d = 0
        if points[3][1] > points[2][1]:
            index_b = 2
            index_c = 3
        else:
            index_b = 3
            index_c = 2

        box = [points[index_a], points[index_b], points[index_c], points[index_d]]
        crop_img = self.get_rotate_crop_image(img, np.array(box))
        return crop_img, box

    def sample_points_on_bbox_bp(self, line, n=50):
        """Resample n points on a line.

        Args:
            line (ndarray): The points composing a line.
            n (int): The resampled points number.

        Returns:
            resampled_line (ndarray): The points composing the resampled line.
        """
        # Validate the input arguments.
        assert line.ndim == 2
        assert line.shape[0] >= 2
        assert line.shape[1] == 2
        assert isinstance(n, int)
        assert n > 0

        length_list = [norm(line[i + 1] - line[i]) for i in range(len(line) - 1)]
        total_length = sum(length_list)
        length_cumsum = np.cumsum([0.0] + length_list)
        delta_length = total_length / (float(n) + 1e-8)
        current_edge_ind = 0
        resampled_line = [line[0]]

        for i in range(1, n):
            current_line_len = i * delta_length
            while (
                current_edge_ind + 1 < len(length_cumsum)
                and current_line_len >= length_cumsum[current_edge_ind + 1]
            ):
                current_edge_ind += 1
            current_edge_end_shift = current_line_len - length_cumsum[current_edge_ind]
            if current_edge_ind >= len(length_list):
                break
            end_shift_ratio = current_edge_end_shift / length_list[current_edge_ind]
            current_point = (
                line[current_edge_ind]
                + (line[current_edge_ind + 1] - line[current_edge_ind])
                * end_shift_ratio
            )
            resampled_line.append(current_point)
        resampled_line.append(line[-1])
        resampled_line = np.array(resampled_line)
        return resampled_line

    def sample_points_on_bbox(self, line, n=50):
        """Resample n points on a line.

        Args:
            line (ndarray): The points composing a line.
            n (int): The resampled points number.

        Returns:
            resampled_line (ndarray): The points composing the resampled line.
        """
        assert line.ndim == 2
        assert line.shape[0] >= 2
        assert line.shape[1] == 2
        assert isinstance(n, int)
        assert n > 0

        length_list = [norm(line[i + 1] - line[i]) for i in range(len(line) - 1)]
        total_length = sum(length_list)
        mean_length = total_length / (len(length_list) + 1e-8)
        group = [[0]]
        for i in range(len(length_list)):
            point_id = i + 1
            if length_list[i] < 0.9 * mean_length:
                for g in group:
                    if i in g:
                        g.append(point_id)
                        break
            else:
                g = [point_id]
                group.append(g)
        top_tail_len = norm(line[0] - line[-1])
        if top_tail_len < 0.9 * mean_length:
            group[0].extend(g)
            group.remove(g)
        mean_positions = []
        for indices in group:
            x_sum = 0
            y_sum = 0
            for index in indices:
                x, y = line[index]
                x_sum += x
                y_sum += y
            num_points = len(indices)
            mean_x = x_sum / num_points
            mean_y = y_sum / num_points
            mean_positions.append((mean_x, mean_y))
        resampled_line = np.array(mean_positions)
        return resampled_line
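
    # Note on the method above (a reading of the code, not an extra spec): unlike
    # sample_points_on_bbox_bp, which resamples points uniformly by arc length,
    # sample_points_on_bbox merges runs of vertices joined by edges shorter than
    # 0.9x the mean edge length into a single centroid, so near-duplicate corner
    # points of the detected polygon collapse before the edges are reordered.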

    def get_poly_rect_crop(self, img, points):
        """
        Rectify and crop irregular or curved text regions using the polygon.

        Args:
            img: The image, as an ndarray.
            points: Polygon point coordinates of shape N*2, as an ndarray.

        Returns:
            The rectified image, as an ndarray.
        """
        points = np.array(points).astype(np.int32).reshape(-1, 2)
        temp_crop_img, temp_box = self.get_minarea_rect(img, points)

        # Compute the IoU between the minimum bounding rectangle and the polygon.
        def get_union(pD, pG):
            return Polygon(pD).union(Polygon(pG)).area

        def get_intersection_over_union(pD, pG):
            return get_intersection(pD, pG) / (get_union(pD, pG) + 1e-10)

        def get_intersection(pD, pG):
            return Polygon(pD).intersection(Polygon(pG)).area

        if not Polygon(points).is_valid:
            return temp_crop_img
        cal_IoU = get_intersection_over_union(points, temp_box)

        if cal_IoU >= 0.7:
            points = self.sample_points_on_bbox_bp(points, 31)
            return temp_crop_img

        points_sample = self.sample_points_on_bbox(points)
        points_sample = points_sample.astype(np.int32)
        head_edge, tail_edge, top_line, bot_line = self.reorder_poly_edge(points_sample)

        resample_top_line = self.sample_points_on_bbox_bp(top_line, 15)
        resample_bot_line = self.sample_points_on_bbox_bp(bot_line, 15)

        sideline_mean_shift = np.mean(resample_top_line, axis=0) - np.mean(
            resample_bot_line, axis=0
        )
        if sideline_mean_shift[1] > 0:
            resample_bot_line, resample_top_line = resample_top_line, resample_bot_line
        rectifier = AutoRectifier()
        new_points = np.concatenate([resample_top_line, resample_bot_line])
        new_points_list = list(new_points.astype(np.float32).reshape(1, -1).tolist())

        if len(img.shape) == 2:
            img = np.stack((img,) * 3, axis=-1)
        img_crop, image = rectifier.run(img, new_points_list, mode="homography")
        return np.array(img_crop[0], dtype=np.uint8)
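

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the original module. It assumes the
    # file is executed inside its package (e.g. `python -m <package>.crop_image_regions`)
    # so that the relative imports above resolve; the image and coordinates below
    # are made-up values for illustration only.
    demo_img = np.full((120, 240, 3), 255, dtype=np.uint8)
    cv2.putText(demo_img, "demo", (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 0), 2)

    # Axis-aligned crops from box dicts, as documented in CropByBoxes.__call__.
    box_cropper = CropByBoxes()
    crops = box_cropper(
        demo_img, [{"cls_id": 0, "coordinate": [10, 20, 200, 100], "label": "text"}]
    )
    print("CropByBoxes:", crops[0]["label"], crops[0]["img"].shape)

    # Perspective crop of a slanted quadrangle (det_box_type="quad").
    poly_cropper = CropByPolys(det_box_type="quad")
    quads = [[[15, 30], [190, 40], [185, 95], [10, 85]]]
    quad_crops = poly_cropper(demo_img, quads)
    print("CropByPolys:", quad_crops[0].shape)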