ocr_utils.py

# Copyright (c) Opendatalab. All rights reserved.
import cv2
import numpy as np

from magic_pdf.pre_proc.ocr_dict_merge import merge_spans_to_line
from magic_pdf.libs.boxbase import __is_overlaps_y_exceeds_threshold


def img_decode(content: bytes):
    np_arr = np.frombuffer(content, dtype=np.uint8)
    return cv2.imdecode(np_arr, cv2.IMREAD_UNCHANGED)

def check_img(img):
    if isinstance(img, bytes):
        img = img_decode(img)
    if isinstance(img, np.ndarray) and len(img.shape) == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    return img

def alpha_to_color(img, alpha_color=(255, 255, 255)):
    if len(img.shape) == 3 and img.shape[2] == 4:
        B, G, R, A = cv2.split(img)
        alpha = A / 255

        R = (alpha_color[0] * (1 - alpha) + R * alpha).astype(np.uint8)
        G = (alpha_color[1] * (1 - alpha) + G * alpha).astype(np.uint8)
        B = (alpha_color[2] * (1 - alpha) + B * alpha).astype(np.uint8)

        img = cv2.merge((B, G, R))
    return img

def preprocess_image(_image):
    alpha_color = (255, 255, 255)
    _image = alpha_to_color(_image, alpha_color)
    return _image

def sorted_boxes(dt_boxes):
    """
    Sort text boxes in order from top to bottom, left to right
    args:
        dt_boxes(array): detected text boxes with shape [4, 2]
    return:
        sorted boxes(array) with shape [4, 2]
    """
    num_boxes = dt_boxes.shape[0]
    sorted_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))
    _boxes = list(sorted_boxes)

    for i in range(num_boxes - 1):
        for j in range(i, -1, -1):
            if abs(_boxes[j + 1][0][1] - _boxes[j][0][1]) < 10 and \
                    (_boxes[j + 1][0][0] < _boxes[j][0][0]):
                tmp = _boxes[j]
                _boxes[j] = _boxes[j + 1]
                _boxes[j + 1] = tmp
            else:
                break
    return _boxes
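
# Illustrative note (not part of the original module): sorted_boxes expects an
# ndarray of shape [N, 4, 2]. Boxes whose top-left y-coordinates differ by less
# than 10 px are treated as lying on the same visual line and re-ordered left to
# right; e.g. boxes with top-left points (50, 8), (10, 12), (5, 100) come out in
# the order (10, 12), (50, 8), (5, 100).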

def bbox_to_points(bbox):
    """Convert a bbox in [x0, y0, x1, y1] format into an array of its four corner points."""
    x0, y0, x1, y1 = bbox
    return np.array([[x0, y0], [x1, y0], [x1, y1], [x0, y1]]).astype('float32')

def points_to_bbox(points):
    """Convert an array of four corner points back into [x0, y0, x1, y1] bbox format."""
    x0, y0 = points[0]
    x1, _ = points[1]
    _, y1 = points[2]
    return [x0, y0, x1, y1]
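
# Illustrative sketch (assumed example values, not part of the original module):
#   bbox_to_points([10, 20, 110, 50]) gives the four corners
#     [[10, 20], [110, 20], [110, 50], [10, 50]] as float32,
#   and points_to_bbox() of that array recovers [10.0, 20.0, 110.0, 50.0].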

def merge_intervals(intervals):
    # Sort the intervals based on the start value
    intervals.sort(key=lambda x: x[0])

    merged = []
    for interval in intervals:
        # If the list of merged intervals is empty or if the current
        # interval does not overlap with the previous, simply append it.
        if not merged or merged[-1][1] < interval[0]:
            merged.append(interval)
        else:
            # Otherwise, there is overlap, so we merge the current and previous intervals.
            merged[-1][1] = max(merged[-1][1], interval[1])

    return merged

def remove_intervals(original, masks):
    # Merge all mask intervals
    merged_masks = merge_intervals(masks)

    result = []
    original_start, original_end = original

    for mask in merged_masks:
        mask_start, mask_end = mask

        # If the mask starts after the original range ends, ignore it
        if mask_start > original_end:
            continue

        # If the mask ends before the original range starts, ignore it
        if mask_end < original_start:
            continue

        # Remove the masked part from the original range
        if original_start < mask_start:
            result.append([original_start, mask_start - 1])

        original_start = max(mask_end + 1, original_start)

    # Add the remaining part of the original range, if any
    if original_start <= original_end:
        result.append([original_start, original_end])

    return result
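
# Worked example (illustrative only): both helpers operate on closed integer
# intervals along one axis.
#   merge_intervals([[1, 3], [2, 6], [8, 10]])        -> [[1, 6], [8, 10]]
#   remove_intervals([0, 100], [[10, 20], [30, 40]])  -> [[0, 9], [21, 29], [41, 100]]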

def update_det_boxes(dt_boxes, mfd_res):
    new_dt_boxes = []
    angle_boxes_list = []
    for text_box in dt_boxes:

        # Rotated boxes are kept as-is and re-appended at the end
        if calculate_is_angle(text_box):
            angle_boxes_list.append(text_box)
            continue

        text_bbox = points_to_bbox(text_box)
        masks_list = []
        for mf_box in mfd_res:
            mf_bbox = mf_box['bbox']
            if __is_overlaps_y_exceeds_threshold(text_bbox, mf_bbox):
                masks_list.append([mf_bbox[0], mf_bbox[2]])

        # Cut the formula x-ranges out of the text box and rebuild the remaining pieces
        text_x_range = [text_bbox[0], text_bbox[2]]
        text_remove_mask_range = remove_intervals(text_x_range, masks_list)
        temp_dt_box = []
        for text_remove_mask in text_remove_mask_range:
            temp_dt_box.append(bbox_to_points([text_remove_mask[0], text_bbox[1], text_remove_mask[1], text_bbox[3]]))
        if len(temp_dt_box) > 0:
            new_dt_boxes.extend(temp_dt_box)

    new_dt_boxes.extend(angle_boxes_list)

    return new_dt_boxes
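
# Illustrative sketch of the splitting behaviour (assumed coordinates; the exact
# y-overlap test lives in __is_overlaps_y_exceeds_threshold): given a text box
# covering x=0..300 at y=0..20 and a formula bbox [100, 0, 200, 20] on the same
# line, the x-range [100, 200] is masked out and the text box is replaced by two
# boxes covering roughly x=0..99 and x=201..300 over the same y-range.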

def merge_overlapping_spans(spans):
    """
    Merges overlapping spans on the same line.

    :param spans: A list of span coordinates [(x1, y1, x2, y2), ...]
    :return: A list of merged spans
    """
    # Return an empty list if the input spans list is empty
    if not spans:
        return []

    # Sort spans by their starting x-coordinate
    spans.sort(key=lambda x: x[0])

    # Initialize the list of merged spans
    merged = []

    for span in spans:
        # Unpack span coordinates
        x1, y1, x2, y2 = span

        # If the merged list is empty or there's no horizontal overlap, add the span directly
        if not merged or merged[-1][2] < x1:
            merged.append(span)
        else:
            # If there is horizontal overlap, merge the current span with the previous one
            last_span = merged.pop()

            # Update the merged span's top-left corner to the smaller (x1, y1) and bottom-right to the larger (x2, y2)
            x1 = min(last_span[0], x1)
            y1 = min(last_span[1], y1)
            x2 = max(last_span[2], x2)
            y2 = max(last_span[3], y2)

            # Add the merged span back to the list
            merged.append((x1, y1, x2, y2))

    # Return the list of merged spans
    return merged
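
# Illustrative example (assumed values): spans that overlap horizontally after
# the x-sort are collapsed into their common bounding rectangle, e.g.
#   merge_overlapping_spans([(0, 0, 50, 10), (40, 2, 90, 12), (120, 0, 150, 10)])
#   -> [(0, 0, 90, 12), (120, 0, 150, 10)]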

def merge_det_boxes(dt_boxes):
    """
    Merge detection boxes.

    This function takes a list of detected bounding boxes, each represented by four corner points,
    and merges them into larger text regions.

    Parameters:
        dt_boxes (list): A list containing multiple text detection boxes, where each box is defined by four corner points.

    Returns:
        list: A list containing the merged text regions, where each region is represented by four corner points.
    """
    # Convert the detection boxes into a dictionary format with bounding boxes and type
    dt_boxes_dict_list = []
    angle_boxes_list = []
    for text_box in dt_boxes:
        text_bbox = points_to_bbox(text_box)

        # Rotated boxes are kept as-is and re-appended at the end
        if calculate_is_angle(text_box):
            angle_boxes_list.append(text_box)
            continue

        text_box_dict = {
            'bbox': text_bbox,
            'type': 'text',
        }
        dt_boxes_dict_list.append(text_box_dict)

    # Merge adjacent text regions into lines
    lines = merge_spans_to_line(dt_boxes_dict_list)

    # Initialize a new list for storing the merged text regions
    new_dt_boxes = []
    for line in lines:
        line_bbox_list = []
        for span in line:
            line_bbox_list.append(span['bbox'])

        # Merge overlapping text regions within the same line
        merged_spans = merge_overlapping_spans(line_bbox_list)

        # Convert the merged text regions back to point format and add them to the new detection box list
        for span in merged_spans:
            new_dt_boxes.append(bbox_to_points(span))

    new_dt_boxes.extend(angle_boxes_list)

    return new_dt_boxes

def get_adjusted_mfdetrec_res(single_page_mfdetrec_res, useful_list):
    paste_x, paste_y, xmin, ymin, xmax, ymax, new_width, new_height = useful_list
    # Adjust the coordinates of the formula regions
    adjusted_mfdetrec_res = []
    for mf_res in single_page_mfdetrec_res:
        mf_xmin, mf_ymin, mf_xmax, mf_ymax = mf_res["bbox"]
        # Shift the formula coordinates so they are relative to the cropped area
        x0 = mf_xmin - xmin + paste_x
        y0 = mf_ymin - ymin + paste_y
        x1 = mf_xmax - xmin + paste_x
        y1 = mf_ymax - ymin + paste_y
        # Filter out formula blocks that fall outside the cropped image
        if any([x1 < 0, y1 < 0]) or any([x0 > new_width, y0 > new_height]):
            continue
        else:
            adjusted_mfdetrec_res.append({
                "bbox": [x0, y0, x1, y1],
            })
    return adjusted_mfdetrec_res

def get_ocr_result_list(ocr_res, useful_list):
    paste_x, paste_y, xmin, ymin, xmax, ymax, new_width, new_height = useful_list
    ocr_result_list = []
    for box_ocr_res in ocr_res:

        if len(box_ocr_res) == 2:
            p1, p2, p3, p4 = box_ocr_res[0]
            text, score = box_ocr_res[1]
            # logger.info(f"text: {text}, score: {score}")
            if score < 0.6:  # Filter out low-confidence results
                continue
        else:
            p1, p2, p3, p4 = box_ocr_res
            text, score = "", 1

        # average_angle_degrees = calculate_angle_degrees(box_ocr_res[0])
        # if average_angle_degrees > 0.5:
        poly = [p1, p2, p3, p4]
        if calculate_is_angle(poly):
            # logger.info(f"average_angle_degrees: {average_angle_degrees}, text: {text}")
            # The box is tilted more than 0.5 degrees from the x-axis, so straighten its boundaries:
            # compute the geometric center and rebuild an axis-aligned box around it
            x_center = sum(point[0] for point in poly) / 4
            y_center = sum(point[1] for point in poly) / 4
            new_height = ((p4[1] - p1[1]) + (p3[1] - p2[1])) / 2
            new_width = p3[0] - p1[0]
            p1 = [x_center - new_width / 2, y_center - new_height / 2]
            p2 = [x_center + new_width / 2, y_center - new_height / 2]
            p3 = [x_center + new_width / 2, y_center + new_height / 2]
            p4 = [x_center - new_width / 2, y_center + new_height / 2]

        # Convert the coordinates back to the original coordinate system
        p1 = [p1[0] - paste_x + xmin, p1[1] - paste_y + ymin]
        p2 = [p2[0] - paste_x + xmin, p2[1] - paste_y + ymin]
        p3 = [p3[0] - paste_x + xmin, p3[1] - paste_y + ymin]
        p4 = [p4[0] - paste_x + xmin, p4[1] - paste_y + ymin]

        ocr_result_list.append({
            'category_id': 15,
            'poly': p1 + p2 + p3 + p4,
            'score': float(round(score, 2)),
            'text': text,
        })

    return ocr_result_list

def calculate_is_angle(poly):
    p1, p2, p3, p4 = poly
    height = ((p4[1] - p1[1]) + (p3[1] - p2[1])) / 2
    if 0.8 * height <= (p3[1] - p1[1]) <= 1.2 * height:
        return False
    else:
        # logger.info((p3[1] - p1[1])/height)
        return True
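
# Illustrative note (assumed values): for an axis-aligned quadrilateral such as
# [[0, 0], [100, 0], [100, 30], [0, 30]] the p1->p3 vertical extent equals the
# averaged edge height (30), so the box is not flagged; for a tilted box like
# [[0, 10], [100, 0], [110, 30], [10, 40]] the extent (20) falls outside the
# 0.8x-1.2x band of the averaged height (30), so it is flagged as angled.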

def get_rotate_crop_image(img, points):
    '''
    img_height, img_width = img.shape[0:2]
    left = int(np.min(points[:, 0]))
    right = int(np.max(points[:, 0]))
    top = int(np.min(points[:, 1]))
    bottom = int(np.max(points[:, 1]))
    img_crop = img[top:bottom, left:right, :].copy()
    points[:, 0] = points[:, 0] - left
    points[:, 1] = points[:, 1] - top
    '''
    assert len(points) == 4, "shape of points must be 4*2"
    img_crop_width = int(
        max(
            np.linalg.norm(points[0] - points[1]),
            np.linalg.norm(points[2] - points[3])))
    img_crop_height = int(
        max(
            np.linalg.norm(points[0] - points[3]),
            np.linalg.norm(points[1] - points[2])))
    pts_std = np.float32([[0, 0], [img_crop_width, 0],
                          [img_crop_width, img_crop_height],
                          [0, img_crop_height]])
    M = cv2.getPerspectiveTransform(points, pts_std)
    dst_img = cv2.warpPerspective(
        img,
        M, (img_crop_width, img_crop_height),
        borderMode=cv2.BORDER_REPLICATE,
        flags=cv2.INTER_CUBIC)
    dst_img_height, dst_img_width = dst_img.shape[0:2]
    if dst_img_height * 1.0 / dst_img_width >= 1.5:
        dst_img = np.rot90(dst_img)
    return dst_img
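
# Usage note (illustrative, not part of the original module): `points` must be a
# float32 ndarray of shape (4, 2) ordered top-left, top-right, bottom-right,
# bottom-left, since cv2.getPerspectiveTransform expects four float32 point
# pairs. The warped crop is rotated 90 degrees when it is at least 1.5x taller
# than it is wide, a common heuristic for near-vertical text lines, e.g.:
#   crop = get_rotate_crop_image(page_img, np.array(
#       [[100, 40], [300, 40], [300, 80], [100, 80]], dtype=np.float32))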