utils.py

  1. # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. __all__ = [
  15. "get_sub_regions_ocr_res",
  16. "get_layout_ordering",
  17. "get_single_block_parsing_res",
  18. "recursive_img_array2path",
  19. "get_show_color",
  20. "sorted_layout_boxes",
  21. "convert_bgr2rgb",
  22. ]
  23. import numpy as np
  24. import copy
  25. import cv2
  26. from PIL import Image
  27. import uuid
  28. from pathlib import Path
  29. from typing import Optional, Union, List, Tuple, Dict, Any
  30. from ..ocr.result import OCRResult
  31. from ...models.object_detection.result import DetResult
  32. from ..components import convert_points_to_boxes
  33. def convert_bgr2rgb(data: Image.Image) -> Image.Image:
  34. """
  35. Convert BGR image to RGB image.
  36. Args:
  37. data (PIL.Image): The input image data.
  38. Returns:
  39. PIL.Image: The converted RGB image data.
  40. """
  42. original_img_array = np.array(data)
  43. if original_img_array.ndim == 3 and original_img_array.shape[2] == 3:
  44. res_img_array = original_img_array[:, :, ::-1]
  45. else:
  46. res_img_array = original_img_array
  47. res_img = Image.fromarray(res_img_array)
  48. return res_img
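# Illustrative usage sketch (not part of the original module); assumes a PIL
# image whose channels are in BGR order, e.g. one built from an OpenCV array
# loaded from a hypothetical "page.png" (`cv2` and `Image` are imported above):
#
#     bgr_pil = Image.fromarray(cv2.imread("page.png"))  # OpenCV reads BGR
#     rgb_pil = convert_bgr2rgb(bgr_pil)                  # channels swapped to RGB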
  49. def get_overlap_boxes_idx(src_boxes: np.ndarray, ref_boxes: np.ndarray) -> List:
  50. """
  51. Get the indices of source boxes that overlap with reference boxes by more than a fixed 3-pixel margin in both width and height.
  52. Args:
  53. src_boxes (np.ndarray): A 2D numpy array of source bounding boxes.
  54. ref_boxes (np.ndarray): A 2D numpy array of reference bounding boxes.
  55. Returns:
  56. match_idx_list (list): A list of indices of source boxes that overlap with reference boxes.
  57. """
  58. match_idx_list = []
  59. src_boxes_num = len(src_boxes)
  60. if src_boxes_num > 0 and len(ref_boxes) > 0:
  61. for rno in range(len(ref_boxes)):
  62. ref_box = ref_boxes[rno]
  63. x1 = np.maximum(ref_box[0], src_boxes[:, 0])
  64. y1 = np.maximum(ref_box[1], src_boxes[:, 1])
  65. x2 = np.minimum(ref_box[2], src_boxes[:, 2])
  66. y2 = np.minimum(ref_box[3], src_boxes[:, 3])
  67. pub_w = x2 - x1
  68. pub_h = y2 - y1
  69. match_idx = np.where((pub_w > 3) & (pub_h > 3))[0]
  70. match_idx_list.extend(match_idx)
  71. return match_idx_list
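# Illustrative sketch (assumed toy inputs): the first source box overlaps the
# reference box by more than 3 px in both dimensions, the second does not.
#
#     src = np.array([[0, 0, 100, 20], [0, 200, 100, 220]])
#     get_overlap_boxes_idx(src, [[10, 5, 50, 15]])   # -> [0]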
  72. def get_sub_regions_ocr_res(
  73. overall_ocr_res: OCRResult,
  74. object_boxes: List,
  75. flag_within: bool = True,
  76. return_match_idx: bool = False,
  77. ) -> OCRResult:
  78. """
  79. Filters OCR results to only include text boxes within specified object boxes based on a flag.
  80. Args:
  81. overall_ocr_res (OCRResult): The original OCR result containing all text boxes.
  82. object_boxes (list): A list of bounding boxes for the objects of interest.
  83. flag_within (bool): If True, only include text boxes within the object boxes. If False, exclude text boxes within the object boxes.
  84. return_match_idx (bool): If True, return the list of matching indices.
  85. Returns:
  86. OCRResult: A filtered OCR result containing only the relevant text boxes.
  87. """
  88. sub_regions_ocr_res = {}
  89. sub_regions_ocr_res["rec_polys"] = []
  90. sub_regions_ocr_res["rec_texts"] = []
  91. sub_regions_ocr_res["rec_scores"] = []
  92. sub_regions_ocr_res["rec_boxes"] = []
  93. overall_text_boxes = overall_ocr_res["rec_boxes"]
  94. match_idx_list = get_overlap_boxes_idx(overall_text_boxes, object_boxes)
  95. match_idx_list = list(set(match_idx_list))
  96. for box_no in range(len(overall_text_boxes)):
  97. if flag_within:
  98. if box_no in match_idx_list:
  99. flag_match = True
  100. else:
  101. flag_match = False
  102. else:
  103. if box_no not in match_idx_list:
  104. flag_match = True
  105. else:
  106. flag_match = False
  107. if flag_match:
  108. sub_regions_ocr_res["rec_polys"].append(
  109. overall_ocr_res["rec_polys"][box_no]
  110. )
  111. sub_regions_ocr_res["rec_texts"].append(
  112. overall_ocr_res["rec_texts"][box_no]
  113. )
  114. sub_regions_ocr_res["rec_scores"].append(
  115. overall_ocr_res["rec_scores"][box_no]
  116. )
  117. sub_regions_ocr_res["rec_boxes"].append(
  118. overall_ocr_res["rec_boxes"][box_no]
  119. )
  120. for key in ["rec_polys", "rec_scores", "rec_boxes"]:
  121. sub_regions_ocr_res[key] = np.array(sub_regions_ocr_res[key])
  122. return (
  123. (sub_regions_ocr_res, match_idx_list)
  124. if return_match_idx
  125. else sub_regions_ocr_res
  126. )
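# Illustrative sketch (assumed minimal dict standing in for an OCRResult):
# only the text box lying inside the object box is kept.
#
#     ocr = {
#         "rec_boxes": np.array([[0, 0, 50, 10], [0, 300, 50, 310]]),
#         "rec_polys": [[[0, 0], [50, 0], [50, 10], [0, 10]],
#                       [[0, 300], [50, 300], [50, 310], [0, 310]]],
#         "rec_texts": ["inside", "outside"],
#         "rec_scores": [0.99, 0.98],
#     }
#     sub = get_sub_regions_ocr_res(ocr, [[0, 0, 60, 20]])
#     sub["rec_texts"]   # -> ["inside"]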
  127. def sorted_layout_boxes(res, w):
  128. """
  129. Sort text boxes in order from top to bottom, left to right
  130. Args:
  131. res: List of dictionaries containing layout information.
  132. w: Width of image.
  133. Returns:
  134. List of dictionaries containing sorted layout information.
  135. """
  136. num_boxes = len(res)
  137. if num_boxes == 1:
  138. return res
  139. # Sort boxes primarily by the y coordinate, then by the x coordinate
  140. sorted_boxes = sorted(res, key=lambda x: (x["block_bbox"][1], x["block_bbox"][0]))
  141. _boxes = list(sorted_boxes)
  142. new_res = []
  143. res_left = []
  144. res_right = []
  145. i = 0
  146. while True:
  147. if i >= num_boxes:
  148. break
  149. # Check that the bbox is on the left
  150. elif (
  151. _boxes[i]["block_bbox"][0] < w / 4
  152. and _boxes[i]["block_bbox"][2] < 3 * w / 5
  153. ):
  154. res_left.append(_boxes[i])
  155. i += 1
  156. elif _boxes[i]["block_bbox"][0] > 2 * w / 5:
  157. res_right.append(_boxes[i])
  158. i += 1
  159. else:
  160. new_res += res_left
  161. new_res += res_right
  162. new_res.append(_boxes[i])
  163. res_left = []
  164. res_right = []
  165. i += 1
  166. res_left = sorted(res_left, key=lambda x: (x["block_bbox"][1]))
  167. res_right = sorted(res_right, key=lambda x: (x["block_bbox"][1]))
  168. if res_left:
  169. new_res += res_left
  170. if res_right:
  171. new_res += res_right
  172. return new_res
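# Illustrative sketch (assumed layout dicts): on a 1000 px wide page, the
# left-column block is emitted before the right-column block.
#
#     blocks = [
#         {"block_bbox": [600, 100, 900, 150]},   # right column
#         {"block_bbox": [50, 100, 300, 150]},    # left column
#     ]
#     [b["block_bbox"][0] for b in sorted_layout_boxes(blocks, w=1000)]
#     # -> [50, 600]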
  173. def _calculate_overlap_area_div_minbox_area_ratio(
  174. bbox1: Union[list, tuple],
  175. bbox2: Union[list, tuple],
  176. ) -> float:
  177. """
  178. Calculate the ratio of the overlap area between bbox1 and bbox2
  179. to the area of the smaller bounding box.
  180. Args:
  181. bbox1 (list or tuple): Coordinates of the first bounding box [x_min, y_min, x_max, y_max].
  182. bbox2 (list or tuple): Coordinates of the second bounding box [x_min, y_min, x_max, y_max].
  183. Returns:
  184. float: The ratio of the overlap area to the area of the smaller bounding box.
  185. """
  186. bbox1 = list(map(int, bbox1))
  187. bbox2 = list(map(int, bbox2))
  188. x_left = max(bbox1[0], bbox2[0])
  189. y_top = max(bbox1[1], bbox2[1])
  190. x_right = min(bbox1[2], bbox2[2])
  191. y_bottom = min(bbox1[3], bbox2[3])
  192. if x_right <= x_left or y_bottom <= y_top:
  193. return 0.0
  194. intersection_area = (x_right - x_left) * (y_bottom - y_top)
  195. area_bbox1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1])
  196. area_bbox2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1])
  197. min_box_area = min(area_bbox1, area_bbox2)
  198. if min_box_area <= 0:
  199. return 0.0
  200. return intersection_area / min_box_area
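# Worked example (assumed coordinates): the overlap region is 50x100 px and each
# box has area 100x100 px, giving a ratio of 0.5.
#
#     _calculate_overlap_area_div_minbox_area_ratio([0, 0, 100, 100],
#                                                   [50, 0, 150, 100])   # -> 0.5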
  201. def _whether_y_overlap_exceeds_threshold(
  202. bbox1: Union[list, tuple],
  203. bbox2: Union[list, tuple],
  204. overlap_ratio_threshold: float = 0.6,
  205. ) -> bool:
  206. """
  207. Determines whether the vertical overlap between two bounding boxes exceeds a given threshold.
  208. Args:
  209. bbox1 (list or tuple): The first bounding box defined as (left, top, right, bottom).
  210. bbox2 (list or tuple): The second bounding box defined as (left, top, right, bottom).
  211. overlap_ratio_threshold (float): The threshold ratio to determine if the overlap is significant.
  212. Defaults to 0.6.
  213. Returns:
  214. bool: True if the vertical overlap divided by the minimum height of the two bounding boxes
  215. exceeds the overlap_ratio_threshold, otherwise False.
  216. """
  217. _, y1_0, _, y1_1 = bbox1
  218. _, y2_0, _, y2_1 = bbox2
  219. overlap = max(0, min(y1_1, y2_1) - max(y1_0, y2_0))
  220. min_height = min(y1_1 - y1_0, y2_1 - y2_0)
  221. return (overlap / min_height) > overlap_ratio_threshold
  222. def _adjust_span_text(span: List[str], prepend: bool = False, append: bool = False):
  223. """
  224. Adjust the text of a span by prepending or appending a newline.
  225. Args:
  226. span (list): A list where the second element is the text of the span.
  227. prepend (bool): If True, prepend a newline to the text.
  228. append (bool): If True, append a newline to the text.
  229. Returns:
  230. None: The function modifies the span in place.
  231. """
  232. if prepend:
  233. span[1] = "\n" + span[1]
  234. if append:
  235. span[1] = span[1] + "\n"
  236. def _format_line(
  237. line: List[List[Union[List[int], str]]],
  238. layout_min: int,
  239. layout_max: int,
  240. is_reference: bool = False,
  241. ) -> None:
  242. """
  243. Format a line of text spans based on layout constraints.
  244. Args:
  245. line (list): A list of spans, where each span is a list containing a bounding box and text.
  246. layout_min (int): The minimum x-coordinate of the layout bounding box.
  247. layout_max (int): The maximum x-coordinate of the layout bounding box.
  248. is_reference (bool): A flag indicating whether the line is a reference line, which affects formatting rules.
  249. Returns:
  250. None: The function modifies the line in place.
  251. """
  252. first_span = line[0]
  253. end_span = line[-1]
  254. if not is_reference:
  255. if first_span[0][0] - layout_min > 10:
  256. _adjust_span_text(first_span, prepend=True)
  257. if layout_max - end_span[0][2] > 10:
  258. _adjust_span_text(end_span, append=True)
  259. else:
  260. if first_span[0][0] - layout_min < 5:
  261. _adjust_span_text(first_span, prepend=True)
  262. if layout_max - end_span[0][2] > 20:
  263. _adjust_span_text(end_span, append=True)
  264. def _sort_ocr_res_by_y_projection(
  265. label: Any,
  266. block_bbox: Tuple[int, int, int, int],
  267. ocr_res: Dict[str, List[Any]],
  268. line_height_iou_threshold: float = 0.7,
  269. ) -> Dict[str, List[Any]]:
  270. """
  271. Sorts OCR results based on their spatial arrangement, grouping them into lines and blocks.
  272. Args:
  273. label (Any): The label of the block; when it is "reference", looser
  274. line-formatting rules are applied.
  275. block_bbox (Tuple[int, int, int, int]): A tuple representing the layout bounding box, defined as
  276. (left, top, right, bottom).
  277. ocr_res (Dict[str, List[Any]]): A dictionary containing OCR results with the following keys:
  278. - "boxes": A list of bounding boxes, each defined as [left, top, right, bottom].
  279. - "rec_texts": A corresponding list of recognized text strings for each box.
  280. line_height_iou_threshold (float): The threshold for determining whether two boxes belong to
  281. the same line based on their vertical overlap. Defaults to 0.7.
  282. Returns:
  283. Dict[str, List[Any]]: A dictionary with the same structure as `ocr_res`, but with boxes and texts sorted
  284. and grouped into lines and blocks.
  285. """
  286. assert (
  287. ocr_res["boxes"] and ocr_res["rec_texts"]
  288. ), "OCR results must contain 'boxes' and 'rec_texts'"
  289. boxes = ocr_res["boxes"]
  290. rec_texts = ocr_res["rec_texts"]
  291. x_min, _, x_max, _ = block_bbox
  292. inline_x_min = min([box[0] for box in boxes])
  293. inline_x_max = max([box[2] for box in boxes])
  294. spans = list(zip(boxes, rec_texts))
  295. spans.sort(key=lambda span: span[0][1])
  296. spans = [list(span) for span in spans]
  297. lines = []
  298. current_line = [spans[0]]
  299. current_y0, current_y1 = spans[0][0][1], spans[0][0][3]
  300. for span in spans[1:]:
  301. y0, y1 = span[0][1], span[0][3]
  302. if _whether_y_overlap_exceeds_threshold(
  303. (0, current_y0, 0, current_y1),
  304. (0, y0, 0, y1),
  305. line_height_iou_threshold,
  306. ):
  307. current_line.append(span)
  308. current_y0 = min(current_y0, y0)
  309. current_y1 = max(current_y1, y1)
  310. else:
  311. lines.append(current_line)
  312. current_line = [span]
  313. current_y0, current_y1 = y0, y1
  314. if current_line:
  315. lines.append(current_line)
  316. for line in lines:
  317. line.sort(key=lambda span: span[0][0])
  318. if label == "reference":
  319. _format_line(line, inline_x_min, inline_x_max, is_reference=True)
  320. else:
  321. _format_line(line, x_min, x_max)
  322. # Flatten lines back into a single list for boxes and texts
  323. ocr_res["boxes"] = [span[0] for line in lines for span in line]
  324. ocr_res["rec_texts"] = [span[1] + " " for line in lines for span in line]
  325. return ocr_res
  326. def get_single_block_parsing_res(
  327. overall_ocr_res: OCRResult,
  328. layout_det_res: DetResult,
  329. table_res_list: list,
  330. seal_res_list: list,
  331. ) -> list:
  332. """
  333. Extract structured information from OCR and layout detection results.
  334. Args:
  335. overall_ocr_res (OCRResult): An object containing the overall OCR results, including detected text boxes and recognized text. The structure is expected to have:
  336. - "input_img": The image on which OCR was performed.
  337. - "dt_boxes": A list of detected text box coordinates.
  338. - "rec_texts": A list of recognized text corresponding to the detected boxes.
  339. layout_det_res (DetResult): An object containing the layout detection results, including detected layout boxes and their labels. The structure is expected to have:
  340. - "boxes": A list of dictionaries with keys "coordinate" for box coordinates and "block_label" for the type of content.
  341. table_res_list (list): A list of table detection results, where each item is a dictionary containing:
  342. - "block_bbox": The bounding box of the table layout.
  343. - "pred_html": The predicted HTML representation of the table.
  344. seal_res_list (List): A list of seal detection results. The details of each item depend on the specific application context.
  345. Returns:
  346. list: A list of structured boxes where each item is a dictionary containing:
  347. - "block_label": The label of the content (e.g., 'table', 'chart', 'image').
  348. - The label as a key with either table HTML or image data and text.
  349. - "block_bbox": The coordinates of the layout box.
  350. """
  351. single_block_layout_parsing_res = []
  352. input_img = overall_ocr_res["doc_preprocessor_res"]["output_img"]
  353. seal_index = 0
  354. for box_info in layout_det_res["boxes"]:
  355. block_bbox = box_info["coordinate"]
  356. label = box_info["label"]
  357. rec_res = {"boxes": [], "rec_texts": [], "flag": False}
  358. seg_start_flag = True
  359. seg_end_flag = True
  360. if label == "table":
  361. for table_res in table_res_list:
  362. if (
  363. _calculate_overlap_area_div_minbox_area_ratio(
  364. block_bbox, table_res["cell_box_list"][0]
  365. )
  366. > 0.5
  367. ):
  368. single_block_layout_parsing_res.append(
  369. {
  370. "block_label": label,
  371. "block_content": table_res["pred_html"],
  372. "block_bbox": block_bbox,
  373. "seg_start_flag": seg_start_flag,
  374. "seg_end_flag": seg_end_flag,
  375. },
  376. )
  377. break
  378. elif label == "seal":
  379. if len(seal_res_list) > 0:
  380. single_block_layout_parsing_res.append(
  381. {
  382. "block_label": label,
  383. "block_content": ", ".join(
  384. seal_res_list[seal_index]["rec_texts"]
  385. ),
  386. "block_bbox": block_bbox,
  387. "seg_start_flag": seg_start_flag,
  388. "seg_end_flag": seg_end_flag,
  389. },
  390. )
  391. seal_index += 1
  392. else:
  393. overall_text_boxes = overall_ocr_res["rec_boxes"]
  394. for box_no in range(len(overall_text_boxes)):
  395. if (
  396. _calculate_overlap_area_div_minbox_area_ratio(
  397. block_bbox, overall_text_boxes[box_no]
  398. )
  399. > 0.5
  400. ):
  401. rec_res["boxes"].append(overall_text_boxes[box_no])
  402. rec_res["rec_texts"].append(
  403. overall_ocr_res["rec_texts"][box_no],
  404. )
  405. rec_res["flag"] = True
  406. if rec_res["flag"]:
  407. rec_res = _sort_ocr_res_by_y_projection(label, block_bbox, rec_res, 0.7)
  408. rec_res_first_bbox = rec_res["boxes"][0]
  409. rec_res_end_bbox = rec_res["boxes"][-1]
  410. if rec_res_first_bbox[0] - block_bbox[0] < 10:
  411. seg_start_flag = False
  412. if block_bbox[2] - rec_res_end_bbox[2] < 10:
  413. seg_end_flag = False
  414. if label == "formula":
  415. rec_res["rec_texts"] = [
  416. rec_res_text.replace("$", "")
  417. for rec_res_text in rec_res["rec_texts"]
  418. ]
  419. if label in ["chart", "image"]:
  420. single_block_layout_parsing_res.append(
  421. {
  422. "block_label": label,
  423. "block_content": "".join(rec_res["rec_texts"]),
  424. "block_image": input_img[
  425. int(block_bbox[1]) : int(block_bbox[3]),
  426. int(block_bbox[0]) : int(block_bbox[2]),
  427. ],
  428. "block_bbox": block_bbox,
  429. "seg_start_flag": seg_start_flag,
  430. "seg_end_flag": seg_end_flag,
  431. },
  432. )
  433. else:
  434. single_block_layout_parsing_res.append(
  435. {
  436. "block_label": label,
  437. "block_content": "".join(rec_res["rec_texts"]),
  438. "block_bbox": block_bbox,
  439. "seg_start_flag": seg_start_flag,
  440. "seg_end_flag": seg_end_flag,
  441. },
  442. )
  443. return single_block_layout_parsing_res
  444. def _projection_by_bboxes(boxes: np.ndarray, axis: int) -> np.ndarray:
  445. """
  446. Generate a 1D projection histogram from bounding boxes along a specified axis.
  447. Args:
  448. boxes: A (N, 4) array of bounding boxes defined by [x_min, y_min, x_max, y_max].
  449. axis: Axis for projection; 0 for horizontal (x-axis), 1 for vertical (y-axis).
  450. Returns:
  451. A 1D numpy array representing the projection histogram based on bounding box intervals.
  452. """
  453. assert axis in [0, 1]
  454. max_length = np.max(boxes[:, axis::2])
  455. projection = np.zeros(max_length, dtype=int)
  456. # Increment projection histogram over the interval defined by each bounding box
  457. for start, end in boxes[:, axis::2]:
  458. projection[start:end] += 1
  459. return projection
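# Illustrative sketch (assumed boxes): projecting two boxes onto the x-axis;
# columns covered by both boxes receive a count of 2.
#
#     _projection_by_bboxes(np.array([[0, 0, 4, 10], [2, 0, 6, 10]]), axis=0)
#     # -> array([1, 1, 2, 2, 1, 1])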
  460. def _split_projection_profile(arr_values: np.ndarray, min_value: float, min_gap: float):
  461. """
  462. Split the projection profile into segments based on specified thresholds.
  463. Args:
  464. arr_values: 1D array representing the projection profile.
  465. min_value: Minimum value threshold to consider a profile segment significant.
  466. min_gap: Minimum gap width to consider a separation between segments.
  467. Returns:
  468. A tuple of start and end indices for each segment that meets the criteria.
  469. """
  470. # Identify indices where the projection exceeds the minimum value
  471. significant_indices = np.where(arr_values > min_value)[0]
  472. if not len(significant_indices):
  473. return
  474. # Calculate gaps between significant indices
  475. index_diffs = significant_indices[1:] - significant_indices[:-1]
  476. gap_indices = np.where(index_diffs > min_gap)[0]
  477. # Determine start and end indices of segments
  478. segment_starts = np.insert(
  479. significant_indices[gap_indices + 1],
  480. 0,
  481. significant_indices[0],
  482. )
  483. segment_ends = np.append(
  484. significant_indices[gap_indices],
  485. significant_indices[-1] + 1,
  486. )
  487. return segment_starts, segment_ends
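# Illustrative sketch (assumed profile): positive values form two runs separated
# by a two-position gap, which exceeds min_gap=1, so two segments are returned.
#
#     _split_projection_profile(np.array([2, 3, 0, 0, 1, 1]), min_value=0, min_gap=1)
#     # -> (array([0, 4]), array([1, 6]))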
  488. def _recursive_yx_cut(
  489. boxes: np.ndarray, indices: List[int], res: List[int], min_gap: int = 1
  490. ):
  491. """
  492. Recursively project and segment bounding boxes, starting with Y-axis and followed by X-axis.
  493. Args:
  494. boxes: A (N, 4) array representing bounding boxes.
  495. indices: List of indices indicating the original position of boxes.
  496. res: List to store indices of the final segmented bounding boxes.
  497. min_gap (int): Minimum gap width to consider a separation between segments on the X-axis. Defaults to 1.
  498. Returns:
  499. None: This function modifies the `res` list in place.
  500. """
  501. assert len(boxes) == len(
  502. indices
  503. ), "The length of boxes and indices must be the same."
  504. # Sort by y_min for Y-axis projection
  505. y_sorted_indices = boxes[:, 1].argsort()
  506. y_sorted_boxes = boxes[y_sorted_indices]
  507. y_sorted_indices = np.array(indices)[y_sorted_indices]
  508. # Perform Y-axis projection
  509. y_projection = _projection_by_bboxes(boxes=y_sorted_boxes, axis=1)
  510. y_intervals = _split_projection_profile(y_projection, 0, 1)
  511. if not y_intervals:
  512. return
  513. # Process each segment defined by Y-axis projection
  514. for y_start, y_end in zip(*y_intervals):
  515. # Select boxes within the current y interval
  516. y_interval_indices = (y_start <= y_sorted_boxes[:, 1]) & (
  517. y_sorted_boxes[:, 1] < y_end
  518. )
  519. y_boxes_chunk = y_sorted_boxes[y_interval_indices]
  520. y_indices_chunk = y_sorted_indices[y_interval_indices]
  521. # Sort by x_min for X-axis projection
  522. x_sorted_indices = y_boxes_chunk[:, 0].argsort()
  523. x_sorted_boxes_chunk = y_boxes_chunk[x_sorted_indices]
  524. x_sorted_indices_chunk = y_indices_chunk[x_sorted_indices]
  525. # Perform X-axis projection
  526. x_projection = _projection_by_bboxes(boxes=x_sorted_boxes_chunk, axis=0)
  527. x_intervals = _split_projection_profile(x_projection, 0, min_gap)
  528. if not x_intervals:
  529. continue
  530. # If X-axis cannot be further segmented, add current indices to results
  531. if len(x_intervals[0]) == 1:
  532. res.extend(x_sorted_indices_chunk)
  533. continue
  534. # Recursively process each segment defined by X-axis projection
  535. for x_start, x_end in zip(*x_intervals):
  536. x_interval_indices = (x_start <= x_sorted_boxes_chunk[:, 0]) & (
  537. x_sorted_boxes_chunk[:, 0] < x_end
  538. )
  539. _recursive_yx_cut(
  540. x_sorted_boxes_chunk[x_interval_indices],
  541. x_sorted_indices_chunk[x_interval_indices],
  542. res,
  543. )
  544. def _recursive_xy_cut(
  545. boxes: np.ndarray, indices: List[int], res: List[int], min_gap: int = 1
  546. ):
  547. """
  548. Recursively performs X-axis projection followed by Y-axis projection to segment bounding boxes.
  549. Args:
  550. boxes: A (N, 4) array representing bounding boxes with [x_min, y_min, x_max, y_max].
  551. indices: A list of indices representing the position of boxes in the original data.
  552. res: A list to store indices of bounding boxes that meet the criteria.
  553. min_gap (int): Minimum gap width to consider a separation between segments on the X-axis. Defaults to 1.
  554. Returns:
  555. None: This function modifies the `res` list in place.
  556. """
  557. # Ensure boxes and indices have the same length
  558. assert len(boxes) == len(
  559. indices
  560. ), "The length of boxes and indices must be the same."
  561. # Sort by x_min to prepare for X-axis projection
  562. x_sorted_indices = boxes[:, 0].argsort()
  563. x_sorted_boxes = boxes[x_sorted_indices]
  564. x_sorted_indices = np.array(indices)[x_sorted_indices]
  565. # Perform X-axis projection
  566. x_projection = _projection_by_bboxes(boxes=x_sorted_boxes, axis=0)
  567. x_intervals = _split_projection_profile(x_projection, 0, 1)
  568. if not x_intervals:
  569. return
  570. # Process each segment defined by X-axis projection
  571. for x_start, x_end in zip(*x_intervals):
  572. # Select boxes within the current x interval
  573. x_interval_indices = (x_start <= x_sorted_boxes[:, 0]) & (
  574. x_sorted_boxes[:, 0] < x_end
  575. )
  576. x_boxes_chunk = x_sorted_boxes[x_interval_indices]
  577. x_indices_chunk = x_sorted_indices[x_interval_indices]
  578. # Sort selected boxes by y_min to prepare for Y-axis projection
  579. y_sorted_indices = x_boxes_chunk[:, 1].argsort()
  580. y_sorted_boxes_chunk = x_boxes_chunk[y_sorted_indices]
  581. y_sorted_indices_chunk = x_indices_chunk[y_sorted_indices]
  582. # Perform Y-axis projection
  583. y_projection = _projection_by_bboxes(boxes=y_sorted_boxes_chunk, axis=1)
  584. y_intervals = _split_projection_profile(y_projection, 0, min_gap)
  585. if not y_intervals:
  586. continue
  587. # If Y-axis cannot be further segmented, add current indices to results
  588. if len(y_intervals[0]) == 1:
  589. res.extend(y_sorted_indices_chunk)
  590. continue
  591. # Recursively process each segment defined by Y-axis projection
  592. for y_start, y_end in zip(*y_intervals):
  593. y_interval_indices = (y_start <= y_sorted_boxes_chunk[:, 1]) & (
  594. y_sorted_boxes_chunk[:, 1] < y_end
  595. )
  596. _recursive_xy_cut(
  597. y_sorted_boxes_chunk[y_interval_indices],
  598. y_sorted_indices_chunk[y_interval_indices],
  599. res,
  600. )
  601. def sort_by_xycut(
  602. block_bboxes: Union[np.ndarray, List[List[int]]],
  603. direction: int = 0,
  604. min_gap: int = 1,
  605. ) -> List[int]:
  606. """
  607. Sort bounding boxes using recursive XY cut method based on the specified direction.
  608. Args:
  609. block_bboxes (Union[np.ndarray, List[List[int]]]): An array or list of bounding boxes,
  610. where each box is represented as
  611. [x_min, y_min, x_max, y_max].
  612. direction (int): Direction for the initial cut. Use 1 for Y-axis first and 0 for X-axis first.
  613. Defaults to 0.
  614. min_gap (int): Minimum gap width to consider a separation between segments. Defaults to 1.
  615. Returns:
  616. List[int]: A list of indices representing the order of sorted bounding boxes.
  617. """
  618. block_bboxes = np.asarray(block_bboxes).astype(int)
  619. res = []
  620. if direction == 1:
  621. _recursive_yx_cut(
  622. block_bboxes,
  623. np.arange(len(block_bboxes)),
  624. res,
  625. min_gap,
  626. )
  627. else:
  628. _recursive_xy_cut(
  629. block_bboxes,
  630. np.arange(len(block_bboxes)),
  631. res,
  632. min_gap,
  633. )
  634. return res
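# Illustrative sketch (assumed boxes): a full-width header above two columns;
# with direction=1 (Y-axis first) the header is ordered before the left column,
# which is ordered before the right column.
#
#     bboxes = [
#         [0, 0, 200, 20],      # header
#         [110, 30, 200, 90],   # right column
#         [0, 30, 100, 90],     # left column
#     ]
#     sort_by_xycut(bboxes, direction=1, min_gap=1)   # -> [0, 2, 1]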
  635. def _img_array2path(data: np.ndarray) -> dict:
  636. """
  637. Convert an image array to a PIL image keyed by a generated relative file path.
  638. Args:
  639. data (np.ndarray): An image represented as a numpy array with 3 dimensions (H, W, C).
  640. Returns:
  641. dict: A dictionary with a single key-value pair formatted as:
  642. {"imgs/image_{uuid4_hex}.png": PIL.Image.Image}
  643. Raises:
  644. ValueError: If the input data is not a valid image array.
  645. """
  646. if isinstance(data, np.ndarray) and data.ndim == 3:
  647. # Generate a unique filename using UUID
  648. img_name = f"image_{uuid.uuid4().hex}.png"
  649. return {f"imgs/{img_name}": Image.fromarray(data)}
  650. else:
  651. raise ValueError(
  652. "Input data must be a 3-dimensional numpy array representing an image."
  653. )
  654. def recursive_img_array2path(
  655. data: Union[Dict[str, Any], List[Any]],
  656. labels: List[str] = [],
  657. ) -> None:
  658. """
  659. Recursively process a dictionary or list, replacing image arrays stored under
  660. the given keys with {relative_path: PIL.Image} mappings.
  661. Args:
  662. data (Union[Dict[str, Any], List[Any]]): The data structure that may contain image arrays.
  664. labels (List[str]): List of keys to check for image arrays in dictionaries.
  665. Returns:
  666. None: This function modifies the input data structure in place.
  667. """
  668. if isinstance(data, dict):
  669. for k, v in data.items():
  670. if k in labels and isinstance(v, np.ndarray) and v.ndim == 3:
  671. data[k] = _img_array2path(v)
  672. else:
  673. recursive_img_array2path(v, labels)
  674. elif isinstance(data, list):
  675. for item in data:
  676. recursive_img_array2path(item, labels)
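# Illustrative sketch (assumed data): ndarray images stored under the given keys
# are replaced in place with {relative_path: PIL.Image} mappings.
#
#     blocks = [{"block_label": "image",
#                "block_image": np.zeros((8, 8, 3), dtype=np.uint8)}]
#     recursive_img_array2path(blocks, labels=["block_image"])
#     blocks[0]["block_image"]   # -> {"imgs/image_<uuid hex>.png": <PIL.Image.Image>}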
  677. def _get_minbox_if_overlap_by_ratio(
  678. bbox1: Union[List[int], Tuple[int, int, int, int]],
  679. bbox2: Union[List[int], Tuple[int, int, int, int]],
  680. ratio: float,
  681. smaller: bool = True,
  682. ) -> Optional[int]:
  683. """
  684. Determine if the overlap area between two bounding boxes exceeds a given ratio
  685. and indicate which box (the smaller or the larger, per the `smaller` flag) is selected.
  686. Args:
  687. bbox1 (Union[List[int], Tuple[int, int, int, int]]): Coordinates of the first bounding box [x_min, y_min, x_max, y_max].
  688. bbox2 (Union[List[int], Tuple[int, int, int, int]]): Coordinates of the second bounding box [x_min, y_min, x_max, y_max].
  689. ratio (float): The overlap ratio threshold.
  690. smaller (bool): If True, select the smaller bounding box; otherwise, select the larger one.
  691. Returns:
  692. Optional[int]: 1 if bbox1 is selected, 2 if bbox2 is selected,
  693. or None if the overlap ratio does not exceed the threshold.
  694. """
  695. # Calculate the areas of both bounding boxes
  696. area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1])
  697. area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1])
  698. # Calculate the overlap ratio using a helper function
  699. overlap_ratio = _calculate_overlap_area_div_minbox_area_ratio(bbox1, bbox2)
  700. # Check if the overlap ratio exceeds the threshold
  701. if overlap_ratio > ratio:
  702. if (area1 <= area2 and smaller) or (area1 >= area2 and not smaller):
  703. return 1
  704. else:
  705. return 2
  706. return None
  707. def _remove_overlap_blocks(
  708. blocks: List[Dict[str, List[int]]], threshold: float = 0.65, smaller: bool = True
  709. ) -> Tuple[List[Dict[str, List[int]]], List[Dict[str, List[int]]]]:
  710. """
  711. Remove overlapping blocks based on a specified overlap ratio threshold.
  712. Args:
  713. blocks (List[Dict[str, List[int]]]): List of block dictionaries, each containing a 'block_bbox' key.
  714. threshold (float): Ratio threshold to determine significant overlap.
  715. smaller (bool): If True, the smaller block in overlap is removed.
  716. Returns:
  717. Tuple[List[Dict[str, List[int]]], List[Dict[str, List[int]]]]:
  718. A tuple containing the updated list of blocks and a list of dropped blocks.
  719. """
  720. dropped_blocks = []
  721. dropped_indexes = set()
  722. # Iterate over each pair of blocks to find overlaps
  723. for i, block1 in enumerate(blocks):
  724. for j in range(i + 1, len(blocks)):
  725. block2 = blocks[j]
  726. # Skip blocks that are already marked for removal
  727. if i in dropped_indexes or j in dropped_indexes:
  728. continue
  729. # Check for overlap and determine which block to remove
  730. overlap_box_index = _get_minbox_if_overlap_by_ratio(
  731. block1["block_bbox"],
  732. block2["block_bbox"],
  733. threshold,
  734. smaller=smaller,
  735. )
  736. if overlap_box_index is not None:
  737. # Determine which block to remove based on overlap_box_index
  738. if overlap_box_index == 1:
  739. drop_index = i
  740. else:
  741. drop_index = j
  742. dropped_indexes.add(drop_index)
  743. # Remove marked blocks from the original list
  744. for index in sorted(dropped_indexes, reverse=True):
  745. dropped_blocks.append(blocks[index])
  746. del blocks[index]
  747. return blocks, dropped_blocks
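# Illustrative sketch (assumed blocks): the small block lies entirely inside the
# large one, so with smaller=True it is moved to the dropped list.
#
#     blocks = [
#         {"block_label": "text", "block_bbox": [0, 0, 100, 100]},
#         {"block_label": "text", "block_bbox": [10, 10, 20, 20]},
#     ]
#     kept, dropped = _remove_overlap_blocks(blocks, threshold=0.65, smaller=True)
#     len(kept), len(dropped)   # -> (1, 1)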
  748. def _get_text_median_width(blocks: List[Dict[str, any]]) -> float:
  749. """
  750. Calculate the median width of blocks labeled as "text".
  751. Args:
  752. blocks (List[Dict[str, any]]): List of block dictionaries, each containing a 'block_bbox' and 'block_label'.
  753. Returns:
  754. float: The median width of text blocks, or infinity if no text blocks are found.
  755. """
  756. widths = [
  757. block["block_bbox"][2] - block["block_bbox"][0]
  758. for block in blocks
  759. if block.get("block_label") == "text"
  760. ]
  761. return np.median(widths) if widths else float("inf")
  762. def _get_layout_property(
  763. blocks: List[Dict[str, any]],
  764. median_width: float,
  765. no_mask_labels: List[str],
  766. threshold: float = 0.8,
  767. ) -> Tuple[List[Dict[str, any]], bool]:
  768. """
  769. Determine the layout (single or double column) of text blocks.
  770. Args:
  771. blocks (List[Dict[str, any]]): List of block dictionaries containing 'block_label' and 'block_bbox'.
  772. median_width (float): Median width of text blocks.
  773. no_mask_labels (List[str]): Labels of blocks to be considered for layout analysis.
  774. threshold (float): Threshold for determining layout overlap.
  775. Returns:
  776. Tuple[List[Dict[str, any]], bool]: Updated list of blocks with layout information and a boolean
  777. indicating if the double layout area is greater than the single layout area.
  778. """
  779. blocks.sort(
  780. key=lambda x: (
  781. x["block_bbox"][0],
  782. (x["block_bbox"][2] - x["block_bbox"][0]),
  783. ),
  784. )
  785. check_single_layout = {}
  786. page_min_x, page_max_x = float("inf"), 0
  787. double_label_area = 0
  788. single_label_area = 0
  789. for i, block in enumerate(blocks):
  790. page_min_x = min(page_min_x, block["block_bbox"][0])
  791. page_max_x = max(page_max_x, block["block_bbox"][2])
  792. page_width = page_max_x - page_min_x
  793. for i, block in enumerate(blocks):
  794. if block["block_label"] not in no_mask_labels:
  795. continue
  796. x_min_i, _, x_max_i, _ = block["block_bbox"]
  797. layout_length = x_max_i - x_min_i
  798. cover_count, cover_with_threshold_count = 0, 0
  799. match_block_with_threshold_indexes = []
  800. for j, other_block in enumerate(blocks):
  801. if i == j or other_block["block_label"] not in no_mask_labels:
  802. continue
  803. x_min_j, _, x_max_j, _ = other_block["block_bbox"]
  804. x_match_min, x_match_max = max(
  805. x_min_i,
  806. x_min_j,
  807. ), min(x_max_i, x_max_j)
  808. match_block_iou = (x_match_max - x_match_min) / (x_max_j - x_min_j)
  809. if match_block_iou > 0:
  810. cover_count += 1
  811. if match_block_iou > threshold:
  812. cover_with_threshold_count += 1
  813. match_block_with_threshold_indexes.append(
  814. (j, match_block_iou),
  815. )
  816. x_min_i = x_match_max
  817. if x_min_i >= x_max_i:
  818. break
  819. if (
  820. layout_length > median_width * 1.3
  821. and (cover_with_threshold_count >= 2 or cover_count >= 2)
  822. ) or layout_length > 0.6 * page_width:
  823. # if layout_length > median_width * 1.3 and (cover_with_threshold_count >= 2):
  824. block["layout"] = "double"
  825. double_label_area += (block["block_bbox"][2] - block["block_bbox"][0]) * (
  826. block["block_bbox"][3] - block["block_bbox"][1]
  827. )
  828. else:
  829. block["layout"] = "single"
  830. check_single_layout[i] = match_block_with_threshold_indexes
  831. # Check single-layout block
  832. for i, single_layout in check_single_layout.items():
  833. if single_layout:
  834. index, match_iou = single_layout[-1]
  835. if match_iou > 0.9 and blocks[index]["layout"] == "double":
  836. blocks[i]["layout"] = "double"
  837. double_label_area += (
  838. blocks[i]["block_bbox"][2] - blocks[i]["block_bbox"][0]
  839. ) * (blocks[i]["block_bbox"][3] - blocks[i]["block_bbox"][1])
  840. else:
  841. single_label_area += (
  842. blocks[i]["block_bbox"][2] - blocks[i]["block_bbox"][0]
  843. ) * (blocks[i]["block_bbox"][3] - blocks[i]["block_bbox"][1])
  844. return blocks, (double_label_area > single_label_area)
  845. def _get_bbox_direction(input_bbox: List[float], ratio: float = 1.0) -> bool:
  846. """
  847. Determine if a bounding box is horizontal or vertical.
  848. Args:
  849. input_bbox (List[float]): Bounding box [x_min, y_min, x_max, y_max].
  850. ratio (float): Ratio for determining orientation. Default is 1.0.
  851. Returns:
  852. bool: True if the bounding box is considered horizontal, False if vertical.
  853. """
  854. width = input_bbox[2] - input_bbox[0]
  855. height = input_bbox[3] - input_bbox[1]
  856. return width * ratio >= height
  857. def _get_projection_iou(
  858. input_bbox: List[float], match_bbox: List[float], is_horizontal: bool = True
  859. ) -> float:
  860. """
  861. Calculate the IoU of lines between two bounding boxes.
  862. Args:
  863. input_bbox (List[float]): First bounding box [x_min, y_min, x_max, y_max].
  864. match_bbox (List[float]): Second bounding box [x_min, y_min, x_max, y_max].
  865. is_horizontal (bool): Whether to compare horizontally or vertically.
  866. Returns:
  867. float: Line IoU. Returns 0 if there is no overlap.
  868. """
  869. if is_horizontal:
  870. x_match_min = max(input_bbox[0], match_bbox[0])
  871. x_match_max = min(input_bbox[2], match_bbox[2])
  872. overlap = max(0, x_match_max - x_match_min)
  873. input_width = input_bbox[2] - input_bbox[0]
  874. else:
  875. y_match_min = max(input_bbox[1], match_bbox[1])
  876. y_match_max = min(input_bbox[3], match_bbox[3])
  877. overlap = max(0, y_match_max - y_match_min)
  878. input_width = input_bbox[3] - input_bbox[1]
  879. return overlap / input_width if input_width > 0 else 0.0
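# Illustrative sketch (assumed boxes): horizontal comparison in which half of the
# input box's width is covered by the match box.
#
#     _get_projection_iou([0, 0, 100, 10], [50, 0, 200, 10], is_horizontal=True)
#     # -> 0.5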
  880. def _get_sub_category(
  881. blocks: List[Dict[str, Any]], title_labels: List[str]
  882. ) -> List[Dict[str, Any]]:
  883. """
  884. Attach title-text, sub-title, and vision-footnote relationships to the relevant blocks.
  885. Args:
  886. blocks (List[Dict[str, Any]]): List of block dictionaries.
  887. title_labels (List[str]): List of labels considered as titles.
  888. Returns:
  889. List[Dict[str, Any]]: Updated list of blocks with title-text layout information.
  890. """
  891. sub_title_labels = ["paragraph_title"]
  892. vision_labels = ["image", "table", "chart", "figure"]
  893. for i, block1 in enumerate(blocks):
  894. block1.setdefault("title_text", [])
  895. block1.setdefault("sub_title", [])
  896. block1.setdefault("vision_footnote", [])
  897. block1.setdefault("sub_label", block1["block_label"])
  898. if (
  899. block1["block_label"] not in title_labels
  900. and block1["block_label"] not in sub_title_labels
  901. and block1["block_label"] not in vision_labels
  902. ):
  903. continue
  904. bbox1 = block1["block_bbox"]
  905. x1, y1, x2, y2 = bbox1
  906. is_horizontal_1 = _get_bbox_direction(block1["block_bbox"])
  907. left_up_title_text_distance = float("inf")
  908. left_up_title_text_index = -1
  909. left_up_title_text_direction = None
  910. right_down_title_text_distance = float("inf")
  911. right_down_title_text_index = -1
  912. right_down_title_text_direction = None
  913. for j, block2 in enumerate(blocks):
  914. if i == j:
  915. continue
  916. bbox2 = block2["block_bbox"]
  917. x1_prime, y1_prime, x2_prime, y2_prime = bbox2
  918. is_horizontal_2 = _get_bbox_direction(bbox2)
  919. match_block_iou = _get_projection_iou(
  920. bbox2,
  921. bbox1,
  922. is_horizontal_1,
  923. )
  924. def distance_(is_horizontal, is_left_up):
  925. if is_horizontal:
  926. if is_left_up:
  927. return (y1 - y2_prime + 2) // 5 + x1_prime / 5000
  928. else:
  929. return (y1_prime - y2 + 2) // 5 + x1_prime / 5000
  930. else:
  931. if is_left_up:
  932. return (x1 - x2_prime + 2) // 5 + y1_prime / 5000
  933. else:
  934. return (x1_prime - x2 + 2) // 5 + y1_prime / 5000
  935. block_iou_threshold = 0.1
  936. if block1["block_label"] in sub_title_labels:
  937. match_block_iou = _calculate_overlap_area_div_minbox_area_ratio(
  938. bbox2,
  939. bbox1,
  940. )
  941. block_iou_threshold = 0.7
  942. if is_horizontal_1:
  943. if match_block_iou >= block_iou_threshold:
  944. left_up_distance = distance_(True, True)
  945. right_down_distance = distance_(True, False)
  946. if (
  947. y2_prime <= y1
  948. and left_up_distance <= left_up_title_text_distance
  949. ):
  950. left_up_title_text_distance = left_up_distance
  951. left_up_title_text_index = j
  952. left_up_title_text_direction = is_horizontal_2
  953. elif (
  954. y1_prime > y2
  955. and right_down_distance < right_down_title_text_distance
  956. ):
  957. right_down_title_text_distance = right_down_distance
  958. right_down_title_text_index = j
  959. right_down_title_text_direction = is_horizontal_2
  960. else:
  961. if match_block_iou >= block_iou_threshold:
  962. left_up_distance = distance_(False, True)
  963. right_down_distance = distance_(False, False)
  964. if (
  965. x2_prime <= x1
  966. and left_up_distance <= left_up_title_text_distance
  967. ):
  968. left_up_title_text_distance = left_up_distance
  969. left_up_title_text_index = j
  970. left_up_title_text_direction = is_horizontal_2
  971. elif (
  972. x1_prime > x2
  973. and right_down_distance < right_down_title_text_distance
  974. ):
  975. right_down_title_text_distance = right_down_distance
  976. right_down_title_text_index = j
  977. right_down_title_text_direction = is_horizontal_2
  978. height = bbox1[3] - bbox1[1]
  979. width = bbox1[2] - bbox1[0]
  980. title_text_weight = [0.8, 0.8]
  981. title_text, sub_title, vision_footnote = [], [], []
  982. def get_sub_category_(
  983. title_text_direction,
  984. title_text_index,
  985. label,
  986. is_left_up=True,
  987. ):
  988. direction_ = [1, 3] if is_left_up else [2, 4]
  989. if (
  990. title_text_direction == is_horizontal_1
  991. and title_text_index != -1
  992. and (label == "text" or label == "paragraph_title")
  993. ):
  994. bbox2 = blocks[title_text_index]["block_bbox"]
  995. if is_horizontal_1:
  996. height1 = bbox2[3] - bbox2[1]
  997. width1 = bbox2[2] - bbox2[0]
  998. if label == "text":
  999. if (
  1000. _nearest_edge_distance(bbox1, bbox2)[0] <= 15
  1001. and block1["block_label"] in vision_labels
  1002. and width1 < width
  1003. and height1 < 0.5 * height
  1004. ):
  1005. blocks[title_text_index]["sub_label"] = "vision_footnote"
  1006. vision_footnote.append(bbox2)
  1007. elif (
  1008. height1 < height * title_text_weight[0]
  1009. and (width1 < width or width1 > 1.5 * width)
  1010. and block1["block_label"] in title_labels
  1011. ):
  1012. blocks[title_text_index]["sub_label"] = "title_text"
  1013. title_text.append((direction_[0], bbox2))
  1014. elif (
  1015. label == "paragraph_title"
  1016. and block1["block_label"] in sub_title_labels
  1017. ):
  1018. sub_title.append(bbox2)
  1019. else:
  1020. height1 = bbox2[3] - bbox2[1]
  1021. width1 = bbox2[2] - bbox2[0]
  1022. if label == "text":
  1023. if (
  1024. _nearest_edge_distance(bbox1, bbox2)[0] <= 15
  1025. and block1["block_label"] in vision_labels
  1026. and height1 < height
  1027. and width1 < 0.5 * width
  1028. ):
  1029. blocks[title_text_index]["sub_label"] = "vision_footnote"
  1030. vision_footnote.append(bbox2)
  1031. elif (
  1032. width1 < width * title_text_weight[1]
  1033. and block1["block_label"] in title_labels
  1034. ):
  1035. blocks[title_text_index]["sub_label"] = "title_text"
  1036. title_text.append((direction_[1], bbox2))
  1037. elif (
  1038. label == "paragraph_title"
  1039. and block1["block_label"] in sub_title_labels
  1040. ):
  1041. sub_title.append(bbox2)
  1042. if (
  1043. is_horizontal_1
  1044. and abs(left_up_title_text_distance - right_down_title_text_distance) * 5
  1045. > height
  1046. ) or (
  1047. not is_horizontal_1
  1048. and abs(left_up_title_text_distance - right_down_title_text_distance) * 5
  1049. > width
  1050. ):
  1051. if left_up_title_text_distance < right_down_title_text_distance:
  1052. get_sub_category_(
  1053. left_up_title_text_direction,
  1054. left_up_title_text_index,
  1055. blocks[left_up_title_text_index]["block_label"],
  1056. True,
  1057. )
  1058. else:
  1059. get_sub_category_(
  1060. right_down_title_text_direction,
  1061. right_down_title_text_index,
  1062. blocks[right_down_title_text_index]["block_label"],
  1063. False,
  1064. )
  1065. else:
  1066. get_sub_category_(
  1067. left_up_title_text_direction,
  1068. left_up_title_text_index,
  1069. blocks[left_up_title_text_index]["block_label"],
  1070. True,
  1071. )
  1072. get_sub_category_(
  1073. right_down_title_text_direction,
  1074. right_down_title_text_index,
  1075. blocks[right_down_title_text_index]["block_label"],
  1076. False,
  1077. )
  1078. if block1["block_label"] in title_labels:
  1079. if blocks[i].get("title_text") == []:
  1080. blocks[i]["title_text"] = title_text
  1081. if block1["block_label"] in sub_title_labels:
  1082. if blocks[i].get("sub_title") == []:
  1083. blocks[i]["sub_title"] = sub_title
  1084. if block1["block_label"] in vision_labels:
  1085. if blocks[i].get("vision_footnote") == []:
  1086. blocks[i]["vision_footnote"] = vision_footnote
  1087. return blocks
  1088. def get_layout_ordering(
  1089. data: List[Dict[str, Any]],
  1090. no_mask_labels: List[str] = [],
  1091. already_sorted: bool = False,
  1092. ) -> None:
  1093. """
  1094. Process layout parsing results to remove overlapping bounding boxes
  1095. and assign an ordering index based on their positions.
  1096. Modifies:
  1097. The 'data' list by adding an 'index' to each block.
  1098. Args:
  1099. data (List[Dict[str, Any]]): List of block dictionaries with 'block_bbox' and 'block_label'.
  1100. no_mask_labels (List[str]): Labels for which overlapping removal is not performed.
  1101. already_sorted (bool): Assumes data is already sorted by position if True.
  1102. """
  1103. if already_sorted:
  1104. return data
  1105. title_text_labels = ["doc_title"]
  1106. title_labels = ["doc_title", "paragraph_title"]
  1107. vision_labels = ["image", "table", "seal", "chart", "figure"]
  1108. vision_title_labels = ["table_title", "chart_title", "figure_title"]
  1109. parsing_result = data
  1110. parsing_result, _ = _remove_overlap_blocks(
  1111. parsing_result,
  1112. threshold=0.5,
  1113. smaller=True,
  1114. )
  1115. parsing_result = _get_sub_category(parsing_result, title_text_labels)
  1116. doc_flag = False
  1117. median_width = _get_text_median_width(parsing_result)
  1118. parsing_result, projection_direction = _get_layout_property(
  1119. parsing_result,
  1120. median_width,
  1121. no_mask_labels=no_mask_labels,
  1122. threshold=0.3,
  1123. )
  1124. # Group blocks by category; bounding boxes are converted to int in the loop below
  1125. (
  1126. double_text_blocks,
  1127. title_text_blocks,
  1128. title_blocks,
  1129. vision_blocks,
  1130. vision_title_blocks,
  1131. vision_footnote_blocks,
  1132. other_blocks,
  1133. ) = ([], [], [], [], [], [], [])
  1134. drop_indexes = []
  1135. for index, block in enumerate(parsing_result):
  1136. label = block["sub_label"]
  1137. block["block_bbox"] = list(map(int, block["block_bbox"]))
  1138. if label == "doc_title":
  1139. doc_flag = True
  1140. if label in no_mask_labels:
  1141. if block["layout"] == "double":
  1142. double_text_blocks.append(block)
  1143. drop_indexes.append(index)
  1144. elif label == "title_text":
  1145. title_text_blocks.append(block)
  1146. drop_indexes.append(index)
  1147. elif label == "vision_footnote":
  1148. vision_footnote_blocks.append(block)
  1149. drop_indexes.append(index)
  1150. elif label in vision_title_labels:
  1151. vision_title_blocks.append(block)
  1152. drop_indexes.append(index)
  1153. elif label in title_labels:
  1154. title_blocks.append(block)
  1155. drop_indexes.append(index)
  1156. elif label in vision_labels:
  1157. vision_blocks.append(block)
  1158. drop_indexes.append(index)
  1159. else:
  1160. other_blocks.append(block)
  1161. drop_indexes.append(index)
  1162. for index in sorted(drop_indexes, reverse=True):
  1163. del parsing_result[index]
  1164. if len(parsing_result) > 0:
  1165. # single text label
  1166. if len(double_text_blocks) > len(parsing_result) or projection_direction:
  1167. parsing_result.extend(title_blocks + double_text_blocks)
  1168. title_blocks = []
  1169. double_text_blocks = []
  1170. block_bboxes = [block["block_bbox"] for block in parsing_result]
  1171. block_bboxes.sort(
  1172. key=lambda x: (
  1173. x[0] // max(20, median_width),
  1174. x[1],
  1175. ),
  1176. )
  1177. block_bboxes = np.array(block_bboxes)
  1178. sorted_indices = sort_by_xycut(
  1179. block_bboxes,
  1180. direction=1,
  1181. min_gap=1,
  1182. )
  1183. else:
  1184. block_bboxes = [block["block_bbox"] for block in parsing_result]
  1185. block_bboxes.sort(key=lambda x: (x[0] // 20, x[1]))
  1186. block_bboxes = np.array(block_bboxes)
  1187. sorted_indices = sort_by_xycut(
  1188. block_bboxes,
  1189. direction=0,
  1190. min_gap=20,
  1191. )
  1192. sorted_boxes = block_bboxes[sorted_indices].tolist()
  1193. for block in parsing_result:
  1194. block["index"] = sorted_boxes.index(block["block_bbox"]) + 1
  1195. block["sub_index"] = sorted_boxes.index(block["block_bbox"]) + 1
    def nearest_match_(input_blocks, distance_type="manhattan", is_add_index=True):
        """Attach each input block to the nearest already-ordered block, inherit its
        index (or sub_index), and append it to parsing_result."""
        for block in input_blocks:
            bbox = block["block_bbox"]
            min_distance = float("inf")
            min_distance_config = [
                [float("inf"), float("inf")],
                float("inf"),
                float("inf"),
            ]  # for double text
            nearest_gt_index = 0
            for match_block in parsing_result:
                match_bbox = match_block["block_bbox"]
                if distance_type == "nearest_iou_edge_distance":
                    distance, min_distance_config = _nearest_iou_edge_distance(
                        bbox,
                        match_bbox,
                        block["sub_label"],
                        vision_labels=vision_labels,
                        no_mask_labels=no_mask_labels,
                        median_width=median_width,
                        title_labels=title_labels,
                        title_text=block["title_text"],
                        sub_title=block["sub_title"],
                        min_distance_config=min_distance_config,
                        tolerance_len=10,
                    )
                elif distance_type == "title_text":
                    if (
                        match_block["block_label"] in title_labels + ["abstract"]
                        and match_block["title_text"] != []
                    ):
                        iou_left_up = _calculate_overlap_area_div_minbox_area_ratio(
                            bbox,
                            match_block["title_text"][0][1],
                        )
                        iou_right_down = _calculate_overlap_area_div_minbox_area_ratio(
                            bbox,
                            match_block["title_text"][-1][1],
                        )
                        iou = 1 - max(iou_left_up, iou_right_down)
                        distance = _manhattan_distance(bbox, match_bbox) * iou
                    else:
                        distance = float("inf")
                elif distance_type == "manhattan":
                    distance = _manhattan_distance(bbox, match_bbox)
                elif distance_type == "vision_footnote":
                    if (
                        match_block["block_label"] in vision_labels
                        and match_block["vision_footnote"] != []
                    ):
                        iou_left_up = _calculate_overlap_area_div_minbox_area_ratio(
                            bbox,
                            match_block["vision_footnote"][0],
                        )
                        iou_right_down = _calculate_overlap_area_div_minbox_area_ratio(
                            bbox,
                            match_block["vision_footnote"][-1],
                        )
                        iou = 1 - max(iou_left_up, iou_right_down)
                        distance = _manhattan_distance(bbox, match_bbox) * iou
                    else:
                        distance = float("inf")
                elif distance_type == "vision_body":
                    if (
                        match_block["block_label"] in vision_title_labels
                        and block["vision_footnote"] != []
                    ):
                        iou_left_up = _calculate_overlap_area_div_minbox_area_ratio(
                            match_bbox,
                            block["vision_footnote"][0],
                        )
                        iou_right_down = _calculate_overlap_area_div_minbox_area_ratio(
                            match_bbox,
                            block["vision_footnote"][-1],
                        )
                        iou = 1 - max(iou_left_up, iou_right_down)
                        distance = _manhattan_distance(bbox, match_bbox) * iou
                    else:
                        distance = float("inf")
                else:
                    raise NotImplementedError

                if distance < min_distance:
                    min_distance = distance
                    if is_add_index:
                        nearest_gt_index = match_block.get("index", 999)
                    else:
                        nearest_gt_index = match_block.get("sub_index", 999)

            if is_add_index:
                block["index"] = nearest_gt_index
            else:
                block["sub_index"] = nearest_gt_index

            parsing_result.append(block)
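    # Reading note (added for clarity): each nearest_match_ call below attaches one
    # group of deferred blocks to the ordered result, e.g.
    #     nearest_match_(title_blocks, distance_type="nearest_iou_edge_distance")
    # copies the matched block's "index" onto every title block and appends the
    # title blocks to parsing_result, after which the list is re-sorted and
    # re-numbered.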
    # double text label
    double_text_blocks.sort(
        key=lambda x: (
            x["block_bbox"][1] // 10,
            x["block_bbox"][0] // median_width,
            x["block_bbox"][1] ** 2 + x["block_bbox"][0] ** 2,
        ),
    )
    nearest_match_(
        double_text_blocks,
        distance_type="nearest_iou_edge_distance",
    )
    parsing_result.sort(
        key=lambda x: (x["index"], x["block_bbox"][1], x["block_bbox"][0]),
    )

    for idx, block in enumerate(parsing_result):
        block["index"] = idx + 1
        block["sub_index"] = idx + 1

    # title label
    title_blocks.sort(
        key=lambda x: (
            x["block_bbox"][1] // 10,
            x["block_bbox"][0] // median_width,
            x["block_bbox"][1] ** 2 + x["block_bbox"][0] ** 2,
        ),
    )
    nearest_match_(title_blocks, distance_type="nearest_iou_edge_distance")

    if doc_flag:
        text_sort_labels = ["doc_title"]
        text_label_priority = {
            label: priority for priority, label in enumerate(text_sort_labels)
        }
        doc_titles = []
        for i, block in enumerate(parsing_result):
            if block["block_label"] == "doc_title":
                doc_titles.append(
                    (i, block["block_bbox"][1], block["block_bbox"][0]),
                )
        doc_titles.sort(key=lambda x: (x[1], x[2]))
        first_doc_title_index = doc_titles[0][0]
        parsing_result[first_doc_title_index]["index"] = 1
        parsing_result.sort(
            key=lambda x: (
                x["index"],
                text_label_priority.get(x["block_label"], 9999),
                x["block_bbox"][1],
                x["block_bbox"][0],
            ),
        )
    else:
        parsing_result.sort(
            key=lambda x: (
                x["index"],
                x["block_bbox"][1],
                x["block_bbox"][0],
            ),
        )

    for idx, block in enumerate(parsing_result):
        block["index"] = idx + 1
        block["sub_index"] = idx + 1

    # title-text label
    nearest_match_(title_text_blocks, distance_type="title_text")
    text_sort_labels = ["doc_title", "paragraph_title", "title_text"]
    text_label_priority = {
        label: priority for priority, label in enumerate(text_sort_labels)
    }
    parsing_result.sort(
        key=lambda x: (
            x["index"],
            text_label_priority.get(x["sub_label"], 9999),
            x["block_bbox"][1],
            x["block_bbox"][0],
        ),
    )

    for idx, block in enumerate(parsing_result):
        block["index"] = idx + 1
        block["sub_index"] = idx + 1

    # image, figure, chart, seal label
    nearest_match_(
        vision_blocks,
        distance_type="nearest_iou_edge_distance",
        is_add_index=False,
    )
    parsing_result.sort(
        key=lambda x: (
            x["sub_index"],
            x["block_bbox"][1],
            x["block_bbox"][0],
        ),
    )

    for idx, block in enumerate(parsing_result):
        block["sub_index"] = idx + 1

    # image, figure, chart, seal title label
    nearest_match_(
        vision_title_blocks,
        distance_type="nearest_iou_edge_distance",
        is_add_index=False,
    )
    parsing_result.sort(
        key=lambda x: (
            x["sub_index"],
            x["block_bbox"][1],
            x["block_bbox"][0],
        ),
    )

    for idx, block in enumerate(parsing_result):
        block["sub_index"] = idx + 1

    # vision footnote label
    nearest_match_(
        vision_footnote_blocks,
        distance_type="vision_footnote",
        is_add_index=False,
    )
    text_label_priority = {"vision_footnote": 9999}
    parsing_result.sort(
        key=lambda x: (
            x["sub_index"],
            text_label_priority.get(x["sub_label"], 0),
            x["block_bbox"][1],
            x["block_bbox"][0],
        ),
    )

    for idx, block in enumerate(parsing_result):
        block["sub_index"] = idx + 1

    # header, footnote, header_image, ... label
    nearest_match_(other_blocks, distance_type="manhattan", is_add_index=False)

    return data
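# Illustrative example (added for documentation; the field names are taken from the
# ordering code above, but the exact schema produced upstream is an assumption):
# a block entering the ordering logic looks roughly like
#     {
#         "block_label": "text",
#         "sub_label": "text",
#         "block_bbox": [x1, y1, x2, y2],
#         "layout": "single",          # or "double"
#         "title_text": [],            # [(position_indicator, [x1, y1, x2, y2]), ...]
#         "sub_title": [],
#         "vision_footnote": [],
#     }
# and leaves with integer "index" and "sub_index" reading-order fields set.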
def _manhattan_distance(
    point1: Tuple[float, float],
    point2: Tuple[float, float],
    weight_x: float = 1.0,
    weight_y: float = 1.0,
) -> float:
    """
    Calculate the weighted Manhattan distance between two points.

    Args:
        point1 (Tuple[float, float]): The first point as (x, y).
        point2 (Tuple[float, float]): The second point as (x, y).
        weight_x (float): The weight for the x-axis distance. Default is 1.0.
        weight_y (float): The weight for the y-axis distance. Default is 1.0.

    Returns:
        float: The weighted Manhattan distance between the two points.
    """
    return weight_x * abs(point1[0] - point2[0]) + weight_y * abs(point1[1] - point2[1])
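# Quick check of the weighted Manhattan distance above (illustrative values):
# >>> _manhattan_distance((0, 0), (3, 4))
# 7.0
# >>> _manhattan_distance((0, 0), (3, 4), weight_x=2.0)
# 10.0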
def _calculate_horizontal_distance(
    input_bbox: List[int],
    match_bbox: List[int],
    height: int,
    disperse: int,
    title_text: List[Tuple[int, List[int]]],
) -> float:
    """
    Calculate the horizontal distance between two bounding boxes, considering title text adjustments.

    Args:
        input_bbox (List[int]): The bounding box coordinates [x1, y1, x2, y2] of the input object.
        match_bbox (List[int]): The bounding box coordinates [x1', y1', x2', y2'] of the object to match against.
        height (int): The height of the input bounding box, used for normalization.
        disperse (int): The dispersion factor used to normalize the horizontal distance.
        title_text (List[Tuple[int, List[int]]]): A list of tuples containing title text information and their bounding box coordinates.
            Format: [(position_indicator, [x1, y1, x2, y2]), ...].

    Returns:
        float: The calculated horizontal distance, taking the title text adjustments into account.
    """
    x1, y1, x2, y2 = input_bbox
    x1_prime, y1_prime, x2_prime, y2_prime = match_bbox

    # Determine vertical distance adjustment based on title text
    if y2 < y1_prime:
        if title_text and title_text[-1][0] == 2:
            y2 += title_text[-1][1][3] - title_text[-1][1][1]
        vertical_adjustment = (y1_prime - y2) * 0.5
    else:
        if title_text and title_text[0][0] == 1:
            y1 -= title_text[0][1][3] - title_text[0][1][1]
        vertical_adjustment = y1 - y2_prime

    # Calculate horizontal distance with adjustments
    horizontal_distance = (
        abs(x2_prime - x1) // disperse
        + vertical_adjustment // height
        + vertical_adjustment / 5000
    )

    return horizontal_distance
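# Worked example for the formula above (illustrative numbers, not from the library):
# for input_bbox=[100, 0, 150, 40], match_bbox=[0, 60, 80, 100], height=40,
# disperse=100 and no title_text, y2=40 < y1'=60, so the adjustment is
# (60 - 40) * 0.5 = 10.0 and the distance is
# abs(80 - 100) // 100 + 10.0 // 40 + 10.0 / 5000 = 0 + 0.0 + 0.002 = 0.002.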
def _calculate_vertical_distance(
    input_bbox: List[int],
    match_bbox: List[int],
    width: int,
    disperse: int,
    title_text: List[Tuple[int, List[int]]],
) -> float:
    """
    Calculate the vertical distance between two bounding boxes, considering title text adjustments.

    Args:
        input_bbox (List[int]): The bounding box coordinates [x1, y1, x2, y2] of the input object.
        match_bbox (List[int]): The bounding box coordinates [x1', y1', x2', y2'] of the object to match against.
        width (int): The width of the input bounding box, used for normalization.
        disperse (int): The dispersion factor used to normalize the vertical distance.
        title_text (List[Tuple[int, List[int]]]): A list of tuples containing title text information and their bounding box coordinates.
            Format: [(position_indicator, [x1, y1, x2, y2]), ...].

    Returns:
        float: The calculated vertical distance, taking the title text adjustments into account.
    """
    x1, y1, x2, y2 = input_bbox
    x1_prime, y1_prime, x2_prime, y2_prime = match_bbox

    # Determine horizontal distance adjustment based on title text
    if x1 > x2_prime:
        if title_text and title_text[0][0] == 3:
            x1 -= title_text[0][1][2] - title_text[0][1][0]
        horizontal_adjustment = (x1 - x2_prime) * 0.5
    else:
        if title_text and title_text[-1][0] == 4:
            x2 += title_text[-1][1][2] - title_text[-1][1][0]
        horizontal_adjustment = x1_prime - x2

    # Calculate vertical distance with adjustments
    vertical_distance = (
        abs(y2_prime - y1) // disperse
        + horizontal_adjustment // width
        + horizontal_adjustment / 5000
    )

    return vertical_distance
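# Worked example for the formula above (illustrative numbers, not from the library):
# for input_bbox=[0, 100, 50, 120], match_bbox=[0, 0, 50, 80], width=50, disperse=10
# and no title_text, x1=0 is not greater than x2'=50, so the adjustment is
# x1' - x2 = -50 and the distance is
# abs(80 - 100) // 10 + (-50) // 50 + (-50) / 5000 = 2 - 1 - 0.01 = 0.99,
# i.e. vertically aligned boxes score smaller than misaligned ones.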
def _nearest_edge_distance(
    input_bbox: List[int],
    match_bbox: List[int],
    weight: List[float] = [1.0, 1.0, 1.0, 1.0],
    label: str = "text",
    no_mask_labels: List[str] = [],
    min_edge_distance_config: List[float] = [],
    tolerance_len: float = 10.0,
) -> Tuple[float, List[float]]:
    """
    Calculate the nearest edge distance between two bounding boxes, considering directional weights.

    Args:
        input_bbox (list): The bounding box coordinates [x1, y1, x2, y2] of the input object.
        match_bbox (list): The bounding box coordinates [x1', y1', x2', y2'] of the object to match against.
        weight (list, optional): Directional weights for the edge distances [left, right, up, down]. Defaults to [1.0, 1.0, 1.0, 1.0].
        label (str, optional): The label/type of the object in the bounding box (e.g., 'text'). Defaults to 'text'.
        no_mask_labels (list, optional): Labels for which no masking is applied when calculating edge distances. Defaults to an empty list.
        min_edge_distance_config (list, optional): Configuration for minimum edge distances [min_edge_distance_x, min_edge_distance_y].
            Defaults to [float('inf'), float('inf')].
        tolerance_len (float, optional): The tolerance length for adjusting edge distances. Defaults to 10.0.

    Returns:
        Tuple[float, List[float]]: A tuple containing:
            - The calculated minimum edge distance between the bounding boxes.
            - A list with the minimum edge distances in the x and y directions.
    """
    match_bbox_iou = _calculate_overlap_area_div_minbox_area_ratio(
        input_bbox,
        match_bbox,
    )
    if match_bbox_iou > 0 and label not in no_mask_labels:
        return 0, [0, 0]

    if not min_edge_distance_config:
        min_edge_distance_config = [float("inf"), float("inf")]
    min_edge_distance_x, min_edge_distance_y = min_edge_distance_config

    x1, y1, x2, y2 = input_bbox
    x1_prime, y1_prime, x2_prime, y2_prime = match_bbox

    direction_num = 0
    distance_x = float("inf")
    distance_y = float("inf")
    distance = [float("inf")] * 4

    # input_bbox is to the left of match_bbox
    if x2 < x1_prime:
        direction_num += 1
        distance[0] = x1_prime - x2
        if abs(distance[0] - min_edge_distance_x) <= tolerance_len:
            distance_x = min_edge_distance_x * weight[0]
        else:
            distance_x = distance[0] * weight[0]
    # input_bbox is to the right of match_bbox
    elif x1 > x2_prime:
        direction_num += 1
        distance[1] = x1 - x2_prime
        if abs(distance[1] - min_edge_distance_x) <= tolerance_len:
            distance_x = min_edge_distance_x * weight[1]
        else:
            distance_x = distance[1] * weight[1]
    elif match_bbox_iou > 0:
        distance[0] = 0
        distance_x = 0

    # input_bbox is above match_bbox
    if y2 < y1_prime:
        direction_num += 1
        distance[2] = y1_prime - y2
        if abs(distance[2] - min_edge_distance_y) <= tolerance_len:
            distance_y = min_edge_distance_y * weight[2]
        else:
            distance_y = distance[2] * weight[2]
        if label in no_mask_labels:
            distance_y = max(0.1, distance_y) * 100
    # input_bbox is below match_bbox
    elif y1 > y2_prime:
        direction_num += 1
        distance[3] = y1 - y2_prime
        if abs(distance[3] - min_edge_distance_y) <= tolerance_len:
            distance_y = min_edge_distance_y * weight[3]
        else:
            distance_y = distance[3] * weight[3]
    elif match_bbox_iou > 0:
        distance[2] = 0
        distance_y = 0

    if direction_num == 2:
        return (distance_x + distance_y), [
            min(distance[0], distance[1]),
            min(distance[2], distance[3]),
        ]
    else:
        return min(distance_x, distance_y), [
            min(distance[0], distance[1]),
            min(distance[2], distance[3]),
        ]
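# Illustrative call (values chosen for this note, not taken from the pipeline):
# for input_bbox=[0, 0, 10, 10] and match_bbox=[20, 0, 30, 10] with default weights,
# only the "left of" branch fires, so the function returns (10.0, [10, inf]);
# the y entry stays inf because the boxes overlap in the vertical projection and
# have no vertical gap in either direction.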
def _get_weights(label, horizontal):
    """Define weights based on the label and orientation."""
    if label == "doc_title":
        return (
            [1, 0.1, 0.1, 1] if horizontal else [0.2, 0.1, 1, 1]
        )  # left-down, right-left
    elif label in [
        "paragraph_title",
        "table_title",
        "abstract",
        "image",
        "seal",
        "chart",
        "figure",
    ]:
        return [1, 1, 0.1, 1]  # down
    else:
        return [1, 1, 1, 0.1]  # up
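# Sanity examples for the weight table above (illustrative):
# >>> _get_weights("doc_title", horizontal=True)
# [1, 0.1, 0.1, 1]
# >>> _get_weights("table_title", horizontal=False)
# [1, 1, 0.1, 1]
# >>> _get_weights("text", horizontal=True)
# [1, 1, 1, 0.1]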
def _nearest_iou_edge_distance(
    input_bbox: List[int],
    match_bbox: List[int],
    label: str,
    vision_labels: List[str],
    no_mask_labels: List[str],
    median_width: int = -1,
    title_labels: List[str] = [],
    title_text: List[Tuple[int, List[int]]] = [],
    sub_title: List[List[int]] = [],
    min_distance_config: List[float] = [],
    tolerance_len: float = 10.0,
) -> Tuple[float, List[float]]:
    """
    Calculate the nearest IOU edge distance between two bounding boxes, considering label types, title adjustments, and minimum distance configurations.

    This function computes the edge distance between two bounding boxes while considering their overlap (IOU) and various adjustments based on label types,
    title text, and subtitle information. It also applies minimum distance configurations and tolerance adjustments.

    Args:
        input_bbox (List[int]): The bounding box coordinates [x1, y1, x2, y2] of the input object.
        match_bbox (List[int]): The bounding box coordinates [x1', y1', x2', y2'] of the object to match against.
        label (str): The label/type of the object in the bounding box (e.g., 'image', 'text', etc.).
        vision_labels (List[str]): List of labels for vision-related objects (e.g., images, icons).
        no_mask_labels (List[str]): Labels for which no masking is applied when calculating edge distances.
        median_width (int, optional): The median width used for title dispersion calculation. Defaults to -1.
        title_labels (List[str], optional): Labels that indicate the object is a title. Defaults to an empty list.
        title_text (List[Tuple[int, List[int]]], optional): Text content associated with title labels, in the format [(position_indicator, [x1, y1, x2, y2]), ...].
        sub_title (List[List[int]], optional): List of subtitle bounding boxes used to adjust the input_bbox. Defaults to an empty list.
        min_distance_config (List[float], optional): Configuration for minimum distances [min_edge_distance_config, up_edge_distances_config, total_distance].
        tolerance_len (float, optional): The tolerance length for adjusting edge distances. Defaults to 10.0.

    Returns:
        Tuple[float, List[float]]: A tuple containing:
            - The calculated distance considering IOU and adjustments.
            - The updated minimum distance configuration.
    """
    x1, y1, x2, y2 = input_bbox
    x1_prime, y1_prime, x2_prime, y2_prime = match_bbox

    min_edge_distance_config, up_edge_distances_config, total_distance = (
        min_distance_config
    )

    iou_distance = 0

    if label in vision_labels:
        horizontal1 = horizontal2 = True
    else:
        horizontal1 = _get_bbox_direction(input_bbox)
        horizontal2 = _get_bbox_direction(match_bbox, 3)

    if (
        horizontal1 != horizontal2
        or _get_projection_iou(input_bbox, match_bbox, horizontal1) < 0.01
    ):
        iou_distance = 1
    elif label == "doc_title" or (label in title_labels and title_text):
        # Calculate distance for titles
        disperse = max(1, median_width)
        width = x2 - x1
        height = y2 - y1
        if horizontal1:
            return (
                _calculate_horizontal_distance(
                    input_bbox,
                    match_bbox,
                    height,
                    disperse,
                    title_text,
                ),
                min_distance_config,
            )
        else:
            return (
                _calculate_vertical_distance(
                    input_bbox,
                    match_bbox,
                    width,
                    disperse,
                    title_text,
                ),
                min_distance_config,
            )

    # Adjust input_bbox based on sub_title
    if sub_title:
        for sub in sub_title:
            x1_, y1_, x2_, y2_ = sub
            x1, y1, x2, y2 = (
                min(x1, x1_),
                min(y1, y1_),
                max(x2, x2_),
                max(y2, y2_),
            )
        input_bbox = [x1, y1, x2, y2]

    # Calculate edge distance
    weight = _get_weights(label, horizontal1)
    if label == "abstract":
        tolerance_len *= 3

    edge_distance, edge_distance_config = _nearest_edge_distance(
        input_bbox,
        match_bbox,
        weight,
        label=label,
        no_mask_labels=no_mask_labels,
        min_edge_distance_config=min_edge_distance_config,
        tolerance_len=tolerance_len,
    )

    # Weights for combining distances
    iou_edge_weight = [10**6, 10**3, 1, 0.001]

    # Calculate up and left edge distances
    up_edge_distance = y1_prime
    left_edge_distance = x1_prime
    if (
        label in no_mask_labels or label == "paragraph_title" or label in vision_labels
    ) and y1 > y2_prime:
        up_edge_distance = -y2_prime
        left_edge_distance = -x2_prime

    min_up_edge_distance = up_edge_distances_config
    if abs(min_up_edge_distance - up_edge_distance) <= tolerance_len:
        up_edge_distance = min_up_edge_distance

    # Calculate total distance
    distance = (
        iou_distance * iou_edge_weight[0]
        + edge_distance * iou_edge_weight[1]
        + up_edge_distance * iou_edge_weight[2]
        + left_edge_distance * iou_edge_weight[3]
    )

    # Update minimum distance configuration if a smaller distance is found
    if total_distance > distance:
        edge_distance_config = [
            min(min_edge_distance_config[0], edge_distance_config[0]),
            min(min_edge_distance_config[1], edge_distance_config[1]),
        ]
        min_distance_config = [
            edge_distance_config,
            min(up_edge_distance, up_edge_distances_config),
            distance,
        ]

    return distance, min_distance_config
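# Usage note (illustrative, mirroring nearest_match_ above): callers thread
# min_distance_config through repeated calls so later comparisons can reuse the
# best distances seen so far, starting from
#     [[float("inf"), float("inf")], float("inf"), float("inf")]
# i.e. [min_edge_distance_config, up_edge_distances_config, total_distance].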
def get_show_color(label: str) -> Tuple:
    """Return the display color (an RGBA-style tuple) for a layout label; unknown labels fall back to a neutral gray."""
    label_colors = {
        # Medium Blue (from 'titles_list')
        "paragraph_title": (102, 102, 255, 100),
        "doc_title": (255, 248, 220, 100),  # Cornsilk
        # Light Yellow (from 'tables_caption_list')
        "table_title": (255, 255, 102, 100),
        # Sky Blue (from 'imgs_caption_list')
        "figure_title": (102, 178, 255, 100),
        "chart_title": (221, 160, 221, 100),  # Plum
        "vision_footnote": (144, 238, 144, 100),  # Light Green
        # Deep Purple (from 'texts_list')
        "text": (153, 0, 76, 100),
        # Bright Green (from 'interequations_list')
        "formula": (0, 255, 0, 100),
        "abstract": (255, 239, 213, 100),  # Papaya Whip
        # Medium Green (from 'lists_list' and 'indexs_list')
        "content": (40, 169, 92, 100),
        # Neutral Gray (from 'dropped_bbox_list')
        "seal": (158, 158, 158, 100),
        # Olive Yellow (from 'tables_body_list')
        "table": (204, 204, 0, 100),
        # Bright Green (from 'imgs_body_list')
        "image": (153, 255, 51, 100),
        # Bright Green (from 'imgs_body_list')
        "figure": (153, 255, 51, 100),
        "chart": (216, 191, 216, 100),  # Thistle
        # Pale Yellow-Green (from 'tables_footnote_list')
        "reference": (229, 255, 204, 100),
        "algorithm": (255, 250, 240, 100),  # Floral White
    }
    default_color = (158, 158, 158, 100)
    return label_colors.get(label, default_color)
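# Example lookups (illustrative): known labels map to their color tuple, anything
# else falls back to the neutral gray default.
# >>> get_show_color("table")
# (204, 204, 0, 100)
# >>> get_show_color("unknown_label")
# (158, 158, 158, 100)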