unet_table.py

import html
import os
import time
import traceback
from dataclasses import dataclass, asdict
from pathlib import Path
from typing import List, Optional, Union, Dict, Any

import cv2
import numpy as np
from loguru import logger
from rapid_table import RapidTableInput, RapidTable

from mineru.utils.enum_class import ModelPath
from mineru.utils.models_download_utils import auto_download_and_get_model_root_path

from .table_structure_unet import TSRUnet
from .table_recover import TableRecover
from .wired_table_rec_utils import InputType, LoadImage
from .table_recover_utils import (
    match_ocr_cell,
    plot_html_table,
    box_4_2_poly_to_box_4_1,
    sorted_ocr_boxes,
    gather_ocr_list_by_row,
)

@dataclass
class UnetTableInput:
    model_path: str
    device: str = "cpu"


@dataclass
class UnetTableOutput:
    pred_html: Optional[str] = None
    cell_bboxes: Optional[np.ndarray] = None
    logic_points: Optional[np.ndarray] = None
    elapse: Optional[float] = None

class UnetTableRecognition:
    """Wired-table recognition: a U-Net table-structure model plus cell/OCR matching."""

    def __init__(self, config: UnetTableInput):
        self.table_structure = TSRUnet(asdict(config))
        self.load_img = LoadImage()
        self.table_recover = TableRecover()
    def __call__(
        self,
        img: InputType,
        ocr_result: Optional[List[Union[List[List[float]], str, str]]] = None,
        ocr_engine=None,
        **kwargs,
    ) -> UnetTableOutput:
        """Recognize a wired table.

        `ocr_result` items are expected as [quad_box, text, score]
        (the format built by UnetTableModel.predict).
        """
        s = time.perf_counter()
        need_ocr = True
        col_threshold = 15
        row_threshold = 10
        if kwargs:
            need_ocr = kwargs.get("need_ocr", True)
            col_threshold = kwargs.get("col_threshold", 15)
            row_threshold = kwargs.get("row_threshold", 10)
        img = self.load_img(img)
        polygons, rotated_polygons = self.table_structure(img, **kwargs)
        if polygons is None:
            # logger.warning("polygons is None.")
            return UnetTableOutput("", None, None, 0.0)
        try:
            table_res, logi_points = self.table_recover(
                rotated_polygons, row_threshold, col_threshold
            )
            # Convert the coordinates from counter-clockwise to clockwise order so the
            # later steps line up with the wireless-table pipeline.
            polygons[:, 1, :], polygons[:, 3, :] = (
                polygons[:, 3, :].copy(),
                polygons[:, 1, :].copy(),
            )
            if not need_ocr:
                sorted_polygons, idx_list = sorted_ocr_boxes(
                    [box_4_2_poly_to_box_4_1(box) for box in polygons]
                )
                return UnetTableOutput(
                    "",
                    sorted_polygons,
                    logi_points[idx_list],
                    time.perf_counter() - s,
                )
            cell_box_det_map, not_match_orc_boxes = match_ocr_cell(ocr_result, polygons)
            # If a detected cell has no OCR result, run recognition on its crop to fill it in.
            cell_box_det_map = self.fill_blank_rec(img, polygons, cell_box_det_map, ocr_engine)
            # Convert to an intermediate format: correct the box coordinates and merge the
            # physical boxes, logical boxes and OCR boxes into dicts for later processing.
            t_rec_ocr_list = self.transform_res(cell_box_det_map, polygons, logi_points)
            # Sort the OCR results inside each cell and merge items on the same row, so the
            # output HTML keeps the line breaks of the original text.
            t_rec_ocr_list = self.sort_and_gather_ocr_res(t_rec_ocr_list)
            logi_points = [t_box_ocr["t_logic_box"] for t_box_ocr in t_rec_ocr_list]
            cell_box_det_map = {
                i: [ocr_box_and_text[1] for ocr_box_and_text in t_box_ocr["t_ocr_res"]]
                for i, t_box_ocr in enumerate(t_rec_ocr_list)
            }
            pred_html = plot_html_table(logi_points, cell_box_det_map)
            polygons = np.array(polygons).reshape(-1, 8)
            logi_points = np.array(logi_points)
            elapse = time.perf_counter() - s
        except Exception:
            logger.warning(traceback.format_exc())
            return UnetTableOutput("", None, None, 0.0)
        return UnetTableOutput(pred_html, polygons, logi_points, elapse)
    def transform_res(
        self,
        cell_box_det_map: Dict[int, List[Any]],
        polygons: np.ndarray,
        logi_points: List[np.ndarray],
    ) -> List[Dict[str, Any]]:
        res = []
        for i in range(len(polygons)):
            ocr_res_list = cell_box_det_map.get(i)
            if not ocr_res_list:
                continue
            xmin = min([ocr_box[0][0][0] for ocr_box in ocr_res_list])
            ymin = min([ocr_box[0][0][1] for ocr_box in ocr_res_list])
            xmax = max([ocr_box[0][2][0] for ocr_box in ocr_res_list])
            ymax = max([ocr_box[0][2][1] for ocr_box in ocr_res_list])
            dict_res = {
                # xmin, ymin, xmax, ymax
                "t_box": [xmin, ymin, xmax, ymax],
                # row_start, row_end, col_start, col_end
                "t_logic_box": logi_points[i].tolist(),
                # [[xmin, ymin, xmax, ymax], text]
                "t_ocr_res": [
                    [box_4_2_poly_to_box_4_1(ocr_det[0]), ocr_det[1]]
                    for ocr_det in ocr_res_list
                ],
            }
            res.append(dict_res)
        return res
    def sort_and_gather_ocr_res(self, res):
        for dict_res in res:
            _, sorted_idx = sorted_ocr_boxes(
                [ocr_det[0] for ocr_det in dict_res["t_ocr_res"]], threshold=0.3
            )
            dict_res["t_ocr_res"] = [dict_res["t_ocr_res"][i] for i in sorted_idx]
            dict_res["t_ocr_res"] = gather_ocr_list_by_row(
                dict_res["t_ocr_res"], threshold=0.3
            )
        return res
    def fill_blank_rec(
        self,
        img: np.ndarray,
        sorted_polygons: np.ndarray,
        cell_box_map: Dict[int, List[str]],
        ocr_engine,
    ) -> Dict[int, List[Any]]:
        """Find cells whose polygons have no OCR result and send those crops straight to recognition."""
        img_crop_info_list = []
        img_crop_list = []
        for i in range(sorted_polygons.shape[0]):
            if cell_box_map.get(i):
                continue
            box = sorted_polygons[i]
            if ocr_engine is None:
                logger.warning(f"No OCR engine provided for box {i}: {box}")
                continue
            # Crop the corresponding region from the image.
            x1, y1, x2, y2 = box[0][0], box[0][1], box[2][0], box[2][1]
            if x1 >= x2 or y1 >= y2:
                logger.warning(f"Invalid box coordinates: {box}")
                continue
            img_crop = img[int(y1):int(y2), int(x1):int(x2)]
            img_crop_list.append(img_crop)
            img_crop_info_list.append([i, box])
        if len(img_crop_list) > 0:
            # Run recognition-only OCR on the cropped cells.
            ocr_result = ocr_engine.ocr(img_crop_list, det=False)
            if not ocr_result or not isinstance(ocr_result, list) or len(ocr_result) == 0:
                logger.warning("OCR engine returned no results or invalid result for image crops.")
                return cell_box_map
            ocr_res_list = ocr_result[0]
            if not isinstance(ocr_res_list, list) or len(ocr_res_list) != len(img_crop_list):
                logger.warning("OCR result list length does not match image crop list length.")
                return cell_box_map
            for j, ocr_res in enumerate(ocr_res_list):
                img_crop_info_list[j].append(ocr_res)
            for i, box, ocr_res in img_crop_info_list:
                # Process the OCR result for this cell.
                ocr_text, ocr_score = ocr_res
                # logger.debug(f"OCR result for box {i}: {ocr_text} with score {ocr_score}")
                if ocr_score < 0.9:
                    # logger.warning(f"Low confidence OCR result for box {i}: {ocr_text} with score {ocr_score}")
                    box = sorted_polygons[i]
                    cell_box_map[i] = [[box, "", 0.5]]
                    continue
                cell_box_map[i] = [[box, ocr_text, ocr_score]]
        return cell_box_map

def escape_html(input_string):
    """Escape HTML Entities."""
    return html.escape(input_string)

class UnetTableModel:
    """Runs both the wired (U-Net) and wireless (slanet_plus) table models and keeps the better result."""

    def __init__(self, ocr_engine):
        model_path = os.path.join(auto_download_and_get_model_root_path(ModelPath.unet_structure), ModelPath.unet_structure)
        wired_input_args = UnetTableInput(model_path=model_path)
        self.wired_table_model = UnetTableRecognition(wired_input_args)
        slanet_plus_model_path = os.path.join(auto_download_and_get_model_root_path(ModelPath.slanet_plus), ModelPath.slanet_plus)
        wireless_input_args = RapidTableInput(model_type='slanet_plus', model_path=slanet_plus_model_path)
        self.wireless_table_model = RapidTable(wireless_input_args)
        self.ocr_engine = ocr_engine
    def predict(self, img, table_cls_score):
        bgr_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        ocr_result = self.ocr_engine.ocr(bgr_img)[0]
        if ocr_result:
            ocr_result = [
                [item[0], escape_html(item[1][0]), item[1][1]]
                for item in ocr_result
                if len(item) == 2 and isinstance(item[1], tuple)
            ]
        else:
            ocr_result = None
        if ocr_result:
            try:
                wired_table_results = self.wired_table_model(np.asarray(img), ocr_result, self.ocr_engine)
                # viser = VisTable()
                # save_html_path = f"outputs/output.html"
                # save_drawed_path = f"outputs/output_table_vis.jpg"
                # save_logic_path = (
                #     f"outputs/output_table_vis_logic.jpg"
                # )
                # vis_imged = viser(
                #     np.asarray(img), wired_table_results, save_html_path, save_drawed_path, save_logic_path
                # )
                wired_html_code = wired_table_results.pred_html
                wired_table_cell_bboxes = wired_table_results.cell_bboxes
                wired_logic_points = wired_table_results.logic_points
                wired_elapse = wired_table_results.elapse
                wireless_table_results = self.wireless_table_model(np.asarray(img), ocr_result)
                wireless_html_code = wireless_table_results.pred_html
                wireless_table_cell_bboxes = wireless_table_results.cell_bboxes
                wireless_logic_points = wireless_table_results.logic_points
                wireless_elapse = wireless_table_results.elapse
                wired_len = len(wired_table_cell_bboxes) if wired_table_cell_bboxes is not None else 0
                wireless_len = len(wireless_table_cell_bboxes) if wireless_table_cell_bboxes is not None else 0
                # logger.debug(f"wired table cell bboxes: {wired_len}, wireless table cell bboxes: {wireless_len}")
                # Difference between the cell counts detected by the two models.
                gap_of_len = wireless_len - wired_len
                # Decide whether to fall back to the wireless table model's result.
                if (
                    wired_len <= round(wireless_len * 0.5)  # the wired model found far too few cells (below 50% of the wireless count)
                    or ((round(wireless_len * 1.2) < wired_len) and (wired_len < (2 * wireless_len)) and table_cls_score <= 0.94)  # the wired model found noticeably more cells instead
                    or (0 <= gap_of_len <= 5 and wired_len <= round(wireless_len * 0.75))  # the counts are close, but the wired result is still clearly smaller
                    or (gap_of_len == 0 and wired_len <= 4)  # the counts are equal and the table has at most 4 cells
                ):
                    # logger.debug("fall back to wireless table model")
                    html_code = wireless_html_code
                    table_cell_bboxes = wireless_table_cell_bboxes
                    logic_points = wireless_logic_points
                else:
                    html_code = wired_html_code
                    table_cell_bboxes = wired_table_cell_bboxes
                    logic_points = wired_logic_points
                elapse = wired_elapse + wireless_elapse
                return html_code, table_cell_bboxes, logic_points, elapse
            except Exception as e:
                logger.exception(e)
        # Fall through when OCR found no text or table recognition raised.
        return None, None, None, None

class VisTable:
    def __init__(self):
        self.load_img = LoadImage()

    def __call__(
        self,
        img_path: InputType,
        table_results,
        save_html_path: Optional[Union[str, Path]] = None,
        save_drawed_path: Optional[Union[str, Path]] = None,
        save_logic_path: Optional[Union[str, Path]] = None,
    ):
        if save_html_path:
            html_with_border = self.insert_border_style(table_results.pred_html)
            self.save_html(save_html_path, html_with_border)
        table_cell_bboxes = table_results.cell_bboxes
        table_logic_points = table_results.logic_points
        if table_cell_bboxes is None:
            return None
        img = self.load_img(img_path)
        dims_bboxes = table_cell_bboxes.shape[1]
        if dims_bboxes == 4:
            drawed_img = self.draw_rectangle(img, table_cell_bboxes)
        elif dims_bboxes == 8:
            drawed_img = self.draw_polylines(img, table_cell_bboxes)
        else:
            raise ValueError("Table bounding boxes must have 4 or 8 coordinates per box.")
        if save_drawed_path:
            self.save_img(save_drawed_path, drawed_img)
        if save_logic_path:
            polygons = [[box[0], box[1], box[4], box[5]] for box in table_cell_bboxes]
            self.plot_rec_box_with_logic_info(
                img_path, save_logic_path, table_logic_points, polygons
            )
        return drawed_img
    def insert_border_style(self, table_html_str: str):
        style_res = """<meta charset="UTF-8"><style>
        table {
            border-collapse: collapse;
            width: 100%;
        }
        th, td {
            border: 1px solid black;
            padding: 8px;
            text-align: center;
        }
        th {
            background-color: #f2f2f2;
        }
        </style>"""
        prefix_table, suffix_table = table_html_str.split("<body>")
        html_with_border = f"{prefix_table}{style_res}<body>{suffix_table}"
        return html_with_border
    def plot_rec_box_with_logic_info(
        self, img_path, output_path, logic_points, sorted_polygons
    ):
        """
        :param img_path: the image, already loaded as an array
        :param output_path: where the annotated image is written
        :param logic_points: [row_start, row_end, col_start, col_end]
        :param sorted_polygons: [xmin, ymin, xmax, ymax]
        :return:
        """
        # Pad the image on the right so the row/col labels have room.
        img = img_path
        img = cv2.copyMakeBorder(
            img, 0, 0, 0, 100, cv2.BORDER_CONSTANT, value=[255, 255, 255]
        )
        # Draw a rectangle and its logical row/col range for every polygon.
        for idx, polygon in enumerate(sorted_polygons):
            x0, y0, x1, y1 = polygon[0], polygon[1], polygon[2], polygon[3]
            x0 = round(x0)
            y0 = round(y0)
            x1 = round(x1)
            y1 = round(y1)
            cv2.rectangle(img, (x0, y0), (x1, y1), (0, 0, 255), 1)
            # Enlarged font size and line width.
            font_scale = 0.9  # previously 0.5
            thickness = 1  # previously 1
            logic_point = logic_points[idx]
            cv2.putText(
                img,
                f"row: {logic_point[0]}-{logic_point[1]}",
                (x0 + 3, y0 + 8),
                cv2.FONT_HERSHEY_PLAIN,
                font_scale,
                (0, 0, 255),
                thickness,
            )
            cv2.putText(
                img,
                f"col: {logic_point[2]}-{logic_point[3]}",
                (x0 + 3, y0 + 18),
                cv2.FONT_HERSHEY_PLAIN,
                font_scale,
                (0, 0, 255),
                thickness,
            )
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Save the annotated image.
        self.save_img(output_path, img)
    @staticmethod
    def draw_rectangle(img: np.ndarray, boxes: np.ndarray) -> np.ndarray:
        img_copy = img.copy()
        for box in boxes.astype(int):
            x1, y1, x2, y2 = box
            cv2.rectangle(img_copy, (x1, y1), (x2, y2), (255, 0, 0), 2)
        return img_copy

    @staticmethod
    def draw_polylines(img: np.ndarray, points) -> np.ndarray:
        img_copy = img.copy()
        for point in points.astype(int):
            point = point.reshape(4, 2)
            cv2.polylines(img_copy, [point.astype(int)], True, (255, 0, 0), 2)
        return img_copy

    @staticmethod
    def save_img(save_path: Union[str, Path], img: np.ndarray):
        cv2.imwrite(str(save_path), img)

    @staticmethod
    def save_html(save_path: Union[str, Path], html: str):
        with open(save_path, "w", encoding="utf-8") as f:
            f.write(html)
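

# --- Usage sketch (not part of the original module) ---
# A minimal example of how UnetTableModel might be driven. It assumes a
# PaddleOCR-style engine whose ocr() returns [[box, (text, score)], ...] and
# accepts det=False for recognition-only calls; the engine MinerU actually
# passes in may differ. The image path and table_cls_score are placeholders.
if __name__ == "__main__":
    from PIL import Image
    from paddleocr import PaddleOCR  # assumed OCR backend, not pinned by this module

    ocr_engine = PaddleOCR(use_angle_cls=True, lang="ch")
    table_model = UnetTableModel(ocr_engine)

    image = Image.open("table_example.png")  # placeholder input image
    html_code, cell_bboxes, logic_points, elapse = table_model.predict(image, table_cls_score=0.95)
    print(html_code)
    print(f"cells: {0 if cell_bboxes is None else len(cell_bboxes)}, elapse: {elapse}")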