rapid_table.py 3.2 KB

import os
from pathlib import Path

import cv2
import numpy as np
from loguru import logger
from rapid_table import RapidTable, RapidTableInput


class RapidTableModel(object):
    def __init__(self, ocr_engine):
        root_dir = Path(__file__).absolute().parent.parent.parent
        slanet_plus_model_path = os.path.join(root_dir, 'resources', 'slanet_plus', 'slanet-plus.onnx')
        input_args = RapidTableInput(model_type='slanet_plus', model_path=slanet_plus_model_path)
        self.table_model = RapidTable(input_args)
        self.ocr_engine = ocr_engine

    def predict(self, image):
        """Recognize table structure from an RGB image; returns (html_code, cell_bboxes, logic_points, elapse)."""
        bgr_image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)

        # First check the overall image aspect ratio (height/width)
        img_height, img_width = bgr_image.shape[:2]
        img_aspect_ratio = img_height / img_width if img_width > 0 else 1.0
        img_is_portrait = img_aspect_ratio > 1.2

        if img_is_portrait:
            det_res = self.ocr_engine.ocr(bgr_image, rec=False)[0]
            # Check if table is rotated by analyzing text box aspect ratios
            is_rotated = False
            if det_res:
                vertical_count = 0
                for box_ocr_res in det_res:
                    p1, p2, p3, p4 = box_ocr_res
                    # Calculate width and height
                    width = p3[0] - p1[0]
                    height = p3[1] - p1[1]
                    aspect_ratio = width / height if height > 0 else 1.0
                    # Count vertical vs horizontal text boxes
                    if aspect_ratio < 0.8:  # Taller than wide - vertical text
                        vertical_count += 1
                    # elif aspect_ratio > 1.2:  # Wider than tall - horizontal text
                    #     horizontal_count += 1

                # If a significant share of the detected text boxes are vertical,
                # the table is likely rotated
                if vertical_count >= len(det_res) * 0.3:
                    is_rotated = True
                # logger.debug(f"Text orientation analysis: vertical={vertical_count}, det_res={len(det_res)}, rotated={is_rotated}")

            # Rotate image if necessary
            if is_rotated:
                # logger.debug("Table appears to be in portrait orientation, rotating 90 degrees clockwise")
                image = cv2.rotate(np.asarray(image), cv2.ROTATE_90_CLOCKWISE)
                bgr_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        # Continue with OCR on the (potentially rotated) image
        ocr_result = self.ocr_engine.ocr(bgr_image)[0]
        if ocr_result:
            ocr_result = [[item[0], item[1][0], item[1][1]] for item in ocr_result
                          if len(item) == 2 and isinstance(item[1], tuple)]
        else:
            ocr_result = None

        if ocr_result:
            table_results = self.table_model(np.asarray(image), ocr_result)
            html_code = table_results.pred_html
            table_cell_bboxes = table_results.cell_bboxes
            logic_points = table_results.logic_points
            elapse = table_results.elapse
            return html_code, table_cell_bboxes, logic_points, elapse
        else:
            return None, None, None, None
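

# Usage sketch (illustrative only). It assumes a PaddleOCR-style engine whose
# `ocr()` call yields results in the `[[box, (text, score)], ...]` layout that
# `predict()` unpacks above; `PaddleOCR` and the 'table.png' path are stand-ins
# for whatever OCR engine and input the caller actually provides.
#
#   from PIL import Image
#   from paddleocr import PaddleOCR
#
#   ocr_engine = PaddleOCR(use_angle_cls=True, lang='ch')
#   table_model = RapidTableModel(ocr_engine)
#   html_code, cell_bboxes, logic_points, elapse = table_model.predict(
#       Image.open('table.png').convert('RGB'))
#   if html_code is not None:
#       print(html_code)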