# base.py

from abc import ABC, abstractmethod
from typing import Dict, Any, List, Union, Optional, Tuple
import numpy as np
from PIL import Image
from loguru import logger
from pathlib import Path
import cv2
import json


class BaseAdapter(ABC):
    """Base adapter interface."""

    def __init__(self, config: Dict[str, Any]):
        self.config = config

    @abstractmethod
    def initialize(self):
        """Initialize the model."""
        pass

    @abstractmethod
    def cleanup(self):
        """Release resources."""
        pass


class BasePreprocessor(BaseAdapter):
    """Base class for preprocessors."""

    @abstractmethod
    def process(self, image: Union[np.ndarray, Image.Image]) -> Tuple[np.ndarray, int]:
        """
        Process an image.

        Returns the processed image and the rotation angle.
        """
        pass

    def _apply_rotation(self, image: np.ndarray, rotation_angle: int) -> np.ndarray:
        """Apply rotation."""
        if rotation_angle == 90:     # 90 degrees
            return cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
        elif rotation_angle == 180:  # 180 degrees
            return cv2.rotate(image, cv2.ROTATE_180)
        elif rotation_angle == 270:  # 270 degrees
            return cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
        return image
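

# A minimal sketch of a BasePreprocessor subclass, showing the (image, rotation_angle)
# contract of process() and how _apply_rotation() is meant to be reused. The
# landscape-page heuristic below is hypothetical and only illustrative; it is not
# part of the base-class contract.
class SimpleRotationPreprocessor(BasePreprocessor):
    """Illustrative preprocessor that guesses a rotation and applies it."""

    def initialize(self):
        pass  # No model to load in this sketch.

    def cleanup(self):
        pass

    def process(self, image: Union[np.ndarray, Image.Image]) -> Tuple[np.ndarray, int]:
        # Normalize the input to a numpy array first.
        img = np.array(image) if isinstance(image, Image.Image) else image
        # Hypothetical rule: treat clearly landscape pages as rotated by 90 degrees.
        rotation_angle = 90 if img.shape[1] > img.shape[0] * 1.4 else 0
        return self._apply_rotation(img, rotation_angle), rotation_angle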


class BaseLayoutDetector(BaseAdapter):
    """Base class for layout detectors."""

    def __init__(self, config: Dict[str, Any]):
        """Initialize the layout detector.

        Args:
            config: Configuration dictionary.
        """
        super().__init__(config)
        # Initialize debug-related attributes (can be set from config or at runtime).
        self.debug_mode = None  # Read from config in detect()
        self.output_dir = None  # Read from config in detect()
        self.page_name = None   # Read from config in detect()

    def detect(
        self,
        image: Union[np.ndarray, Image.Image],
        ocr_spans: Optional[List[Dict[str, Any]]] = None
    ) -> List[Dict[str, Any]]:
        """
        Detect layout (template method; post-processing runs automatically).

        This method:
        1. Calls the subclass implementation of _detect_raw() for raw detection.
        2. Automatically runs post-processing (overlapping-box removal, large-text-to-table conversion, etc.).

        Args:
            image: Input image.
            ocr_spans: OCR results (optional; some detectors may need them).

        Returns:
            Post-processed layout detection results.
        """
        # Call the subclass's raw detection method.
        layout_results = self._detect_raw(image, ocr_spans)

        # Debug mode: log and visualize detection results before post-processing.
        # Prefer the instance attribute (if set), otherwise fall back to the config.
        # Two config styles are supported: debug_mode or debug_options.enabled.
        debug_mode = getattr(self, 'debug_mode', None)
        if debug_mode is None:
            if hasattr(self, 'config'):
                # Prefer debug_mode.
                debug_mode = self.config.get('debug_mode', False)
                # If debug_mode is not set, try debug_options.enabled.
                if not debug_mode:
                    debug_options = self.config.get('debug_options', {})
                    if isinstance(debug_options, dict):
                        debug_mode = debug_options.get('enabled', False)
            else:
                debug_mode = False

        if debug_mode:
            logger.debug(f"🔍 Layout detection raw results (before post-processing): {len(layout_results)} elements")
            # logger.debug(f"Raw layout_results: {layout_results}")

            # Visualize the layout results.
            output_dir = getattr(self, 'output_dir', None)
            if output_dir is None:
                if hasattr(self, 'config'):
                    # Prefer output_dir.
                    output_dir = self.config.get('output_dir', None)
                    # If output_dir is not set, try debug_options.output_dir.
                    if output_dir is None:
                        debug_options = self.config.get('debug_options', {})
                        if isinstance(debug_options, dict):
                            output_dir = debug_options.get('output_dir', None)
                else:
                    output_dir = None

            page_name = getattr(self, 'page_name', None)
            if page_name is None:
                if hasattr(self, 'config'):
                    # Prefer page_name.
                    page_name = self.config.get('page_name', None)
                    # If page_name is not set, try debug_options.prefix.
                    if page_name is None:
                        debug_options = self.config.get('debug_options', {})
                        if isinstance(debug_options, dict):
                            prefix = debug_options.get('prefix', '')
                            page_name = prefix if prefix else 'layout_detection'
                        if page_name is None:
                            page_name = 'layout_detection'
                else:
                    page_name = 'layout_detection'

            if output_dir:
                self._visualize_layout_results(image, layout_results, output_dir, page_name, suffix='raw')

        # Automatically run post-processing.
        if layout_results:
            layout_config = self.config.get('post_process', {}) if hasattr(self, 'config') else {}
            layout_results = self.post_process(layout_results, image, layout_config)

        return layout_results

    @abstractmethod
    def _detect_raw(
        self,
        image: Union[np.ndarray, Image.Image],
        ocr_spans: Optional[List[Dict[str, Any]]] = None
    ) -> List[Dict[str, Any]]:
        """
        Raw detection method (must be implemented by subclasses).

        Args:
            image: Input image.
            ocr_spans: OCR results (optional).

        Returns:
            Raw detection results (not post-processed).
        """
        pass

    def post_process(
        self,
        layout_results: List[Dict[str, Any]],
        image: Union[np.ndarray, Image.Image],
        config: Optional[Dict[str, Any]] = None
    ) -> List[Dict[str, Any]]:
        """
        Post-process layout detection results.

        The default implementation:
        1. Removes overlapping boxes.
        2. Converts large text blocks to tables (if enabled in the config).

        Subclasses may override this method to customize post-processing.

        Args:
            layout_results: Raw detection results.
            image: Input image.
            config: Post-processing config (optional). If None, the post_process
                section of self.config is used.

        Returns:
            Post-processed layout results.
        """
        if not layout_results:
            return layout_results

        # Resolve the config.
        if config is None:
            config = self.config.get('post_process', {}) if hasattr(self, 'config') else {}

        # Import CoordinateUtils (available to adapters).
        try:
            from ocr_utils.coordinate_utils import CoordinateUtils
        except ImportError:
            try:
                from ocr_utils import CoordinateUtils
            except ImportError:
                # If it cannot be imported, return the original results.
                return layout_results

        # 1. Remove overlapping boxes.
        layout_results_removed_overlapping = self._remove_overlapping_boxes(layout_results, CoordinateUtils)

        # 2. Convert large text blocks to tables (if enabled in the config).
        layout_config = config if config is not None else {}
        layout_results_converted_large_text = layout_results_removed_overlapping
        if layout_config.get('convert_large_text_to_table', False):
            # Get the image size.
            if isinstance(image, Image.Image):
                h, w = image.size[1], image.size[0]
            else:
                h, w = image.shape[:2] if len(image.shape) >= 2 else (0, 0)
            layout_results_converted_large_text = self._convert_large_text_to_table(
                layout_results_removed_overlapping,
                (h, w),
                min_area_ratio=layout_config.get('min_text_area_ratio', 0.25),
                min_width_ratio=layout_config.get('min_text_width_ratio', 0.4),
                min_height_ratio=layout_config.get('min_text_height_ratio', 0.3)
            )
        return layout_results_converted_large_text

    def _convert_large_text_to_table(
        self,
        layout_results: List[Dict[str, Any]],
        image_shape: Tuple[int, int],
        min_area_ratio: float = 0.25,
        min_width_ratio: float = 0.4,
        min_height_ratio: float = 0.3
    ) -> List[Dict[str, Any]]:
        """
        Convert large text blocks to tables.

        Conversion rules:
        1. Area ratio: the block covers more than min_area_ratio of the page (default 25%).
        2. Size ratios: both width and height exceed the given ratios (to avoid thin strips).
        3. No overlap with existing tables: if a table already exists, do not convert.
        """
        if not layout_results:
            return layout_results

        img_height, img_width = image_shape
        img_area = img_height * img_width
        if img_area == 0:
            return layout_results

        # Check whether a table already exists.
        has_table = any(
            item.get('category', '').lower() in ['table', 'table_body']
            for item in layout_results
        )
        # If a table already exists, skip the conversion (to avoid misclassification).
        if has_table:
            return layout_results

        # Copy the list to avoid mutating the original data.
        results = [item.copy() for item in layout_results]
        converted_count = 0
        for item in results:
            category = item.get('category', '').lower()
            # Only handle text-like elements.
            if category not in ['text', 'ocr_text']:
                continue
            bbox = item.get('bbox', [0, 0, 0, 0])
            if len(bbox) < 4:
                continue
            x1, y1, x2, y2 = bbox[:4]
            width = x2 - x1
            height = y2 - y1
            area = width * height
            # Compute the ratios.
            area_ratio = area / img_area if img_area > 0 else 0
            width_ratio = width / img_width if img_width > 0 else 0
            height_ratio = height / img_height if img_height > 0 else 0
            # Check whether the conversion conditions are met.
            if (area_ratio >= min_area_ratio and
                    width_ratio >= min_width_ratio and
                    height_ratio >= min_height_ratio):
                # Convert to a table.
                item['category'] = 'table'
                item['original_category'] = category  # Keep the original category
                converted_count += 1
        return results

    def _map_category_id(self, category_id: int) -> str:
        """Map a category ID to its string name."""
        category_map = {
            0: 'title',
            1: 'text',
            2: 'abandon',
            3: 'image_body',
            4: 'image_caption',
            5: 'table_body',
            6: 'table_caption',
            7: 'table_footnote',
            8: 'interline_equation',
            9: 'interline_equation_number',
            13: 'inline_equation',
            14: 'interline_equation_yolo',
            15: 'ocr_text',
            16: 'low_score_text',
            101: 'image_footnote'
        }
        return category_map.get(category_id, f'unknown_{category_id}')

    def _visualize_layout_results(
        self,
        image: Union[np.ndarray, Image.Image],
        layout_results: List[Dict[str, Any]],
        output_dir: str,
        page_name: str,
        suffix: str = 'raw'
    ) -> None:
        """
        Visualize layout detection results.

        Args:
            image: Input image.
            layout_results: Layout detection results.
            output_dir: Output directory.
            page_name: Page name.
            suffix: Filename suffix (e.g. 'raw', 'postprocessed').
        """
        if not layout_results:
            return
        try:
            # Convert to a numpy array.
            if isinstance(image, Image.Image):
                vis_image = np.array(image)
                if len(vis_image.shape) == 3 and vis_image.shape[2] == 3:
                    # PIL RGB -> OpenCV BGR
                    vis_image = cv2.cvtColor(vis_image, cv2.COLOR_RGB2BGR)
            else:
                vis_image = image.copy()
                if len(vis_image.shape) == 3 and vis_image.shape[2] == 3:
                    # If the image is RGB, convert it to BGR.
                    vis_image = cv2.cvtColor(vis_image, cv2.COLOR_RGB2BGR)

            # Category -> color mapping (BGR).
            category_colors = {
                'table_body': (0, 0, 255),      # red
                'table_caption': (0, 0, 200),   # dark red
                'table_footnote': (0, 0, 150),  # darker red
                'text': (255, 0, 0),            # blue
                'title': (0, 255, 255),         # yellow
                'header': (255, 0, 255),        # magenta
                'footer': (0, 165, 255),        # orange
                'image_body': (0, 255, 0),      # green
                'image_caption': (0, 200, 0),   # dark green
                'image_footnote': (0, 150, 0),  # darker green
                'abandon': (128, 128, 128),     # gray
            }

            # Draw the detection boxes.
            for result in layout_results:
                bbox = result.get('bbox', [])
                if not bbox or len(bbox) < 4:
                    continue
                category = result.get('category', 'unknown')
                color = category_colors.get(category, (128, 128, 128))  # default gray
                thickness = 2
                x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
                cv2.rectangle(vis_image, (x1, y1), (x2, y2), color, thickness)

                # Add the category label.
                label = f"{category}"
                confidence = result.get('confidence', result.get('score', 0))
                if confidence:
                    label += f":{confidence:.2f}"

                # Measure the text size.
                font = cv2.FONT_HERSHEY_SIMPLEX
                font_scale = 0.4
                text_thickness = 1
                (text_width, text_height), baseline = cv2.getTextSize(label, font, font_scale, text_thickness)

                # Draw the text background above the box.
                text_y = max(y1 - baseline - 1, text_height + baseline)
                cv2.rectangle(vis_image, (x1, text_y - text_height - baseline - 2),
                              (x1 + text_width, text_y), color, -1)
                # Draw the text.
                cv2.putText(vis_image, label, (x1, text_y - baseline - 1),
                            font, font_scale, (255, 255, 255), text_thickness)

            # Save the image.
            debug_dir = Path(output_dir) / "debug_comparison" / "layout_detection"
            debug_dir.mkdir(parents=True, exist_ok=True)
            output_path = debug_dir / f"{page_name}_layout_{suffix}.jpg"
            cv2.imwrite(str(output_path), vis_image)
            logger.info(f"📊 Saved layout detection image ({suffix}): {output_path}")

            # Save the JSON data.
            json_data = {
                'page_name': page_name,
                'suffix': suffix,
                'count': len(layout_results),
                'results': [
                    {
                        'category': r.get('category'),
                        'bbox': r.get('bbox'),
                        'confidence': r.get('confidence', r.get('score', 0.0))
                    }
                    for r in layout_results
                ]
            }
            json_path = debug_dir / f"{page_name}_layout_{suffix}.json"
            with open(json_path, 'w', encoding='utf-8') as f:
                json.dump(json_data, f, ensure_ascii=False, indent=2)
            logger.info(f"📊 Saved layout detection JSON ({suffix}): {json_path}")
        except Exception as e:
            logger.warning(f"⚠️ Failed to visualize layout results: {e}")

    def _remove_overlapping_boxes(
        self,
        layout_results: List[Dict[str, Any]],
        coordinate_utils: Any,
        iou_threshold: float = 0.8,
        overlap_ratio_threshold: float = 0.8
    ) -> List[Dict[str, Any]]:
        """
        Improved overlapping-box handling (a clear algorithm based on priorities and decision rules).

        Strategy:
        1. Define category priorities (abandon < text/image < table_body).
        2. Apply a unified set of decision rules.
        3. Process boxes ordered by a composite score, preferring large aggregate boxes.

        Args:
            layout_results: Layout detection results.
            coordinate_utils: Coordinate utility class.
            iou_threshold: IoU threshold (default 0.8).
            overlap_ratio_threshold: Overlap-ratio threshold (default 0.8).

        Returns:
            Deduplicated layout results.
        """
        if not layout_results or len(layout_results) <= 1:
            return layout_results

        # Constants.
        CATEGORY_PRIORITY = {
            'abandon': 0,
            'text': 1,
            'image_body': 1,
            'title': 2,
            'footer': 2,
            'header': 2,
            'table_body': 3,
        }
        AGGREGATE_LABELS = {'key-value region', 'form'}
        MAX_AREA = 4000000.0        # Used for area normalization
        AREA_WEIGHT = 0.5
        CONFIDENCE_WEIGHT = 0.5
        AGGREGATE_BONUS = 0.1
        AREA_RATIO_THRESHOLD = 3.0  # The larger box must exceed the smaller one by this factor

        def get_bbox_area(bbox: List[float]) -> float:
            """Compute the bbox area."""
            if len(bbox) < 4:
                return 0.0
            return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])

        def is_aggregate_type(box: Dict[str, Any]) -> bool:
            """Check whether the box is an aggregate type."""
            original_label = box.get('raw', {}).get('original_label', '').lower()
            return original_label in AGGREGATE_LABELS

        def is_bbox_inside(inner: List[float], outer: List[float]) -> bool:
            """Check whether inner is fully contained in outer."""
            if len(inner) < 4 or len(outer) < 4:
                return False
            return (inner[0] >= outer[0] and inner[1] >= outer[1] and
                    inner[2] <= outer[2] and inner[3] <= outer[3])

        def calculate_composite_score(box: Dict[str, Any], area: float) -> float:
            """Compute the composite score (area + confidence) for text boxes."""
            if box.get('category') != 'text':
                return box.get('confidence', box.get('score', 0))
            normalized_area = min(area / MAX_AREA, 1.0)
            area_score = (normalized_area ** 0.5) * AREA_WEIGHT
            confidence_score = box.get('confidence', box.get('score', 0)) * CONFIDENCE_WEIGHT
            bonus = AGGREGATE_BONUS if is_aggregate_type(box) else 0.0
            return area_score + confidence_score + bonus

        def should_keep_box1(box1: Dict[str, Any], box2: Dict[str, Any],
                             iou: float, overlap_ratio: float,
                             contained_1_in_2: bool, contained_2_in_1: bool) -> bool:
            """Decide whether box1 should be kept."""
            # Extract basic information.
            cat1, cat2 = box1.get('category', ''), box2.get('category', '')
            score1 = box1.get('confidence', box1.get('score', 0))
            score2 = box2.get('confidence', box2.get('score', 0))
            bbox1, bbox2 = box1.get('bbox', [0, 0, 0, 0]), box2.get('bbox', [0, 0, 0, 0])
            area1, area2 = get_bbox_area(bbox1), get_bbox_area(bbox2)
            is_agg1, is_agg2 = is_aggregate_type(box1), is_aggregate_type(box2)

            # Rule 1: category priority.
            priority1 = CATEGORY_PRIORITY.get(cat1, 1)
            priority2 = CATEGORY_PRIORITY.get(cat2, 1)
            if priority1 != priority2:
                return priority1 > priority2

            # Rule 2: containment + aggregate type wins.
            if contained_2_in_1 and is_agg1 and not is_agg2:
                return True
            if contained_1_in_2 and is_agg2 and not is_agg1:
                return False

            # Rule 3: containment + area ratio.
            if contained_2_in_1 and area1 > area2 * AREA_RATIO_THRESHOLD:
                return True
            if contained_1_in_2 and area2 > area1 * AREA_RATIO_THRESHOLD:
                return False

            # Rule 4: composite score for text boxes.
            if cat1 == 'text' or cat2 == 'text':
                comp_score1 = calculate_composite_score(box1, area1)
                comp_score2 = calculate_composite_score(box2, area2)
                if abs(comp_score1 - comp_score2) > 0.05:
                    return comp_score1 > comp_score2

            # Rule 5: confidence comparison.
            if abs(score1 - score2) > 0.1:
                return score1 > score2

            # Rule 6: area comparison.
            return area1 >= area2

        # Main processing logic.
        results = [item.copy() for item in layout_results]
        need_remove = set()

        # Sort by composite score (highest first).
        def get_sort_key(i: int) -> float:
            item = results[i]
            if item.get('category') == 'text':
                return -calculate_composite_score(item, get_bbox_area(item.get('bbox', [])))
            return -item.get('confidence', item.get('score', 0))

        sorted_indices = sorted(range(len(results)), key=get_sort_key)

        # Compare each pair of boxes.
        for idx_i, i in enumerate(sorted_indices):
            if i in need_remove:
                continue
            for idx_j, j in enumerate(sorted_indices):
                if j == i or j in need_remove or idx_j >= idx_i:
                    continue
                bbox1, bbox2 = results[i].get('bbox', []), results[j].get('bbox', [])
                if len(bbox1) < 4 or len(bbox2) < 4:
                    continue

                # Compute the overlap metrics.
                iou = coordinate_utils.calculate_iou(bbox1, bbox2)
                overlap_ratio = coordinate_utils.calculate_overlap_ratio(bbox1, bbox2)
                contained_1_in_2 = is_bbox_inside(bbox1, bbox2)
                contained_2_in_1 = is_bbox_inside(bbox2, bbox1)

                # Check for significant overlap.
                if not (iou > iou_threshold or overlap_ratio > overlap_ratio_threshold or
                        contained_1_in_2 or contained_2_in_1):
                    continue

                # Apply the decision rules.
                if should_keep_box1(results[i], results[j], iou, overlap_ratio,
                                    contained_1_in_2, contained_2_in_1):
                    need_remove.add(j)
                else:
                    need_remove.add(i)
                    break

        return [results[i] for i in range(len(results)) if i not in need_remove]
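

# A minimal sketch of how a concrete detector plugs into the detect() template
# method above. The hard-coded boxes stand in for a real layout model, and the
# config shown in the comment only illustrates the keys that detect() and
# post_process() read (debug_options.*, post_process.*); the values are made up.
#
# Example config (illustrative values):
#   {
#       'debug_options': {'enabled': True, 'output_dir': './debug', 'prefix': 'page_001'},
#       'post_process': {'convert_large_text_to_table': True, 'min_text_area_ratio': 0.25},
#   }
class DummyLayoutDetector(BaseLayoutDetector):
    """Illustrative detector returning hard-coded boxes; detect() post-processes them."""

    def initialize(self):
        pass  # A real adapter would load its layout model here.

    def cleanup(self):
        pass

    def _detect_raw(
        self,
        image: Union[np.ndarray, Image.Image],
        ocr_spans: Optional[List[Dict[str, Any]]] = None
    ) -> List[Dict[str, Any]]:
        # Pretend the model found a title and a large text block. Calling detect()
        # on this instance runs overlap removal (and, if enabled in the config,
        # large-text-to-table conversion) on these results automatically.
        return [
            {'category': 'title', 'bbox': [50, 40, 900, 100], 'confidence': 0.95},
            {'category': 'text', 'bbox': [50, 120, 950, 1300], 'confidence': 0.88},
        ]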


class BaseVLRecognizer(BaseAdapter):
    """Base class for VL (vision-language) recognizers."""

    @abstractmethod
    def recognize_table(self, image: Union[np.ndarray, Image.Image], **kwargs) -> Dict[str, Any]:
        """Recognize a table."""
        pass

    @abstractmethod
    def recognize_formula(self, image: Union[np.ndarray, Image.Image], **kwargs) -> Dict[str, Any]:
        """Recognize a formula."""
        pass


class BaseOCRRecognizer(BaseAdapter):
    """Base class for OCR recognizers."""

    @abstractmethod
    def recognize_text(self, image: Union[np.ndarray, Image.Image]) -> List[Dict[str, Any]]:
        """Recognize text."""
        pass

    @abstractmethod
    def detect_text_boxes(self, image: Union[np.ndarray, Image.Image]) -> List[Dict[str, Any]]:
        """
        Detect text boxes only (without recognizing the text content).

        Subclasses must implement this method. Prefer running only the detection
        model (skipping the recognition model) for better performance. If that is
        not possible, at least provide a fallback implementation that calls
        recognize_text().

        Returns:
            A list of text boxes, each containing 'bbox' and 'poly', and possibly 'confidence'.
        """
        pass
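

# A minimal sketch of the fallback pattern suggested in the detect_text_boxes()
# docstring: reuse recognize_text() and keep only the geometry fields. The
# recognition result below is a hypothetical stand-in; a real adapter would call
# its OCR engine and, ideally, run only the detection model here.
class FallbackOCRRecognizer(BaseOCRRecognizer):
    """Illustrative OCR adapter whose box detection reuses full recognition."""

    def initialize(self):
        pass  # A real adapter would load its detection/recognition models here.

    def cleanup(self):
        pass

    def recognize_text(self, image: Union[np.ndarray, Image.Image]) -> List[Dict[str, Any]]:
        # Stand-in result; a real implementation would run the OCR engine.
        return [{'bbox': [10, 10, 200, 40],
                 'poly': [[10, 10], [200, 10], [200, 40], [10, 40]],
                 'text': 'example', 'confidence': 0.99}]

    def detect_text_boxes(self, image: Union[np.ndarray, Image.Image]) -> List[Dict[str, Any]]:
        # Fallback: run full recognition and drop the recognized text content.
        return [
            {k: span[k] for k in ('bbox', 'poly', 'confidence') if k in span}
            for span in self.recognize_text(image)
        ]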