- """
- Table cell matcher.
- Matches HTML table cells against PaddleOCR bounding boxes.
- """
- from typing import List, Dict, Tuple, Optional
- from bs4 import BeautifulSoup
- import numpy as np
- try:
- from .text_matcher import TextMatcher
- from .bbox_extractor import BBoxExtractor
- except ImportError:
- from text_matcher import TextMatcher
- from bbox_extractor import BBoxExtractor
- class TableCellMatcher:
- """表格单元格匹配器"""
-
- def __init__(self, text_matcher: TextMatcher,
- x_tolerance: int = 3,
- y_tolerance: int = 10):
- """
- Args:
- text_matcher: text matcher
- x_tolerance: X-axis tolerance (used for column boundary checks)
- y_tolerance: Y-axis tolerance (used for row grouping)
- """
- self.text_matcher = text_matcher
- self.x_tolerance = x_tolerance
- self.y_tolerance = y_tolerance
-
- def enhance_table_html_with_bbox(self, html: str, paddle_text_boxes: List[Dict],
- start_pointer: int, table_bbox: Optional[List[int]] = None) -> Tuple[str, List[Dict], int]:
- """
- Add bbox information to an HTML table (optimized: filter the table region first)
- 
- Strategy:
- 1. Use table_bbox to filter paddle_text_boxes down to the table region
- 2. Group the filtered boxes into rows
- 3. Smart-match HTML rows against the paddle row groups
- 4. Look up cells within the matched groups
- 
- Args:
- html: HTML table
- paddle_text_boxes: all paddle OCR results
- start_pointer: starting position
- table_bbox: table bounding box [x1, y1, x2, y2]
- """
- soup = BeautifulSoup(html, 'html.parser')
- cells = []
-
- # 🔑 Step 1: filter the paddle boxes inside the table region
- table_region_boxes, actual_table_bbox = self._filter_boxes_in_table_region(
- paddle_text_boxes[start_pointer:],
- table_bbox,
- html
- )
-
- if not table_region_boxes:
- print(f"⚠️ 未在表格区域找到 paddle boxes")
- return str(soup), cells, start_pointer
-
- print(f"📊 表格区域: {len(table_region_boxes)} 个文本框")
- print(f" 边界: {actual_table_bbox}")
-
- # 🔑 第二步:将表格区域的 boxes 按行分组
- grouped_boxes = self._group_paddle_boxes_by_rows(
- table_region_boxes,
- y_tolerance=self.y_tolerance,
- auto_correct_skew=True
- )
-
- # 🔑 Step 3: sort each group by x coordinate
- for group in grouped_boxes:
- group['boxes'].sort(key=lambda x: x['bbox'][0])
-
- grouped_boxes.sort(key=lambda g: g['y_center'])
-
- print(f" 分组: {len(grouped_boxes)} 行")
-
- # 🔑 第四步:智能匹配 HTML 行与 paddle 行组
- html_rows = soup.find_all('tr')
- row_mapping = self._match_html_rows_to_paddle_groups(html_rows, grouped_boxes)
-
- print(f" HTML行: {len(html_rows)} 行")
- print(f" 映射: {len([v for v in row_mapping.values() if v])} 个有效映射")
-
- # 🔑 第五步:遍历 HTML 表格,使用映射关系查找
- for row_idx, row in enumerate(html_rows):
- group_indices = row_mapping.get(row_idx, [])
-
- if not group_indices:
- continue
-
- # Merge boxes from multiple groups
- current_boxes = []
- for group_idx in group_indices:
- if group_idx < len(grouped_boxes):
- current_boxes.extend(grouped_boxes[group_idx]['boxes'])
-
- current_boxes.sort(key=lambda x: x['bbox'][0])
-
- # 🎯 Key improvement: extract HTML cells and pre-compute column boundaries
- html_cells = row.find_all(['td', 'th'])
-
- if not html_cells:
- continue
-
- # 🔑 Estimate column boundaries (based on the x-coordinate distribution)
- col_boundaries = self._estimate_column_boundaries(
- current_boxes,
- len(html_cells)
- )
-
- print(f" 行 {row_idx + 1}: {len(html_cells)} 列,边界: {col_boundaries}")
-
- # 🎯 关键改进:顺序指针匹配
- box_pointer = 0 # 当前行的 boxes 指针
-
- for col_idx, cell in enumerate(html_cells):
- cell_text = cell.get_text(strip=True)
-
- if not cell_text:
- continue
-
- # 🔑 Match starting from the current pointer
- matched_result = self._match_cell_sequential(
- cell_text,
- current_boxes,
- col_boundaries,
- box_pointer
- )
-
- if matched_result:
- merged_bbox = matched_result['bbox']
- merged_text = matched_result['text']
-
- cell['data-bbox'] = f"[{merged_bbox[0]},{merged_bbox[1]},{merged_bbox[2]},{merged_bbox[3]}]"
- cell['data-score'] = f"{matched_result['score']:.4f}"
- cell['data-paddle-indices'] = str(matched_result['paddle_indices'])
-
- cells.append({
- 'type': 'table_cell',
- 'text': cell_text,
- 'matched_text': merged_text,
- 'bbox': merged_bbox,
- 'row': row_idx + 1,
- 'col': col_idx + 1,
- 'score': matched_result['score'],
- 'paddle_bbox_indices': matched_result['paddle_indices']
- })
-
- # Mark the boxes as used
- for box in matched_result['used_boxes']:
- box['used'] = True
-
- # 🎯 Advance the pointer past the last used box
- box_pointer = matched_result['last_used_index'] + 1
- 
- print(f" Column {col_idx + 1}: '{cell_text[:20]}...' matched {len(matched_result['used_boxes'])} boxes (pointer: {box_pointer})")
- 
- # Compute the new pointer position
- used_count = sum(1 for box in table_region_boxes if box.get('used'))
- new_pointer = start_pointer + used_count
-
- print(f" 匹配: {len(cells)} 个单元格")
-
- return str(soup), cells, new_pointer
- def _estimate_column_boundaries(self, boxes: List[Dict],
- num_cols: int) -> List[Tuple[int, int]]:
- """
- Estimate column boundaries (improved: handles multiple text boxes per column)
- 
- Args:
- boxes: all boxes in the current row (already sorted by x)
- num_cols: number of columns in the HTML table
- 
- Returns:
- list of column boundaries [(x_start, x_end), ...]
- """
- if not boxes:
- return []
-
- # 🔑 Key improvement: cluster by x coordinate first (merge multiple text boxes in the same column)
- x_clusters = self._cluster_boxes_by_x(boxes, x_tolerance=self.x_tolerance)
- 
- print(f" X clustering: {len(boxes)} boxes -> {len(x_clusters)} column clusters")
- 
- # Get the overall x-coordinate range
- x_min = min(cluster['x_min'] for cluster in x_clusters)
- x_max = max(cluster['x_max'] for cluster in x_clusters)
-
- # 🎯 Strategy 1: cluster count is at most the column count
- if len(x_clusters) <= num_cols:
- # Use the cluster boundaries directly
- boundaries = [(cluster['x_min'], cluster['x_max'])
- for cluster in x_clusters]
- return boundaries
-
- # 🎯 Strategy 2: more clusters than columns (some columns hold multiple text clusters)
- if len(x_clusters) > num_cols:
- print(f" ℹ️ Clusters {len(x_clusters)} > columns {num_cols}, merging nearby clusters")
- 
- # Merge nearby clusters
- merged_clusters = self._merge_close_clusters(x_clusters, num_cols)
-
- boundaries = [(cluster['x_min'], cluster['x_max'])
- for cluster in merged_clusters]
- return boundaries
-
- return []
- def _cluster_boxes_by_x(self, boxes: List[Dict],
- x_tolerance: int = 3) -> List[Dict]:
- """
- Cluster boxes by x coordinate (merge multiple text boxes in the same column)
- 
- Args:
- boxes: list of text boxes
- x_tolerance: x-coordinate tolerance
- 
- Returns:
- list of clusters [{'x_min': int, 'x_max': int, 'boxes': List[Dict]}, ...]
- """
- if not boxes:
- return []
-
- # Sort by left-edge x coordinate
- sorted_boxes = sorted(boxes, key=lambda b: b['bbox'][0])
-
- clusters = []
- current_cluster = None
-
- for box in sorted_boxes:
- bbox = box['bbox']
- x_start = bbox[0]
- x_end = bbox[2]
-
- if current_cluster is None:
- # Start a new cluster
- current_cluster = {
- 'x_min': x_start,
- 'x_max': x_end,
- 'boxes': [box]
- }
- else:
- # 🔑 Check whether the box belongs to the current cluster (corrected logic)
- # 1. the x ranges overlap: x_start <= current x_max and x_end >= current x_min
- # 2. or the gap is within the tolerance
-
- has_overlap = (x_start <= current_cluster['x_max'] and
- x_end >= current_cluster['x_min'])
-
- is_close = abs(x_start - current_cluster['x_max']) <= x_tolerance
-
- if has_overlap or is_close:
- # Merge into the current cluster
- current_cluster['boxes'].append(box)
- current_cluster['x_min'] = min(current_cluster['x_min'], x_start)
- current_cluster['x_max'] = max(current_cluster['x_max'], x_end)
- else:
- # Save the current cluster and start a new one
- clusters.append(current_cluster)
- current_cluster = {
- 'x_min': x_start,
- 'x_max': x_end,
- 'boxes': [box]
- }
-
- # Append the last cluster
- if current_cluster:
- clusters.append(current_cluster)
-
- return clusters
- def _merge_close_clusters(self, clusters: List[Dict],
- target_count: int) -> List[Dict]:
- """
- Merge nearby clusters until the count equals the target number of columns
- 
- Args:
- clusters: list of clusters
- target_count: target number of columns
- 
- Returns:
- merged list of clusters
- """
- if len(clusters) <= target_count:
- return clusters
-
- # Work on a copy to avoid mutating the input
- working_clusters = [c.copy() for c in clusters]
-
- while len(working_clusters) > target_count:
- # Find the two closest clusters
- min_distance = float('inf')
- merge_idx = 0
-
- for i in range(len(working_clusters) - 1):
- distance = working_clusters[i + 1]['x_min'] - working_clusters[i]['x_max']
- if distance < min_distance:
- min_distance = distance
- merge_idx = i
-
- # Merge them
- cluster1 = working_clusters[merge_idx]
- cluster2 = working_clusters[merge_idx + 1]
-
- merged_cluster = {
- 'x_min': cluster1['x_min'],
- 'x_max': cluster2['x_max'],
- 'boxes': cluster1['boxes'] + cluster2['boxes']
- }
-
- # Replace in place
- working_clusters[merge_idx] = merged_cluster
- working_clusters.pop(merge_idx + 1)
-
- return working_clusters
- def _get_boxes_in_column(self, boxes: List[Dict],
- boundaries: List[Tuple[int, int]],
- col_idx: int) -> List[Dict]:
- """
- Get the boxes within a given column range (improved: includes overlaps)
- 
- Args:
- boxes: all boxes in the current row
- boundaries: column boundaries
- col_idx: column index
- 
- Returns:
- boxes belonging to that column
- """
- if col_idx >= len(boundaries):
- return []
-
- x_start, x_end = boundaries[col_idx]
-
- col_boxes = []
- for box in boxes:
- bbox = box['bbox']
- box_x_start = bbox[0]
- box_x_end = bbox[2]
-
- # 🔑 Improvement: check for overlap (not just the center point)
- overlap = not (box_x_start > x_end or box_x_end < x_start)
-
- if overlap:
- col_boxes.append(box)
-
- return col_boxes
- def _filter_boxes_in_table_region(self, paddle_boxes: List[Dict],
- table_bbox: Optional[List[int]],
- html: str) -> Tuple[List[Dict], List[int]]:
- """
- Filter paddle boxes down to the table region
- 
- Strategy:
- 1. If table_bbox is provided, filter by the bounding box (with an expanded margin)
- 2. Otherwise, infer the region by matching content
- 
- Args:
- paddle_boxes: paddle OCR results
- table_bbox: table bounding box [x1, y1, x2, y2]
- html: HTML content (used for content verification)
- 
- Returns:
- (filtered boxes, actual table bounding box)
- """
- if not paddle_boxes:
- return [], [0, 0, 0, 0]
-
- # 🎯 Strategy 1: use the provided table_bbox (with an expanded margin)
- if table_bbox and len(table_bbox) == 4:
- x1, y1, x2, y2 = table_bbox
-
- # Expand the bounds (to catch text just outside the border)
- margin = 20
- expanded_bbox = [
- max(0, x1 - margin),
- max(0, y1 - margin),
- x2 + margin,
- y2 + margin
- ]
-
- filtered = []
- for box in paddle_boxes:
- bbox = box['bbox']
- box_center_x = (bbox[0] + bbox[2]) / 2
- box_center_y = (bbox[1] + bbox[3]) / 2
-
- # Keep boxes whose center falls inside the expanded region
- if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
- expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
- filtered.append(box)
-
- if filtered:
- # Compute the actual bounding box
- actual_bbox = [
- min(b['bbox'][0] for b in filtered),
- min(b['bbox'][1] for b in filtered),
- max(b['bbox'][2] for b in filtered),
- max(b['bbox'][3] for b in filtered)
- ]
- return filtered, actual_bbox
-
- # 🎯 Strategy 2: infer the region by matching content
- print(" ℹ️ No table_bbox, inferring the table region via content matching...")
- 
- # Extract all text from the HTML (BeautifulSoup is already imported at module level)
- soup = BeautifulSoup(html, 'html.parser')
- html_texts = set()
- for cell in soup.find_all(['td', 'th']):
- text = cell.get_text(strip=True)
- if text:
- html_texts.add(self.text_matcher.normalize_text(text))
-
- if not html_texts:
- return [], [0, 0, 0, 0]
-
- # Find boxes whose text matches the HTML content
- matched_boxes = []
- for box in paddle_boxes:
- normalized_text = self.text_matcher.normalize_text(box['text'])
-
- # Check for a match
- if any(normalized_text in ht or ht in normalized_text
- for ht in html_texts):
- matched_boxes.append(box)
-
- if not matched_boxes:
- # 🔑 Fallback: if exact matching fails, use fuzzy matching
- print(" ℹ️ Exact matching failed, trying fuzzy matching...")
-
- from fuzzywuzzy import fuzz
- for box in paddle_boxes:
- normalized_text = self.text_matcher.normalize_text(box['text'])
-
- for ht in html_texts:
- similarity = fuzz.partial_ratio(normalized_text, ht)
- if similarity >= 70: # lowered threshold
- matched_boxes.append(box)
- break
-
- if matched_boxes:
- # Compute the bounding box
- actual_bbox = [
- min(b['bbox'][0] for b in matched_boxes),
- min(b['bbox'][1] for b in matched_boxes),
- max(b['bbox'][2] for b in matched_boxes),
- max(b['bbox'][3] for b in matched_boxes)
- ]
-
- # 🔑 Expand the bounds to include text that might otherwise be missed
- margin = 30
- expanded_bbox = [
- max(0, actual_bbox[0] - margin),
- max(0, actual_bbox[1] - margin),
- actual_bbox[2] + margin,
- actual_bbox[3] + margin
- ]
-
- # Re-filter (including text on the boundary)
- final_filtered = []
- for box in paddle_boxes:
- bbox = box['bbox']
- box_center_x = (bbox[0] + bbox[2]) / 2
- box_center_y = (bbox[1] + bbox[3]) / 2
-
- if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
- expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
- final_filtered.append(box)
-
- return final_filtered, actual_bbox
-
- # 🔑 Last-resort fallback: return all boxes
- print(" ⚠️ Could not determine the table region, using all paddle boxes")
- if paddle_boxes:
- actual_bbox = [
- min(b['bbox'][0] for b in paddle_boxes),
- min(b['bbox'][1] for b in paddle_boxes),
- max(b['bbox'][2] for b in paddle_boxes),
- max(b['bbox'][3] for b in paddle_boxes)
- ]
- return paddle_boxes, actual_bbox
-
- return [], [0, 0, 0, 0]
- def _group_paddle_boxes_by_rows(self, paddle_boxes: List[Dict],
- y_tolerance: int = 10,
- auto_correct_skew: bool = True) -> List[Dict]:
- """
- Group paddle_text_boxes by y coordinate (clustering) - enhanced version
- 
- Args:
- paddle_boxes: list of Paddle OCR text boxes
- y_tolerance: y-coordinate tolerance (pixels)
- auto_correct_skew: whether to automatically correct skew
- 
- Returns:
- list of groups, each {'y_center': float, 'boxes': List[Dict]}
- """
- if not paddle_boxes:
- return []
-
- # 🎯 Step 1: detect and correct skew (via BBoxExtractor)
- if auto_correct_skew:
- rotation_angle = BBoxExtractor.calculate_skew_angle(paddle_boxes)
-
- if abs(rotation_angle) > 0.5:
- max_x = max(box['bbox'][2] for box in paddle_boxes)
- max_y = max(box['bbox'][3] for box in paddle_boxes)
- image_size = (max_x, max_y)
-
- print(f" 🔧 校正倾斜角度: {rotation_angle:.2f}°")
- paddle_boxes = BBoxExtractor.correct_boxes_skew(
- paddle_boxes, -rotation_angle, image_size
- )
-
- # 🎯 Step 2: group by the corrected y coordinate
- boxes_with_y = []
- for box in paddle_boxes:
- bbox = box['bbox']
- y_center = (bbox[1] + bbox[3]) / 2
- boxes_with_y.append({
- 'y_center': y_center,
- 'box': box
- })
-
- # Sort by y coordinate
- boxes_with_y.sort(key=lambda x: x['y_center'])
-
- groups = []
- current_group = None
-
- for item in boxes_with_y:
- if current_group is None:
- # Start a new group
- current_group = {
- 'y_center': item['y_center'],
- 'boxes': [item['box']]
- }
- else:
- if abs(item['y_center'] - current_group['y_center']) <= y_tolerance:
- current_group['boxes'].append(item['box'])
- # Update the group's center
- current_group['y_center'] = sum(
- (b['bbox'][1] + b['bbox'][3]) / 2 for b in current_group['boxes']
- ) / len(current_group['boxes'])
- else:
- groups.append(current_group)
- current_group = {
- 'y_center': item['y_center'],
- 'boxes': [item['box']]
- }
-
- if current_group:
- groups.append(current_group)
-
- print(f" ✓ 分组完成: {len(groups)} 行")
-
- return groups
- def _match_html_rows_to_paddle_groups(self, html_rows: List,
- grouped_boxes: List[Dict]) -> Dict[int, List[int]]:
- """
- Smart-match HTML rows to paddle groups (optimized: can skip unrelated groups and avoids over-greedy merging)
- """
- if not html_rows or not grouped_boxes:
- return {}
-
- mapping = {}
-
- # 🎯 Strategy 1: equal counts, simple 1:1 mapping
- if len(html_rows) == len(grouped_boxes):
- for i in range(len(html_rows)):
- mapping[i] = [i]
- return mapping
-
- # 🎯 Strategy 2: content-based matching (monotonic matching with a skip mechanism)
- # Extract the HTML row texts
- html_row_texts = []
- for row in html_rows:
- cells = row.find_all(['td', 'th'])
- texts = [self.text_matcher.normalize_text(c.get_text(strip=True)) for c in cells]
- html_row_texts.append("".join(texts))
- # Helper: get a group's concatenated text
- def get_group_text(idx):
- if 0 <= idx < len(grouped_boxes):
- boxes = grouped_boxes[idx]['boxes']
- # Note: no sorting here; assumes each group in grouped_boxes is already sorted by x
- texts = [self.text_matcher.normalize_text(b['text']) for b in boxes]
- return "".join(texts)
- return ""
- paddle_idx = 0
- num_paddle = len(grouped_boxes)
- used_groups = set()
- for html_idx, html_row_text in enumerate(html_row_texts):
- if paddle_idx >= num_paddle:
- mapping[html_idx] = []
- continue
-
- if not html_row_text:
- mapping[html_idx] = []
- continue
- # --- Step 1: find the best "starting" group within the search window ---
- best_score = 0.0
- best_start_idx = -1
-
- # Search window: the next 10 groups from the current pointer
- search_limit = min(paddle_idx + 10, num_paddle)
-
- for i in range(paddle_idx, search_limit):
- # Score the single group
- group_text = get_group_text(i)
- score = self._calculate_similarity(html_row_text, group_text)
-
- # Distance penalty (farther groups lose more points, so nearer groups are preferred)
- dist_penalty = (i - paddle_idx) * 0.02
- final_score = score - dist_penalty
-
- if final_score > best_score and final_score > 0.3: # base threshold
- best_score = final_score
- best_start_idx = i
-
- if best_start_idx == -1:
- mapping[html_idx] = []
- continue
-
- # --- Step 2: greedy merge (look-ahead merge) ---
- current_indices = [best_start_idx]
- current_text = get_group_text(best_start_idx)
- current_score = self._calculate_similarity(html_row_text, current_text)
-
- next_probe = best_start_idx + 1
- max_merge = 5 # cap on how many groups may be merged
-
- while next_probe < num_paddle and len(current_indices) < max_merge:
- next_group_text = get_group_text(next_probe)
-
- # 🛑 Key improvement: look-ahead check
- # Check whether the candidate group belongs more to the next row
- should_stop = False
- if html_idx + 1 < len(html_row_texts):
- next_html_text = html_row_texts[html_idx + 1]
-
- # Compare how strongly the group belongs to the current row vs the next row
- # Using (group, html) order, i.e. how much of the group is covered by the html text
- match_current = self._calculate_similarity(next_group_text, html_row_text)
- match_next = self._calculate_similarity(next_group_text, next_html_text)
-
- # If it matches the next row better and that match is credible (>0.5), stop
- if match_next > match_current and match_next > 0.5:
- print(f" 🛑 Stop merging group {next_probe}: better match for the next row (Next: {match_next:.2f} > Curr: {match_current:.2f})")
- should_stop = True
-
- if should_stop:
- break
- # Try merging
- combined_text = current_text + next_group_text
- new_score = self._calculate_similarity(html_row_text, combined_text)
-
- # Acceptance condition: the score improves
- if new_score > current_score + 0.01:
- current_indices.append(next_probe)
- current_text = combined_text
- current_score = new_score
- next_probe += 1
- else:
- break
-
- mapping[html_idx] = current_indices
- used_groups.update(current_indices)
-
- print(f" ✓ 行 {html_idx}: 匹配组 {current_indices} (得分: {current_score:.2f})")
-
- # 更新指针:跳过已使用的组
- paddle_idx = current_indices[-1] + 1
- # --- 步骤 3: 处理未匹配的组 (Orphans) ---
- unused_groups = [i for i in range(len(grouped_boxes)) if i not in used_groups]
-
- if unused_groups:
- print(f" ℹ️ 发现 {len(unused_groups)} 个未匹配的 paddle 组: {unused_groups}")
- for unused_idx in unused_groups:
- unused_group = grouped_boxes[unused_idx]
- unused_y_min = min(b['bbox'][1] for b in unused_group['boxes'])
- unused_y_max = max(b['bbox'][3] for b in unused_group['boxes'])
-
- above_idx = None
- below_idx = None
- above_distance = float('inf')
- below_distance = float('inf')
-
- for i in range(unused_idx - 1, -1, -1):
- if i in used_groups:
- above_idx = i
- above_group = grouped_boxes[i]
- max_y_box = max(above_group['boxes'], key=lambda b: b['bbox'][3])
- above_y_center = (max_y_box['bbox'][1] + max_y_box['bbox'][3]) / 2
- above_distance = abs(unused_y_min - above_y_center)
- break
-
- for i in range(unused_idx + 1, len(grouped_boxes)):
- if i in used_groups:
- below_idx = i
- below_group = grouped_boxes[i]
- min_y_box = min(below_group['boxes'], key=lambda b: b['bbox'][1])
- below_y_center = (min_y_box['bbox'][1] + min_y_box['bbox'][3]) / 2
- below_distance = abs(below_y_center - unused_y_max)
- break
-
- closest_used_idx = None
- merge_direction = ""
-
- if above_idx is not None and below_idx is not None:
- if above_distance < below_distance:
- closest_used_idx = above_idx
- merge_direction = "上方"
- else:
- closest_used_idx = below_idx
- merge_direction = "下方"
- elif above_idx is not None:
- closest_used_idx = above_idx
- merge_direction = "上方"
- elif below_idx is not None:
- closest_used_idx = below_idx
- merge_direction = "下方"
-
- if closest_used_idx is not None:
- target_html_row = None
- for html_row_idx, group_indices in mapping.items():
- if closest_used_idx in group_indices:
- target_html_row = html_row_idx
- break
-
- if target_html_row is not None:
- if unused_idx not in mapping[target_html_row]:
- mapping[target_html_row].append(unused_idx)
- mapping[target_html_row].sort()
- print(f" • 组 {unused_idx} 合并到 HTML 行 {target_html_row}({merge_direction}行)")
- used_groups.add(unused_idx)
-
- # 🔑 Final pass: sort each row's group indices by y coordinate
- for row_idx in mapping:
- if mapping[row_idx]:
- mapping[row_idx].sort(key=lambda idx: grouped_boxes[idx]['y_center'])
-
- return mapping
- def _calculate_similarity(self, text1: str, text2: str) -> float:
- """
- Compute the similarity of two texts, combining character coverage and sequence similarity
- """
- if not text1 or not text2:
- return 0.0
-
- # 1. Character coverage (character overlap) - handles out-of-order / interleaved text
- from collections import Counter
- c1 = Counter(text1)
- c2 = Counter(text2)
-
- # Count the characters in the intersection
- intersection = c1 & c2
- overlap_count = sum(intersection.values())
-
- # Coverage: what fraction of text1's characters are also found in text2
- coverage = overlap_count / len(text1) if len(text1) > 0 else 0
-
- # 2. Sequence similarity - guards against texts that share characters but are otherwise unrelated
- from fuzzywuzzy import fuzz
- # token_sort_ratio tolerates some reordering
- seq_score = fuzz.token_sort_ratio(text1, text2) / 100.0
-
- # Combined score: weighted towards coverage, since for merging OCR results content completeness matters more than order
- return (coverage * 0.7) + (seq_score * 0.3)
- def _match_cell_sequential(self, cell_text: str,
- boxes: List[Dict],
- col_boundaries: List[Tuple[int, int]],
- start_idx: int) -> Optional[Dict]:
- """
- 🎯 Sequentially match a cell: starting from the given position, merge boxes step by step until a match is found
- 
- Strategy:
- 1. Find the first unused box
- 2. Try an exact match against a single box
- 3. If that fails, try merging multiple boxes
- 
- Args:
- cell_text: HTML cell text
- boxes: candidate boxes (already sorted by x coordinate)
- col_boundaries: list of column boundaries
- start_idx: starting index
-
- Returns:
- {'bbox': [x1,y1,x2,y2], 'text': str, 'score': float,
- 'paddle_indices': [idx1, idx2], 'used_boxes': [box1, box2],
- 'last_used_index': int}
- """
- from fuzzywuzzy import fuzz
-
- cell_text_normalized = self.text_matcher.normalize_text(cell_text)
-
- if len(cell_text_normalized) < 2:
- return None
-
- # 🔑 Find the first unused box
- first_unused_idx = start_idx
- while first_unused_idx < len(boxes) and boxes[first_unused_idx].get('used'):
- first_unused_idx += 1
-
- if first_unused_idx >= len(boxes):
- return None
- # 🔑 Strategy 1: exact match against a single box
- for box in boxes[first_unused_idx:]:
- if box.get('used'):
- continue
-
- box_text = self.text_matcher.normalize_text(box['text'])
-
- if cell_text_normalized == box_text:
- return self._build_match_result([box], box['text'], 100.0, boxes.index(box))
-
- # 🔑 Strategy 2: match against merged boxes
- unused_boxes = [b for b in boxes if not b.get('used')]
- # Merge the boxes that fall within the same column
- merged_bboxes = []
- for col_idx in range(len(col_boundaries)):
- combo_boxes = self._get_boxes_in_column(unused_boxes, col_boundaries, col_idx)
- if len(combo_boxes) > 0:
- sorted_combo = sorted(combo_boxes, key=lambda b: (b['bbox'][1], b['bbox'][0]))
- merged_text = ''.join([b['text'] for b in sorted_combo])
- merged_bboxes.append({
- 'text': merged_text,
- 'sorted_combo': sorted_combo
- })
- for box in merged_bboxes:
- # 1. Exact match
- merged_text_normalized = self.text_matcher.normalize_text(box['text'])
- if cell_text_normalized == merged_text_normalized:
- last_sort_idx = boxes.index(box['sorted_combo'][-1])
- return self._build_match_result(box['sorted_combo'], box['text'], 100.0, last_sort_idx)
-
- # 2. Substring match
- is_substring = (cell_text_normalized in merged_text_normalized or
- merged_text_normalized in cell_text_normalized)
-
- # 3. Fuzzy match
- similarity = fuzz.partial_ratio(cell_text_normalized, merged_text_normalized)
- 
- # 🎯 Bonus for a substring match
- if is_substring:
- similarity = min(100, similarity + 10)
-
- if similarity >= self.text_matcher.similarity_threshold:
- print(f" ✓ 匹配成功: '{cell_text[:15]}' vs '{merged_text[:15]}' (相似度: {similarity})")
- return self._build_match_result(box['sorted_combo'], box['text'], similarity, boxes.index(box['sorted_combo'][-1]))
-
- print(f" ✗ 匹配失败: '{cell_text[:15]}'")
- return None
- def _build_match_result(self, boxes: List[Dict], text: str,
- score: float, last_index: int) -> Dict:
- """构建匹配结果(使用原始坐标)"""
-
- # 🔑 关键修复:使用 original_bbox(如果存在)
- def get_original_bbox(box: Dict) -> List[int]:
- return box.get('original_bbox', box['bbox'])
-
- original_bboxes = [get_original_bbox(b) for b in boxes]
-
- merged_bbox = [
- min(b[0] for b in original_bboxes),
- min(b[1] for b in original_bboxes),
- max(b[2] for b in original_bboxes),
- max(b[3] for b in original_bboxes)
- ]
-
- return {
- 'bbox': merged_bbox, # ✅ original coordinates
- 'text': text,
- 'score': score,
- 'paddle_indices': [b['paddle_bbox_index'] for b in boxes],
- 'used_boxes': boxes,
- 'last_used_index': last_index
- }
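- 
- # --- Minimal usage sketch (illustrative only) ---
- # Assumes bs4/fuzzywuzzy and the sibling text_matcher / bbox_extractor modules are
- # importable, and that BBoxExtractor returns a near-zero skew angle for axis-aligned
- # boxes. Because TextMatcher's constructor is not shown here, a tiny hypothetical
- # stand-in with the same interface (normalize_text + similarity_threshold) is used;
- # swap in the real TextMatcher in actual use.
- if __name__ == "__main__":
- class _DemoTextMatcher:
- """Hypothetical stand-in for TextMatcher (demo only)."""
- similarity_threshold = 75
- def normalize_text(self, text: str) -> str:
- return "".join(text.split()).lower()
- 
- demo_html = (
- "<table>"
- "<tr><th>Item</th><th>Qty</th></tr>"
- "<tr><td>Apple</td><td>12</td></tr>"
- "</table>"
- )
- # Fake OCR boxes laid out in two rows; 'paddle_bbox_index' mirrors the list position
- demo_boxes = [
- {'text': 'Item', 'bbox': [10, 10, 60, 30], 'paddle_bbox_index': 0},
- {'text': 'Qty', 'bbox': [110, 10, 150, 30], 'paddle_bbox_index': 1},
- {'text': 'Apple', 'bbox': [10, 40, 70, 60], 'paddle_bbox_index': 2},
- {'text': '12', 'bbox': [110, 40, 140, 60], 'paddle_bbox_index': 3},
- ]
- matcher = TableCellMatcher(text_matcher=_DemoTextMatcher())
- enhanced_html, matched_cells, pointer = matcher.enhance_table_html_with_bbox(
- demo_html, demo_boxes, start_pointer=0, table_bbox=[0, 0, 200, 80]
- )
- print(enhanced_html)
- print(f"{len(matched_cells)} cells matched, new pointer: {pointer}")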