@@ -0,0 +1,507 @@
+"""
+Smart layout model router.
+Supports automatic model selection and multi-model evaluation strategies.
+"""
+from typing import Dict, List, Any, Optional, Union
+from pathlib import Path
+import numpy as np
+from PIL import Image
+from loguru import logger
+import cv2
+import json
+
+try:
+    from .model_factory import ModelFactory
+    from .ocr_based_layout_evaluator import OCRBasedLayoutEvaluator
+    from models.adapters.base import BaseLayoutDetector
+except ImportError:
+    from model_factory import ModelFactory
+    from ocr_based_layout_evaluator import OCRBasedLayoutEvaluator
+    from models.adapters.base import BaseLayoutDetector
+
+
+class SmartLayoutRouter(BaseLayoutDetector):
+    """Smart layout model router.
+
+    Supported strategies:
+    1. ocr_eval: run every model and use the OCR-based evaluator to pick the best result (recommended, default)
+    2. auto: select automatically from document features (fast mode, no OCR required)
+    """
+
+    def __init__(self, config: Dict[str, Any]):
+        super().__init__(config)
+        self.strategy = config.get('strategy', 'ocr_eval')  # ocr_eval, auto
+        self.models = {}
+        self.model_configs = config.get('models', {})
+        self.fallback_config = config.get('fallback_model', None)
+        self.evaluator = OCRBasedLayoutEvaluator()
+        self.ocr_recognizer = None  # used to obtain OCR results under the ocr_eval strategy
+        # Debug-mode support
+        self.debug_mode = config.get('debug_mode', False)
+        self.output_dir = config.get('output_dir', None)
+        self.page_name = None  # set in the detect() method
+        # Score-gap threshold: when the gap between model scores is below this value, prefer docling
+        self.score_diff_threshold = config.get('score_diff_threshold', 0.05)
+
+    def initialize(self):
+        """Initialize all models."""
+        # Get the post_process config (from the parent config)
+        post_process_config = self.config.get('post_process', {})
+
+        # Initialize the primary models
+        for model_name, model_config in self.model_configs.items():
+            try:
+                logger.info(f"🔧 Initializing layout model: {model_name}")
+                # Propagate the post_process config into the sub-model config
+                if post_process_config:
+                    model_config = model_config.copy()
+                    model_config['post_process'] = post_process_config
+                detector = ModelFactory.create_layout_detector(model_config)
+                self.models[model_name] = detector
+                logger.info(f"✅ Model {model_name} initialized")
+            except Exception as e:
+                logger.warning(f"⚠️ Failed to initialize {model_name}: {e}")
+
+        # Initialize the fallback model (if configured)
+        if self.fallback_config:
+            try:
+                # Propagate the post_process config into the fallback model config
+                fallback_config = self.fallback_config.copy()
+                if post_process_config:
+                    fallback_config['post_process'] = post_process_config
+                fallback_detector = ModelFactory.create_layout_detector(fallback_config)
+                self.models['fallback'] = fallback_detector
+                logger.info("✅ Fallback model initialized")
+            except Exception as e:
+                logger.warning(f"⚠️ Failed to initialize fallback model: {e}")
+
+        if not self.models:
+            raise RuntimeError("No layout models available")
+
+    def cleanup(self):
+        """Release all model resources."""
+        for model_name, model in self.models.items():
+            try:
+                model.cleanup()
+            except Exception as e:
+                logger.warning(f"⚠️ Failed to cleanup {model_name}: {e}")
+        self.models.clear()
+
+    def set_ocr_recognizer(self, ocr_recognizer):
+        """Set the OCR recognizer (used by the ocr_eval strategy)."""
+        self.ocr_recognizer = ocr_recognizer
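+    # Illustrative call order only (a sketch; `router`, `ocr`, and `page_image`
+    # are assumed names, not defined in this module):
+    #
+    #   router = SmartLayoutRouter(config)
+    #   router.initialize()
+    #   router.set_ocr_recognizer(ocr)   # used by 'ocr_eval'; without it the router falls back to 'auto'
+    #   results = router.detect(page_image, page_name="page_0001")
+    #   router.cleanup()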
+
+    def _detect_raw(
+        self,
+        image: Union[np.ndarray, Image.Image],
+        ocr_spans: Optional[List[Dict[str, Any]]] = None
+    ) -> List[Dict[str, Any]]:
+        """
+        Raw detection method (SmartLayoutRouter implementation).
+
+        Note: SmartLayoutRouter overrides detect(), so the base class's template method is never invoked.
+        This implementation exists only to satisfy the abstract-method requirement and delegates to detect().
+        Because SmartLayoutRouter's detect() already contains the full logic (including post-processing),
+        the results returned here are already post-processed.
+
+        Args:
+            image: Input image
+            ocr_spans: OCR results (optional)
+
+        Returns:
+            Layout detection results (already post-processed)
+        """
+        # SmartLayoutRouter overrides detect(), so delegate to it directly.
+        # Note: this returns results that have already been post-processed.
+        return self.detect(image, ocr_spans=ocr_spans)
+
+    def detect(
+        self,
+        image: Union[np.ndarray, Image.Image],
+        ocr_spans: Optional[List[Dict[str, Any]]] = None,
+        page_name: Optional[str] = None
+    ) -> List[Dict[str, Any]]:
+        """
+        Detect layout, dispatching on the configured strategy.
+
+        Args:
+            image: Input image
+            ocr_spans: OCR results (optional; if None and the ocr_eval strategy is active, the router tries to obtain them)
+            page_name: Page name (used for debug-mode output filenames)
+        """
+        # Set the page name (used in debug mode)
+        if page_name is not None:
+            self.page_name = page_name
+
+        if self.strategy == 'ocr_eval':
+            return self._ocr_eval_detect(image, ocr_spans)
+        elif self.strategy == 'auto':
+            return self._auto_select_detect(image)
+        else:
+            raise ValueError(f"Unknown strategy: {self.strategy}")
+
+    def _ocr_eval_detect(
+        self,
+        image: Union[np.ndarray, Image.Image],
+        ocr_spans: Optional[List[Dict[str, Any]]] = None
+    ) -> List[Dict[str, Any]]:
+        """
+        OCR-evaluation strategy: run every model and use the OCR-based evaluator to pick the best result.
+
+        Flow:
+        1. Run all models to get results (via detect(), which applies post-processing automatically)
+        2. Obtain OCR spans (if none were provided)
+        3. Evaluate each model's post-processed results with the evaluator
+        4. Select the result from the highest-scoring model
+
+        Note: detect() is called directly because the base class's detect() already implements the post-processing logic.
+        """
+        # 1. Run all models (via detect(), which applies post-processing automatically)
+        all_postprocessed_results = {}
+        for model_name, model in self.models.items():
+            if model_name == 'fallback':
+                continue  # skip the fallback model (unless every model fails)
+            try:
+                # Call detect(); the base class applies post-processing automatically
+                results = model.detect(image)
+                all_postprocessed_results[model_name] = results
+                logger.info(f"✅ Model {model_name} detected {len(results)} elements (post-processed)")
+            except Exception as e:
+                logger.warning(f"⚠️ Model {model_name} failed: {e}")
+                all_postprocessed_results[model_name] = []
+
+        if not all_postprocessed_results:
+            # If all models failed, try the fallback model
+            if 'fallback' in self.models:
+                logger.info("🔄 All models failed, using fallback model")
+                # The fallback model also goes through detect() (post-processing included)
+                fallback_result = self.models['fallback'].detect(image)
+                return fallback_result
+            return []
+
+        # 2. Obtain OCR spans (if none were provided)
+        if ocr_spans is None:
+            ocr_spans = self._get_ocr_spans(image)
+
+        if not ocr_spans:
+            logger.warning("⚠️ No OCR spans available, falling back to auto strategy")
+            return self._auto_select_detect(image)
+
+        # 3. Evaluate each model's post-processed results
+        evaluations = {}
+        for model_name, results in all_postprocessed_results.items():
+            if not results:
+                continue
+            try:
+                eval_result = self.evaluator.evaluate_with_ocr(results, ocr_spans, image)
+                evaluations[model_name] = {
+                    'results': results,
+                    'score': eval_result.get('overall_score', 0.0),
+                    'metrics': eval_result
+                }
+                logger.info(f"📊 Model {model_name} evaluation score: {eval_result.get('overall_score', 0.0):.3f}")
+            except Exception as e:
+                logger.warning(f"⚠️ Failed to evaluate {model_name}: {e}")
+                evaluations[model_name] = {
+                    'results': results,
+                    'score': 0.0,
+                    'metrics': {}
+                }
+
+        if not evaluations:
+            # If every evaluation failed, use the first available result
+            first_model = next(iter(all_postprocessed_results.keys()))
+            logger.warning(f"⚠️ All evaluations failed, using first model: {first_model}")
+            return all_postprocessed_results[first_model]
+
+        # 4. Select the best model's result
+        # If the score gap is below the threshold, prefer docling
+        if len(evaluations) >= 2:
+            # Sort by score in descending order
+            sorted_models = sorted(evaluations.items(), key=lambda x: x[1]['score'], reverse=True)
+            highest_score = sorted_models[0][1]['score']
+            second_score = sorted_models[1][1]['score']
+            score_diff = highest_score - second_score
+
+            # If the score gap is below the threshold and docling is among the evaluated models, prefer docling
+            if score_diff < self.score_diff_threshold and 'docling' in evaluations:
+                best_model_name = 'docling'
+                best_model_data = evaluations['docling']
+                logger.info(
+                    f"🎯 Selected docling (preferred when score diff < {self.score_diff_threshold:.3f}): "
+                    f"docling={best_model_data['score']:.3f}, "
+                    f"highest={highest_score:.3f}, diff={score_diff:.3f}"
+                )
+            else:
+                # Select the highest-scoring model
+                best_model_name, best_model_data = sorted_models[0]
+                logger.info(
+                    f"🎯 Selected best model: {best_model_name} "
+                    f"(score: {best_model_data['score']:.3f}, diff: {score_diff:.3f})"
+                )
+        else:
+            # Only one model is available; select it directly
+            best_model = max(evaluations.items(), key=lambda x: x[1]['score'])
+            best_model_name, best_model_data = best_model
+            logger.info(f"🎯 Selected model: {best_model_name} (score: {best_model_data['score']:.3f})")
+
+        # 5. Debug mode: compare the outputs of the different models
+        if self.debug_mode and self.output_dir:
+            self._compare_layout_models(
+                image=image,
+                all_postprocessed_results=all_postprocessed_results,
+                evaluations=evaluations,
+                best_model_name=best_model_name
+            )
+
+        # Record the selection info in the results
+        results = best_model_data['results']
+        for result in results:
+            result['selected_model'] = best_model_name
+            result['evaluation_score'] = best_model_data['score']
+
+        return results
+
+    def _auto_select_detect(self, image: Union[np.ndarray, Image.Image]) -> List[Dict[str, Any]]:
+        """
+        Auto-selection strategy: pick the best model automatically from document features.
+
+        Flow:
+        1. Analyze the document features
+        2. Select a model based on those features
+        3. Run detection with the selected model
+        4. Apply post-processing to the detections (remove overlapping boxes, text-to-table conversion, etc.)
+        """
+        # Analyze the document features
+        features = self._analyze_document_features(image)
+
+        # Select a model based on the features
+        selected_model = self._select_best_model(features)
+
+        logger.info(f"🎯 Auto-selected model: {selected_model} (features: {features})")
+
+        # Run detection with the selected model (via detect(), which applies post-processing automatically)
+        if selected_model in self.models:
+            model = self.models[selected_model]
+            results = model.detect(image)
+        else:
+            # Fall back to the first available model
+            first_model = next(iter(self.models.values()))
+            results = first_model.detect(image)
+
+        return results
+
+    def _get_ocr_spans(self, image: Union[np.ndarray, Image.Image]) -> List[Dict[str, Any]]:
+        """
+        Obtain OCR spans (text-box detection only, no text recognition).
+
+        Following the approach in paddle_ori_cls.py, detect_text_boxes runs only the detection model.
+        This greatly improves performance, because the evaluator only needs bbox information, not the text content.
+        """
+        if self.ocr_recognizer is None:
+            logger.warning("⚠️ OCR recognizer not set, cannot get OCR spans")
+            return []
+
+        try:
+            # Use detect_text_boxes to detect text boxes only, without recognizing the text
+            if hasattr(self.ocr_recognizer, 'detect_text_boxes'):
+                ocr_spans = self.ocr_recognizer.detect_text_boxes(image)
+            else:
+                # Without a detect_text_boxes method, fall back to full recognition
+                logger.warning("⚠️ OCR recognizer doesn't support detect_text_boxes, using recognize_text")
+                ocr_spans = self.ocr_recognizer.recognize_text(image)
+
+            logger.info(f"📝 Got {len(ocr_spans)} text boxes (detection only, no recognition)")
+            return ocr_spans
+        except Exception as e:
+            logger.warning(f"⚠️ Failed to get OCR spans: {e}")
+            return []
+
+    def _analyze_document_features(self, image: Union[np.ndarray, Image.Image]) -> Dict[str, float]:
+        """Analyze document features."""
+        if isinstance(image, Image.Image):
+            img_array = np.array(image)
+        else:
+            img_array = image
+
+        if len(img_array.shape) == 3:
+            gray = cv2.cvtColor(img_array, cv2.COLOR_BGR2GRAY)
+        else:
+            gray = img_array
+
+        h, w = gray.shape
+
+        # Table density (based on edge detection)
+        edges = cv2.Canny(gray, 50, 150)
+        horizontal_lines = np.sum(np.sum(edges, axis=1) > 0)
+        vertical_lines = np.sum(np.sum(edges, axis=0) > 0)
+        table_density = (horizontal_lines + vertical_lines) / (h + w) if (h + w) > 0 else 0
+
+        # Text density (based on binarization)
+        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+        text_density = np.sum(binary < 128) / (h * w) if (h * w) > 0 else 0
+
+        # Image complexity (based on the grayscale standard deviation)
+        gray_array = np.asarray(gray)
+        image_complexity = float(np.std(gray_array))  # type: ignore
+
+        return {
+            'table_density': float(table_density),
+            'text_density': float(text_density),
+            'image_complexity': float(image_complexity),
+            'aspect_ratio': float(w / h) if h > 0 else 1.0
+        }
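+    # Rough reading of the returned features (illustrative, not normative):
+    # 'table_density' and 'text_density' are ratios in [0, 1],
+    # 'image_complexity' is the grayscale standard deviation (0-255 scale),
+    # and 'aspect_ratio' is width / height. _select_best_model() below keys off
+    # table_density > 0.01 and text_density > 0.3.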
+
+    def _select_best_model(self, features: Dict[str, float]) -> str:
+        """Select the best model based on document features."""
+        # Heuristic rules for choosing a model from the features:
+        # high table density → docling usually does better
+        # high text density → mineru usually does better
+        # complex images → try multiple models
+
+        table_density = features.get('table_density', 0)
+        text_density = features.get('text_density', 0)
+
+        # Check whether 'docling' and 'mineru' models are available
+        has_docling = 'docling' in self.models
+        has_mineru = 'mineru' in self.models
+
+        if has_docling and has_mineru:
+            # High table density: prefer docling
+            if table_density > 0.01:
+                return 'docling'
+            # High text density: prefer mineru
+            elif text_density > 0.3:
+                return 'mineru'
+            # Default to docling
+            else:
+                return 'docling'
+        elif has_docling:
+            return 'docling'
+        elif has_mineru:
+            return 'mineru'
+        else:
+            # Return the first available model
+            return next(iter(self.models.keys()))
+
+    def _compare_layout_models(
+        self,
+        image: Union[np.ndarray, Image.Image],
+        all_postprocessed_results: Dict[str, List[Dict[str, Any]]],
+        evaluations: Dict[str, Dict[str, Any]],
+        best_model_name: str
+    ):
+        """
+        Compare the outputs of multiple layout models and generate debug artifacts.
+
+        Modeled on the _compare_ocr_and_pdf_text implementation in pipeline_manager_v2.py.
+
+        Args:
+            image: Input image
+            all_postprocessed_results: Post-processed results from every model
+            evaluations: Evaluation results for every model
+            best_model_name: Name of the selected best model
+        """
+        if not self.output_dir or not self.page_name:
+            return
+
+        try:
+            # Convert to a numpy array
+            if isinstance(image, Image.Image):
+                vis_image = np.array(image)
+                if len(vis_image.shape) == 3 and vis_image.shape[2] == 3:
+                    # PIL RGB -> OpenCV BGR
+                    vis_image = cv2.cvtColor(vis_image, cv2.COLOR_RGB2BGR)
+            else:
+                vis_image = image.copy()
+                if len(vis_image.shape) == 3 and vis_image.shape[2] == 3:
+                    # If RGB, convert to BGR
+                    vis_image = cv2.cvtColor(vis_image, cv2.COLOR_RGB2BGR)
+
+            # Color scheme per model
+            model_colors = {
+                'docling': (255, 0, 0),    # blue (BGR)
+                'mineru': (0, 255, 0),     # green
+                'paddle': (0, 255, 255),   # yellow
+                'dit': (255, 0, 255),      # magenta
+            }
+            best_color = (0, 0, 255)  # red, used for the best model
+
+            # Draw each model's detection boxes
+            for model_name, results in all_postprocessed_results.items():
+                if not results:
+                    continue
+
+                # Pick a color: the best model is red and thicker, the others use their own color
+                if model_name == best_model_name:
+                    color = best_color
+                    thickness = 3
+                else:
+                    color = model_colors.get(model_name, (128, 128, 128))  # default gray
+                    thickness = 2
+
+                # Draw the detection boxes
+                for result in results:
+                    bbox = result.get('bbox', [])
+                    if not bbox or len(bbox) < 4:
+                        continue
+
+                    x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
+                    cv2.rectangle(vis_image, (x1, y1), (x2, y2), color, thickness)
+
+                    # Label the best model's boxes
+                    if model_name == best_model_name:
+                        label = f"{model_name} (best)"
+                        # Measure the text size
+                        font = cv2.FONT_HERSHEY_SIMPLEX
+                        font_scale = 0.5
+                        text_thickness = 1
+                        (text_width, text_height), baseline = cv2.getTextSize(label, font, font_scale, text_thickness)
+                        # Draw the text background above the box
+                        cv2.rectangle(vis_image, (x1, y1 - text_height - baseline - 2),
+                                      (x1 + text_width, y1), color, -1)
+                        # Draw the text
+                        cv2.putText(vis_image, label, (x1, y1 - baseline - 1),
+                                    font, font_scale, (255, 255, 255), text_thickness)
+
+            # Save the comparison image
+            debug_dir = Path(self.output_dir) / "debug_comparison" / "layout_comparison"
+            debug_dir.mkdir(parents=True, exist_ok=True)
+            output_path = debug_dir / f"{self.page_name}_layout_comparison.jpg"
+            cv2.imwrite(str(output_path), vis_image)
+            logger.info(f"📊 Saved layout comparison image: {output_path}")
+
+            # Build the comparison JSON data
+            comparison_data = {
+                'page_name': self.page_name,
+                'best_model': best_model_name,
+                'best_score': evaluations.get(best_model_name, {}).get('score', 0.0),
+                'models': {}
+            }
+
+            # Collect information for each model
+            for model_name, results in all_postprocessed_results.items():
+                eval_data = evaluations.get(model_name, {})
+                comparison_data['models'][model_name] = {
+                    'count': len(results),
+                    'score': eval_data.get('score', 0.0),
+                    'metrics': eval_data.get('metrics', {}),
+                    'is_best': model_name == best_model_name,
+                    'results': [
+                        {
+                            'category': r.get('category'),
+                            'bbox': r.get('bbox'),
+                            'confidence': r.get('confidence', 0.0)
+                        }
+                        for r in results[:10]  # keep only the first 10 results to avoid an oversized JSON
+                    ]
+                }
+
+            # Save the comparison JSON
+            json_path = debug_dir / f"{self.page_name}_layout_comparison.json"
+            with open(json_path, 'w', encoding='utf-8') as f:
+                json.dump(comparison_data, f, ensure_ascii=False, indent=2)
+            logger.info(f"📊 Saved layout comparison JSON: {json_path}")
+
+        except Exception as e:
+            logger.warning(f"⚠️ Failed to generate layout comparison: {e}")