
Merge pull request #3425 from opendatalab/release-2.2.0

Release 2.2.0
Xiaomeng Zhao, 2 months ago
Parent / commit: a9f28b4436
67 files changed, with 6,690 insertions and 527 deletions
  1. README.md (+114 -39)
  2. README_zh-CN.md (+114 -39)
  3. docker/china/Dockerfile (+4 -4)
  4. docker/global/Dockerfile (+2 -2)
  5. docs/en/quick_start/docker_deployment.md (+2 -2)
  6. docs/en/reference/output_files.md (+2 -17)
  7. docs/en/usage/cli_tools.md (+1 -1)
  8. docs/zh/quick_start/docker_deployment.md (+2 -2)
  9. docs/zh/reference/output_files.md (+2 -17)
  10. docs/zh/usage/cli_tools.md (+1 -1)
  11. mineru/backend/pipeline/batch_analyze.py (+176 -65)
  12. mineru/backend/pipeline/model_init.py (+84 -18)
  13. mineru/backend/pipeline/model_json_to_middle_json.py (+7 -3)
  14. mineru/backend/pipeline/model_list.py (+5 -1)
  15. mineru/backend/pipeline/pipeline_analyze.py (+4 -4)
  16. mineru/backend/pipeline/pipeline_middle_json_mkcontent.py (+14 -2)
  17. mineru/backend/vlm/hf_predictor.py (+10 -4)
  18. mineru/backend/vlm/token_to_middle_json.py (+11 -2)
  19. mineru/backend/vlm/vlm_analyze.py (+3 -2)
  20. mineru/backend/vlm/vlm_middle_json_mkcontent.py (+14 -2)
  21. mineru/cli/client.py (+1 -1)
  22. mineru/cli/common.py (+5 -1)
  23. mineru/cli/fast_api.py (+122 -39)
  24. mineru/cli/gradio_app.py (+1 -1)
  25. mineru/model/layout/doclayoutyolo.py (+49 -3)
  26. mineru/model/mfd/yolo_v8.py (+55 -2)
  27. mineru/model/mfr/unimernet/Unimernet.py (+2 -2)
  28. mineru/model/ocr/paddleocr2pytorch/pytorch_paddle.py (+8 -4)
  29. mineru/model/ocr/paddleocr2pytorch/pytorchocr/utils/resources/arch_config.yaml (+76 -0)
  30. mineru/model/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/ppocrv5_el_dict.txt (+354 -0)
  31. mineru/model/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/ppocrv5_en_dict.txt (+436 -0)
  32. mineru/model/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/ppocrv5_th_dict.txt (+524 -0)
  33. mineru/model/ocr/paddleocr2pytorch/pytorchocr/utils/resources/models_config.yml (+14 -2)
  34. mineru/model/ocr/paddleocr2pytorch/tools/infer/predict_rec.py (+2 -2)
  35. mineru/model/ori_cls/__init__.py (+1 -0)
  36. mineru/model/ori_cls/paddle_ori_cls.py (+279 -0)
  37. mineru/model/table/cls/__init__.py (+1 -0)
  38. mineru/model/table/cls/paddle_table_cls.py (+148 -0)
  39. mineru/model/table/rapid_table.py (+0 -89)
  40. mineru/model/table/rec/RapidTable.py (+154 -0)
  41. mineru/model/table/rec/__init__.py (+1 -0)
  42. mineru/model/table/rec/slanet_plus/__init__.py (+0 -0)
  43. mineru/model/table/rec/slanet_plus/main.py (+212 -0)
  44. mineru/model/table/rec/slanet_plus/matcher.py (+198 -0)
  45. mineru/model/table/rec/slanet_plus/matcher_utils.py (+246 -0)
  46. mineru/model/table/rec/slanet_plus/table_structure.py (+109 -0)
  47. mineru/model/table/rec/slanet_plus/table_structure_utils.py (+570 -0)
  48. mineru/model/table/rec/unet_table/__init__.py (+0 -0)
  49. mineru/model/table/rec/unet_table/main.py (+341 -0)
  50. mineru/model/table/rec/unet_table/table_recover.py (+214 -0)
  51. mineru/model/table/rec/unet_table/table_structure_unet.py (+206 -0)
  52. mineru/model/table/rec/unet_table/utils.py (+492 -0)
  53. mineru/model/table/rec/unet_table/utils_table_line_rec.py (+347 -0)
  54. mineru/model/table/rec/unet_table/utils_table_recover.py (+311 -0)
  55. mineru/utils/draw_bbox.py (+111 -35)
  56. mineru/utils/enum_class.py (+9 -1)
  57. mineru/utils/model_utils.py (+51 -35)
  58. mineru/utils/ocr_utils.py (+4 -3)
  59. mineru/utils/pdf_classify.py (+19 -13)
  60. mineru/utils/pdf_image_tools.py (+29 -6)
  61. mineru/utils/pdf_reader.py (+32 -18)
  62. mineru/utils/table_merge.py (+338 -0)
  63. mkdocs.yml (+3 -0)
  64. projects/multi_gpu_v2/_config_endpoint.py (+1 -1)
  65. pyproject.toml (+6 -21)
  66. tests/unittest/pdfs/test.pdf (binary)
  67. tests/unittest/test_e2e.py (+36 -21)

+ 114 - 39
README.md

@@ -43,48 +43,122 @@
 </div>
 
 # Changelog
-- 2025/08/01 2.1.10 Released
-  - Fixed an issue in the `pipeline` backend where block overlap caused the parsing results to deviate from expectations #3232
-- 2025/07/30 2.1.9 Released
-  - `transformers` 4.54.1 version adaptation
-- 2025/07/28 2.1.8 Released
-  - `sglang` 0.4.9.post5 version adaptation
-- 2025/07/27 2.1.7 Released
-  - `transformers` 4.54.0 version adaptation
-- 2025/07/26 2.1.6 Released
-  - Fixed table parsing issues in handwritten documents when using `vlm` backend
-  - Fixed visualization box position drift issue when document is rotated #3175
-- 2025/07/24 2.1.5 Released
-  - `sglang` 0.4.9 version adaptation, synchronously upgrading the dockerfile base image to sglang 0.4.9.post3
-- 2025/07/23 2.1.4 Released
-  - Bug Fixes
-    - Fixed the issue of excessive memory consumption during the `MFR` step in the `pipeline` backend under certain scenarios #2771
-    - Fixed the inaccurate matching between `image`/`table` and `caption`/`footnote` under certain conditions #3129
-- 2025/07/16 2.1.1 Released
-  - Bug fixes
-    - Fixed text block content loss issue that could occur in certain `pipeline` scenarios #3005
-    - Fixed issue where `sglang-client` required unnecessary packages like `torch` #2968
-    - Updated `dockerfile` to fix incomplete text content parsing due to missing fonts in Linux #2915
-  - Usability improvements
-    - Updated `compose.yaml` to facilitate direct startup of `sglang-server`, `mineru-api`, and `mineru-gradio` services
-    - Launched brand new [online documentation site](https://opendatalab.github.io/MinerU/), simplified readme, providing better documentation experience
-- 2025/07/05 Version 2.1.0 Released
-  - This is the first major update of MinerU 2, which includes a large number of new features and improvements, covering significant performance optimizations, user experience enhancements, and bug fixes. The detailed update contents are as follows:
-  - **Performance Optimizations:**
-    - Significantly improved preprocessing speed for documents with specific resolutions (around 2000 pixels on the long side).
-    - Greatly enhanced post-processing speed when the `pipeline` backend handles batch processing of documents with fewer pages (<10 pages).
-    - Layout analysis speed of the `pipeline` backend has been increased by approximately 20%.
-  - **Experience Enhancements:**
-    - Built-in ready-to-use `fastapi service` and `gradio webui`. For detailed usage instructions, please refer to [Documentation](https://opendatalab.github.io/MinerU/usage/quick_usage/#advanced-usage-via-api-webui-sglang-clientserver).
-    - Adapted to `sglang` version `0.4.8`, significantly reducing the GPU memory requirements for the `vlm-sglang` backend. It can now run on graphics cards with as little as `8GB GPU memory` (Turing architecture or newer).
-    - Added transparent parameter passing for all commands related to `sglang`, allowing the `sglang-engine` backend to receive all `sglang` parameters consistently with the `sglang-server`.
-    - Supports feature extensions based on configuration files, including `custom formula delimiters`, `enabling heading classification`, and `customizing local model directories`. For detailed usage instructions, please refer to [Documentation](https://opendatalab.github.io/MinerU/usage/quick_usage/#extending-mineru-functionality-with-configuration-files).
-  - **New Features:**
-    - Updated the `pipeline` backend with the PP-OCRv5 multilingual text recognition model, supporting text recognition in 37 languages such as French, Spanish, Portuguese, Russian, and Korean, with an average accuracy improvement of over 30%. [Details](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/algorithm/PP-OCRv5/PP-OCRv5_multi_languages.html)
-    - Introduced limited support for vertical text layout in the `pipeline` backend.
+
+- 2025/09/05 2.2.0 Released
+  - Major Updates
+    - In this version, we focused on improving table parsing accuracy by introducing a new [wired table recognition model](https://github.com/RapidAI/TableStructureRec) and a brand-new hybrid table structure parsing algorithm, significantly enhancing the table recognition capabilities of the `pipeline` backend.
+    - We also added cross-page table merging, supported by both the `pipeline` and `vlm` backends, further improving the completeness and accuracy of table parsing.
+  - Other Updates
+    - The `pipeline` backend now handles tables rotated by 270 degrees, extending table parsing to the 0/90/270-degree orientations
+    - `pipeline` added OCR support for Thai and Greek and updated the English OCR model to the latest version; English recognition accuracy improved by 11%, while the new Thai and Greek recognition models reach 82.68% and 89.28% accuracy respectively (all based on PP-OCRv5)
+    - Added `bbox` field (mapped to 0-1000 range) in the output `content_list.json`, making it convenient for users to directly obtain position information for each content block
+
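To illustrate the new `bbox` field described in the last bullet above, here is a minimal consumer-side sketch (not part of this PR) that maps the 0-1000 normalized coordinates back to absolute page coordinates; the file path and the page size (612×792, US Letter in PDF points) are placeholder values:

```python
import json

def denormalize_bbox(bbox, page_w, page_h):
    """Map a 0-1000 normalized [x0, y0, x1, y1] box back to page coordinates."""
    x0, y0, x1, y1 = bbox
    return [x0 * page_w / 1000, y0 * page_h / 1000,
            x1 * page_w / 1000, y1 * page_h / 1000]

with open("output/content_list.json", encoding="utf-8") as f:
    content_list = json.load(f)

for block in content_list:
    if "bbox" in block:  # bbox is new in 2.2.0 and mapped to the 0-1000 range
        print(block["page_idx"], denormalize_bbox(block["bbox"], 612, 792))
```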
 
 <details>
   <summary>History Log</summary>
+
+  <details>
+    <summary>2025/08/01 2.1.10 Released</summary>
+    <ul>
+      <li>Fixed an issue in the <code>pipeline</code> backend where block overlap caused the parsing results to deviate from expectations #3232</li>
+    </ul>
+  </details>  
+
+  <details>
+    <summary>2025/07/30 2.1.9 Released</summary>
+    <ul>
+      <li><code>transformers</code> 4.54.1 version adaptation</li>
+    </ul>
+  </details>  
+
+  <details>
+    <summary>2025/07/28 2.1.8 Released</summary>
+    <ul>
+      <li><code>sglang</code> 0.4.9.post5 version adaptation</li>
+    </ul>
+  </details>  
+
+  <details>
+    <summary>2025/07/27 2.1.7 Released</summary>
+    <ul>
+      <li><code>transformers</code> 4.54.0 version adaptation</li>
+    </ul>
+  </details>  
+
+  <details>
+    <summary>2025/07/26 2.1.6 Released</summary>
+    <ul>
+      <li>Fixed table parsing issues in handwritten documents when using <code>vlm</code> backend</li>
+      <li>Fixed visualization box position drift issue when document is rotated #3175</li>
+    </ul>
+  </details>  
+
+  <details>
+    <summary>2025/07/24 2.1.5 Released</summary>
+    <ul>
+      <li><code>sglang</code> 0.4.9 version adaptation, synchronously upgrading the dockerfile base image to sglang 0.4.9.post3</li>
+    </ul>
+  </details>  
+
+  <details>
+    <summary>2025/07/23 2.1.4 Released</summary>
+    <ul>
+      <li><strong>Bug Fixes</strong>
+        <ul>
+          <li>Fixed the issue of excessive memory consumption during the <code>MFR</code> step in the <code>pipeline</code> backend under certain scenarios #2771</li>
+          <li>Fixed the inaccurate matching between <code>image</code>/<code>table</code> and <code>caption</code>/<code>footnote</code> under certain conditions #3129</li>
+        </ul>
+      </li>
+    </ul>
+  </details>  
+
+  <details>
+    <summary>2025/07/16 2.1.1 Released</summary>
+    <ul>
+      <li><strong>Bug fixes</strong>
+        <ul>
+          <li>Fixed text block content loss issue that could occur in certain <code>pipeline</code> scenarios #3005</li>
+          <li>Fixed issue where <code>sglang-client</code> required unnecessary packages like <code>torch</code> #2968</li>
+          <li>Updated <code>dockerfile</code> to fix incomplete text content parsing due to missing fonts in Linux #2915</li>
+        </ul>
+      </li>
+      <li><strong>Usability improvements</strong>
+        <ul>
+          <li>Updated <code>compose.yaml</code> to facilitate direct startup of <code>sglang-server</code>, <code>mineru-api</code>, and <code>mineru-gradio</code> services</li>
+          <li>Launched brand new <a href="https://opendatalab.github.io/MinerU/">online documentation site</a>, simplified readme, providing better documentation experience</li>
+        </ul>
+      </li>
+    </ul>
+  </details>  
+
+  <details>
+    <summary>2025/07/05 2.1.0 Released</summary>
+    <ul>
+      <li>This is the first major update of MinerU 2, which includes a large number of new features and improvements, covering significant performance optimizations, user experience enhancements, and bug fixes. The detailed update contents are as follows:</li>
+      <li><strong>Performance Optimizations:</strong>
+        <ul>
+          <li>Significantly improved preprocessing speed for documents with specific resolutions (around 2000 pixels on the long side).</li>
+          <li>Greatly enhanced post-processing speed when the <code>pipeline</code> backend handles batch processing of documents with fewer pages (&lt;10 pages).</li>
+          <li>Layout analysis speed of the <code>pipeline</code> backend has been increased by approximately 20%.</li>
+        </ul>
+      </li>
+      <li><strong>Experience Enhancements:</strong>
+        <ul>
+          <li>Built-in ready-to-use <code>fastapi service</code> and <code>gradio webui</code>. For detailed usage instructions, please refer to <a href="https://opendatalab.github.io/MinerU/usage/quick_usage/#advanced-usage-via-api-webui-sglang-clientserver">Documentation</a>.</li>
+          <li>Adapted to <code>sglang</code> version <code>0.4.8</code>, significantly reducing the GPU memory requirements for the <code>vlm-sglang</code> backend. It can now run on graphics cards with as little as <code>8GB GPU memory</code> (Turing architecture or newer).</li>
+          <li>Added transparent parameter passing for all commands related to <code>sglang</code>, allowing the <code>sglang-engine</code> backend to receive all <code>sglang</code> parameters consistently with the <code>sglang-server</code>.</li>
+          <li>Supports feature extensions based on configuration files, including <code>custom formula delimiters</code>, <code>enabling heading classification</code>, and <code>customizing local model directories</code>. For detailed usage instructions, please refer to <a href="https://opendatalab.github.io/MinerU/usage/quick_usage/#extending-mineru-functionality-with-configuration-files">Documentation</a>.</li>
+        </ul>
+      </li>
+      <li><strong>New Features:</strong>
+        <ul>
+          <li>Updated the <code>pipeline</code> backend with the PP-OCRv5 multilingual text recognition model, supporting text recognition in 37 languages such as French, Spanish, Portuguese, Russian, and Korean, with an average accuracy improvement of over 30%. <a href="https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/algorithm/PP-OCRv5/PP-OCRv5_multi_languages.html">Details</a></li>
+          <li>Introduced limited support for vertical text layout in the <code>pipeline</code> backend.</li>
+        </ul>
+      </li>
+    </ul>
+  </details>
+
   <details>
     <summary>2025/06/20 2.0.6 Released</summary>
     <ul>
@@ -596,6 +670,7 @@ Currently, some models in this project are trained based on YOLO. However, since
 - [DocLayout-YOLO](https://github.com/opendatalab/DocLayout-YOLO)
 - [UniMERNet](https://github.com/opendatalab/UniMERNet)
 - [RapidTable](https://github.com/RapidAI/RapidTable)
+- [TableStructureRec](https://github.com/RapidAI/TableStructureRec)
 - [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)
 - [PaddleOCR2Pytorch](https://github.com/frotms/PaddleOCR2Pytorch)
 - [layoutreader](https://github.com/ppaanngggg/layoutreader)

+ 114 - 39
README_zh-CN.md

@@ -43,48 +43,122 @@
 </div>
 
 # 更新记录
-- 2025/08/01 2.1.10 发布
-  - 修复`pipeline`后端因block覆盖导致的解析结果与预期不符  #3232
-- 2025/07/30 2.1.9 发布
-  - `transformers` 4.54.1 版本适配
-- 2025/07/28 2.1.8 发布
-  - `sglang` 0.4.9.post5 版本适配
-- 2025/07/27 2.1.7 发布
-  - `transformers` 4.54.0 版本适配
-- 2025/07/26 2.1.6 发布
-  - 修复`vlm`后端解析部分手写文档时的表格异常问题
-  - 修复文档旋转时可视化框位置漂移问题 #3175
-- 2025/07/24 2.1.5 发布
-  - `sglang` 0.4.9 版本适配,同步升级dockerfile基础镜像为sglang 0.4.9.post3
-- 2025/07/23 2.1.4 发布
-  - bug修复
-    - 修复`pipeline`后端中`MFR`步骤在某些情况下显存消耗过大的问题 #2771
-    - 修复某些情况下`image`/`table`与`caption`/`footnote`匹配不准确的问题 #3129
-- 2025/07/16 2.1.1 发布
-  - bug修复 
-    - 修复`pipeline`在某些情况可能发生的文本块内容丢失问题 #3005
-    - 修复`sglang-client`需要安装`torch`等不必要的包的问题 #2968
-    - 更新`dockerfile`以修复linux字体缺失导致的解析文本内容不完整问题 #2915
-  - 易用性更新
-    - 更新`compose.yaml`,便于用户直接启动`sglang-server`、`mineru-api`、`mineru-gradio`服务
-    - 启用全新的[在线文档站点](https://opendatalab.github.io/MinerU/zh/),简化readme,提供更好的文档体验
-- 2025/07/05 2.1.0 发布
-  - 这是 MinerU 2 的第一个大版本更新,包含了大量新功能和改进,包含众多性能优化、体验优化和bug修复,具体更新内容如下: 
-  - 性能优化: 
-    - 大幅提升某些特定分辨率(长边2000像素左右)文档的预处理速度
-    - 大幅提升`pipeline`后端批量处理大量页数较少(<10)文档时的后处理速度
-    - `pipeline`后端的layout分析速度提升约20%
-  - 体验优化:
-    - 内置开箱即用的`fastapi服务`和`gradio webui`,详细使用方法请参考[文档](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#apiwebuisglang-clientserver)
-    - `sglang`适配`0.4.8`版本,大幅降低`vlm-sglang`后端的显存要求,最低可在`8G显存`(Turing及以后架构)的显卡上运行
-    - 对所有命令增加`sglang`的参数透传,使得`sglang-engine`后端可以与`sglang-server`一致,接收`sglang`的所有参数
-    - 支持基于配置文件的功能扩展,包含`自定义公式标识符`、`开启标题分级功能`、`自定义本地模型目录`,详细使用方法请参考[文档](https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#mineru_1)
-  - 新特性:  
-    - `pipeline`后端更新 PP-OCRv5 多语种文本识别模型,支持法语、西班牙语、葡萄牙语、俄语、韩语等 37 种语言的文字识别,平均精度涨幅超30%。[详情](https://paddlepaddle.github.io/PaddleOCR/latest/version3.x/algorithm/PP-OCRv5/PP-OCRv5_multi_languages.html)
-    - `pipeline`后端增加对竖排文本的有限支持
+
+- 2025/09/05 2.2.0 发布
+  - 主要更新
+    - 在这个版本我们重点提升了表格的解析精度,通过引入新的[有线表识别模型](https://github.com/RapidAI/TableStructureRec)和全新的混合表格结构解析算法,显著提升了`pipeline`后端的表格识别能力。
+    - 另外我们增加了对跨页表格合并的支持,这一功能同时支持`pipeline`和`vlm`后端,进一步提升了表格解析的完整性和准确性。
+  - 其他更新
+    - `pipeline`后端增加270度旋转的表格解析能力,现已支持0/90/270度三个方向的表格解析
+    - `pipeline`增加对泰文、希腊文的ocr能力支持,并更新了英文ocr模型至最新,英文识别精度提升11%,泰文识别模型精度 82.68%,希腊文识别模型精度 89.28%(by PPOCRv5)
+    - 在输出的`content_list.json`中增加了`bbox`字段(映射至0-1000范围内),方便用户直接获取每个内容块的位置信息
+
 
 <details>
   <summary>历史日志</summary>
+
+  <details>
+    <summary>2025/08/01 2.1.10 发布</summary>
+    <ul>
+      <li>修复<code>pipeline</code>后端因block覆盖导致的解析结果与预期不符 #3232</li>
+    </ul>
+  </details>
+
+  <details>
+    <summary>2025/07/30 2.1.9 发布</summary>
+    <ul>
+      <li><code>transformers</code> 4.54.1 版本适配</li>
+    </ul>
+  </details>
+
+  <details>
+    <summary>2025/07/28 2.1.8 发布</summary>
+    <ul>
+      <li><code>sglang</code> 0.4.9.post5 版本适配</li>
+    </ul>
+  </details>
+
+  <details>
+    <summary>2025/07/27 2.1.7 发布</summary>
+    <ul>
+      <li><code>transformers</code> 4.54.0 版本适配</li>
+    </ul>
+  </details>
+
+  <details>
+    <summary>2025/07/26 2.1.6 发布</summary>
+    <ul>
+      <li>修复<code>vlm</code>后端解析部分手写文档时的表格异常问题</li>
+      <li>修复文档旋转时可视化框位置漂移问题 #3175</li>
+    </ul>
+  </details>
+
+  <details>
+    <summary>2025/07/24 2.1.5 发布</summary>
+    <ul>
+      <li><code>sglang</code> 0.4.9 版本适配,同步升级dockerfile基础镜像为sglang 0.4.9.post3</li>
+    </ul>
+  </details>
+
+  <details>
+    <summary>2025/07/23 2.1.4 发布</summary>
+    <ul>
+      <li><strong>bug修复</strong>
+        <ul>
+          <li>修复<code>pipeline</code>后端中<code>MFR</code>步骤在某些情况下显存消耗过大的问题 #2771</li>
+          <li>修复某些情况下<code>image</code>/<code>table</code>与<code>caption</code>/<code>footnote</code>匹配不准确的问题 #3129</li>
+        </ul>
+      </li>
+    </ul>
+  </details>
+
+  <details>
+    <summary>2025/07/16 2.1.1 发布</summary>
+    <ul>
+      <li><strong>bug修复</strong>
+        <ul>
+          <li>修复<code>pipeline</code>在某些情况可能发生的文本块内容丢失问题 #3005</li>
+          <li>修复<code>sglang-client</code>需要安装<code>torch</code>等不必要的包的问题 #2968</li>
+          <li>更新<code>dockerfile</code>以修复linux字体缺失导致的解析文本内容不完整问题 #2915</li>
+        </ul>
+      </li>
+      <li><strong>易用性更新</strong>
+        <ul>
+          <li>更新<code>compose.yaml</code>,便于用户直接启动<code>sglang-server</code>、<code>mineru-api</code>、<code>mineru-gradio</code>服务</li>
+          <li>启用全新的<a href="https://opendatalab.github.io/MinerU/zh/">在线文档站点</a>,简化readme,提供更好的文档体验</li>
+        </ul>
+      </li>
+    </ul>
+  </details>
+
+  <details>
+    <summary>2025/07/05 2.1.0 发布</summary>
+    <p>这是 MinerU 2 的第一个大版本更新,包含了大量新功能和改进,包含众多性能优化、体验优化和bug修复,具体更新内容如下:</p>
+    <ul>
+      <li><strong>性能优化:</strong>
+        <ul>
+          <li>大幅提升某些特定分辨率(长边2000像素左右)文档的预处理速度</li>
+          <li>大幅提升<code>pipeline</code>后端批量处理大量页数较少(&lt;10)文档时的后处理速度</li>
+          <li><code>pipeline</code>后端的layout分析速度提升约20%</li>
+        </ul>
+      </li>
+      <li><strong>体验优化:</strong>
+        <ul>
+          <li>内置开箱即用的<code>fastapi服务</code>和<code>gradio webui</code>,详细使用方法请参考<a href="https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#apiwebuisglang-clientserver">文档</a></li>
+          <li><code>sglang</code>适配<code>0.4.8</code>版本,大幅降低<code>vlm-sglang</code>后端的显存要求,最低可在<code>8G显存</code>(Turing及以后架构)的显卡上运行</li>
+          <li>对所有命令增加<code>sglang</code>的参数透传,使得<code>sglang-engine</code>后端可以与<code>sglang-server</code>一致,接收<code>sglang</code>的所有参数</li>
+          <li>支持基于配置文件的功能扩展,包含<code>自定义公式标识符</code>、<code>开启标题分级功能</code>、<code>自定义本地模型目录</code>,详细使用方法请参考<a href="https://opendatalab.github.io/MinerU/zh/usage/quick_usage/#mineru_1">文档</a></li>
+        </ul>
+      </li>
+      <li><strong>新特性:</strong>
+        <ul>
+          <li><code>pipeline</code>后端更新 PP-OCRv5 多语种文本识别模型,支持法语、西班牙语、葡萄牙语、俄语、韩语等 37 种语言的文字识别,平均精度涨幅超30%。<a href="https://paddlepaddle.github.io/PaddleOCR/latest/version3.x/algorithm/PP-OCRv5/PP-OCRv5_multi_languages.html">详情</a></li>
+          <li><code>pipeline</code>后端增加对竖排文本的有限支持</li>
+        </ul>
+      </li>
+    </ul>
+  </details>
+
   <details>
     <summary>2025/06/20 2.0.6发布</summary>
     <ul>
@@ -584,6 +658,7 @@ mineru -p <input_path> -o <output_path>
 - [DocLayout-YOLO](https://github.com/opendatalab/DocLayout-YOLO)
 - [UniMERNet](https://github.com/opendatalab/UniMERNet)
 - [RapidTable](https://github.com/RapidAI/RapidTable)
+- [TableStructureRec](https://github.com/RapidAI/TableStructureRec)
 - [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)
 - [PaddleOCR2Pytorch](https://github.com/frotms/PaddleOCR2Pytorch)
 - [layoutreader](https://github.com/ppaanngggg/layoutreader)

+ 4 - 4
docker/china/Dockerfile

@@ -1,12 +1,12 @@
 # Use DaoCloud mirrored sglang image for China region
-FROM docker.m.daocloud.io/lmsysorg/sglang:v0.4.9.post6-cu126
+FROM docker.m.daocloud.io/lmsysorg/sglang:v0.4.10.post2-cu126
 # For blackwell GPU, use the following line instead:
-# FROM docker.m.daocloud.io/lmsysorg/sglang:v0.4.9.post6-cu128-b200
+# FROM docker.m.daocloud.io/lmsysorg/sglang:v0.4.10.post2-cu128-b200
 
 # Use the official sglang image
-# FROM lmsysorg/sglang:v0.4.9.post6-cu126
+# FROM lmsysorg/sglang:v0.4.10.post2-cu126
 # For blackwell GPU, use the following line instead:
-# FROM lmsysorg/sglang:v0.4.9.post6-cu128-b200
+# FROM lmsysorg/sglang:v0.4.10.post2-cu128-b200
 
 # Install libgl for opencv support & Noto fonts for Chinese characters
 RUN apt-get update && \

+ 2 - 2
docker/global/Dockerfile

@@ -1,7 +1,7 @@
 # Use the official sglang image
-FROM lmsysorg/sglang:v0.4.9.post6-cu126
+FROM lmsysorg/sglang:v0.4.10.post2-cu126
 # For blackwell GPU, use the following line instead:
-# FROM lmsysorg/sglang:v0.4.9.post6-cu128-b200
+# FROM lmsysorg/sglang:v0.4.10.post2-cu128-b200
 
 # Install libgl for opencv support & Noto fonts for Chinese characters
 RUN apt-get update && \

+ 2 - 2
docs/en/quick_start/docker_deployment.md

@@ -10,8 +10,8 @@ docker build -t mineru-sglang:latest -f Dockerfile .
 ```
 
 > [!TIP]
-> The [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/global/Dockerfile) uses `lmsysorg/sglang:v0.4.9.post6-cu126` as the base image by default, supporting Turing/Ampere/Ada Lovelace/Hopper platforms.
-> If you are using the newer `Blackwell` platform, please modify the base image to `lmsysorg/sglang:v0.4.9.post6-cu128-b200` before executing the build operation.
+> The [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/global/Dockerfile) uses `lmsysorg/sglang:v0.4.10.post2-cu126` as the base image by default, supporting Turing/Ampere/Ada Lovelace/Hopper platforms.
+> If you are using the newer `Blackwell` platform, please modify the base image to `lmsysorg/sglang:v0.4.10.post2-cu128-b200` before executing the build operation.
 
 ## Docker Description
 

File diff suppressed because it is too large
+ 2 - 17
docs/en/reference/output_files.md


+ 1 - 1
docs/en/usage/cli_tools.md

@@ -13,7 +13,7 @@ Options:
   -m, --method [auto|txt|ocr]     Parsing method: auto (default), txt, ocr (pipeline backend only)
   -b, --backend [pipeline|vlm-transformers|vlm-sglang-engine|vlm-sglang-client]
                                   Parsing backend (default: pipeline)
-  -l, --lang [ch|ch_server|ch_lite|en|korean|japan|chinese_cht|ta|te|ka|latin|arabic|east_slavic|cyrillic|devanagari]
+  -l, --lang [ch|ch_server|ch_lite|en|korean|japan|chinese_cht|ta|te|ka|th|el|latin|arabic|east_slavic|cyrillic|devanagari]
                                   Specify document language (improves OCR accuracy, pipeline backend only)
   -u, --url TEXT                  Service address when using sglang-client
   -s, --start INTEGER             Starting page number for parsing (0-based)

+ 2 - 2
docs/zh/quick_start/docker_deployment.md

@@ -10,8 +10,8 @@ docker build -t mineru-sglang:latest -f Dockerfile .
 ```
 
 > [!TIP]
-> [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/china/Dockerfile)默认使用`lmsysorg/sglang:v0.4.9.post6-cu126`作为基础镜像,支持Turing/Ampere/Ada Lovelace/Hopper平台,
-> 如您使用较新的`Blackwell`平台,请将基础镜像修改为`lmsysorg/sglang:v0.4.9.post6-cu128-b200` 再执行build操作。
+> [Dockerfile](https://github.com/opendatalab/MinerU/blob/master/docker/china/Dockerfile)默认使用`lmsysorg/sglang:v0.4.10.post2-cu126`作为基础镜像,支持Turing/Ampere/Ada Lovelace/Hopper平台,
+> 如您使用较新的`Blackwell`平台,请将基础镜像修改为`lmsysorg/sglang:v0.4.10.post2-cu128-b200` 再执行build操作。
 
 ## Docker说明
 

File diff suppressed because it is too large
+ 2 - 17
docs/zh/reference/output_files.md


+ 1 - 1
docs/zh/usage/cli_tools.md

@@ -13,7 +13,7 @@ Options:
   -m, --method [auto|txt|ocr]     解析方法:auto(默认)、txt、ocr(仅用于 pipeline 后端)
   -b, --backend [pipeline|vlm-transformers|vlm-sglang-engine|vlm-sglang-client]
                                   解析后端(默认为 pipeline)
-  -l, --lang [ch|ch_server|ch_lite|en|korean|japan|chinese_cht|ta|te|ka|latin|arabic|east_slavic|cyrillic|devanagari]
+  -l, --lang [ch|ch_server|ch_lite|en|korean|japan|chinese_cht|ta|te|ka|th|el|latin|arabic|east_slavic|cyrillic|devanagari]
                                   指定文档语言(可提升 OCR 准确率,仅用于 pipeline 后端)
   -u, --url TEXT                  当使用 sglang-client 时,需指定服务地址
   -s, --start INTEGER             开始解析的页码(从 0 开始)

+ 176 - 65
mineru/backend/pipeline/batch_analyze.py

@@ -1,3 +1,5 @@
+import html
+
 import cv2
 from loguru import logger
 from tqdm import tqdm
@@ -5,14 +7,19 @@ from collections import defaultdict
 import numpy as np
 
 from .model_init import AtomModelSingleton
+from .model_list import AtomicModel
 from ...utils.config_reader import get_formula_enable, get_table_enable
-from ...utils.model_utils import crop_img, get_res_list_from_layout_res
-from ...utils.ocr_utils import get_adjusted_mfdetrec_res, get_ocr_result_list, OcrConfidence
+from ...utils.model_utils import crop_img, get_res_list_from_layout_res, clean_vram
+from ...utils.ocr_utils import merge_det_boxes, update_det_boxes, sorted_boxes
+from ...utils.ocr_utils import get_adjusted_mfdetrec_res, get_ocr_result_list, OcrConfidence, get_rotate_crop_image
+from ...utils.pdf_image_tools import get_crop_np_img
 
-YOLO_LAYOUT_BASE_BATCH_SIZE = 8
+YOLO_LAYOUT_BASE_BATCH_SIZE = 1
 MFD_BASE_BATCH_SIZE = 1
 MFR_BASE_BATCH_SIZE = 16
 OCR_DET_BASE_BATCH_SIZE = 16
+TABLE_ORI_CLS_BATCH_SIZE = 16
+TABLE_Wired_Wireless_CLS_BATCH_SIZE = 16
 
 
 class BatchAnalyze:
@@ -36,44 +43,42 @@ class BatchAnalyze:
         )
         atom_model_manager = AtomModelSingleton()
 
-        images = [image for image, _, _ in images_with_extra_info]
+        pil_images = [image for image, _, _ in images_with_extra_info]
 
-        # doclayout_yolo
-        layout_images = []
-        for image_index, image in enumerate(images):
-            layout_images.append(image)
+        np_images = [np.asarray(image) for image, _, _ in images_with_extra_info]
 
+        # doclayout_yolo
 
         images_layout_res += self.model.layout_model.batch_predict(
-            layout_images, YOLO_LAYOUT_BASE_BATCH_SIZE
+            pil_images, YOLO_LAYOUT_BASE_BATCH_SIZE
         )
 
         if self.formula_enable:
             # 公式检测
             images_mfd_res = self.model.mfd_model.batch_predict(
-                images, MFD_BASE_BATCH_SIZE
+                np_images, MFD_BASE_BATCH_SIZE
             )
 
             # 公式识别
             images_formula_list = self.model.mfr_model.batch_predict(
                 images_mfd_res,
-                images,
+                np_images,
                 batch_size=self.batch_ratio * MFR_BASE_BATCH_SIZE,
             )
             mfr_count = 0
-            for image_index in range(len(images)):
+            for image_index in range(len(np_images)):
                 images_layout_res[image_index] += images_formula_list[image_index]
                 mfr_count += len(images_formula_list[image_index])
 
         # 清理显存
-        # clean_vram(self.model.device, vram_threshold=8)
+        clean_vram(self.model.device, vram_threshold=8)
 
         ocr_res_list_all_page = []
         table_res_list_all_page = []
-        for index in range(len(images)):
+        for index in range(len(np_images)):
             _, ocr_enable, _lang = images_with_extra_info[index]
             layout_res = images_layout_res[index]
-            pil_img = images[index]
+            np_img = np_images[index]
 
             ocr_res_list, table_res_list, single_page_mfdetrec_res = (
                 get_res_list_from_layout_res(layout_res)
@@ -82,19 +87,150 @@ class BatchAnalyze:
             ocr_res_list_all_page.append({'ocr_res_list':ocr_res_list,
                                           'lang':_lang,
                                           'ocr_enable':ocr_enable,
-                                          'pil_img':pil_img,
+                                          'np_img':np_img,
                                           'single_page_mfdetrec_res':single_page_mfdetrec_res,
                                           'layout_res':layout_res,
                                           })
 
             for table_res in table_res_list:
-                table_img, _ = crop_img(table_res, pil_img)
+                def get_crop_table_img(scale):
+                    crop_xmin, crop_ymin = int(table_res['poly'][0]), int(table_res['poly'][1])
+                    crop_xmax, crop_ymax = int(table_res['poly'][4]), int(table_res['poly'][5])
+                    bbox = (int(crop_xmin / scale), int(crop_ymin / scale), int(crop_xmax / scale), int(crop_ymax / scale))
+                    return get_crop_np_img(bbox, np_img, scale=scale)
+
+                wireless_table_img = get_crop_table_img(scale = 1)
+                wired_table_img = get_crop_table_img(scale = 10/3)
+
                 table_res_list_all_page.append({'table_res':table_res,
                                                 'lang':_lang,
-                                                'table_img':table_img,
+                                                'table_img':wireless_table_img,
+                                                'wired_table_img':wired_table_img,
                                               })
 
-        # OCR检测处理
+        # 表格识别 table recognition
+        if self.table_enable:
+
+            # 图片旋转批量处理
+            img_orientation_cls_model = atom_model_manager.get_atom_model(
+                atom_model_name=AtomicModel.ImgOrientationCls,
+            )
+            try:
+                img_orientation_cls_model.batch_predict(table_res_list_all_page,
+                                                        det_batch_size=self.batch_ratio * OCR_DET_BASE_BATCH_SIZE,
+                                                        batch_size=TABLE_ORI_CLS_BATCH_SIZE)
+            except Exception as e:
+                logger.warning(
+                    f"Image orientation classification failed: {e}, using original image"
+                )
+
+            # 表格分类
+            table_cls_model = atom_model_manager.get_atom_model(
+                atom_model_name=AtomicModel.TableCls,
+            )
+            try:
+                table_cls_model.batch_predict(table_res_list_all_page,
+                                              batch_size=TABLE_Wired_Wireless_CLS_BATCH_SIZE)
+            except Exception as e:
+                logger.warning(
+                    f"Table classification failed: {e}, using default model"
+                )
+
+            # OCR det 过程,顺序执行
+            rec_img_lang_group = defaultdict(list)
+            det_ocr_engine = atom_model_manager.get_atom_model(
+                atom_model_name=AtomicModel.OCR,
+                det_db_box_thresh=0.5,
+                det_db_unclip_ratio=1.6,
+                enable_merge_det_boxes=False,
+            )
+            for index, table_res_dict in enumerate(
+                    tqdm(table_res_list_all_page, desc="Table-ocr det")
+            ):
+                bgr_image = cv2.cvtColor(table_res_dict["table_img"], cv2.COLOR_RGB2BGR)
+                ocr_result = det_ocr_engine.ocr(bgr_image, rec=False)[0]
+                # 构造需要 OCR 识别的图片字典,包括cropped_img, dt_box, table_id,并按照语言进行分组
+                for dt_box in ocr_result:
+                    rec_img_lang_group[_lang].append(
+                        {
+                            "cropped_img": get_rotate_crop_image(
+                                bgr_image, np.asarray(dt_box, dtype=np.float32)
+                            ),
+                            "dt_box": np.asarray(dt_box, dtype=np.float32),
+                            "table_id": index,
+                        }
+                    )
+
+            # OCR rec,按照语言分批处理
+            for _lang, rec_img_list in rec_img_lang_group.items():
+                ocr_engine = atom_model_manager.get_atom_model(
+                    atom_model_name=AtomicModel.OCR,
+                    det_db_box_thresh=0.5,
+                    det_db_unclip_ratio=1.6,
+                    lang=_lang,
+                    enable_merge_det_boxes=False,
+                )
+                cropped_img_list = [item["cropped_img"] for item in rec_img_list]
+                ocr_res_list = ocr_engine.ocr(cropped_img_list, det=False, tqdm_enable=True, tqdm_desc=f"Table-ocr rec {_lang}")[0]
+                # 按照 table_id 将识别结果进行回填
+                for img_dict, ocr_res in zip(rec_img_list, ocr_res_list):
+                    if table_res_list_all_page[img_dict["table_id"]].get("ocr_result"):
+                        table_res_list_all_page[img_dict["table_id"]]["ocr_result"].append(
+                            [img_dict["dt_box"], html.escape(ocr_res[0]), ocr_res[1]]
+                        )
+                    else:
+                        table_res_list_all_page[img_dict["table_id"]]["ocr_result"] = [
+                            [img_dict["dt_box"], html.escape(ocr_res[0]), ocr_res[1]]
+                        ]
+
+            clean_vram(self.model.device, vram_threshold=8)
+
+            # 先对所有表格使用无线表格模型,然后对分类为有线的表格使用有线表格模型
+            wireless_table_model = atom_model_manager.get_atom_model(
+                atom_model_name=AtomicModel.WirelessTable,
+            )
+            wireless_table_model.batch_predict(table_res_list_all_page)
+
+            # 单独拿出有线表格进行预测
+            wired_table_res_list = []
+            for table_res_dict in table_res_list_all_page:
+                # logger.debug(f"Table classification result: {table_res_dict["table_res"]["cls_label"]} with confidence {table_res_dict["table_res"]["cls_score"]}")
+                if (
+                    (table_res_dict["table_res"]["cls_label"] == AtomicModel.WirelessTable and table_res_dict["table_res"]["cls_score"] < 0.9)
+                    or table_res_dict["table_res"]["cls_label"] == AtomicModel.WiredTable
+                ):
+                    wired_table_res_list.append(table_res_dict)
+                del table_res_dict["table_res"]["cls_label"]
+                del table_res_dict["table_res"]["cls_score"]
+            if wired_table_res_list:
+                for table_res_dict in tqdm(
+                        wired_table_res_list, desc="Table-wired Predict"
+                ):
+                    if not table_res_dict.get("ocr_result", None):
+                        continue
+
+                    wired_table_model = atom_model_manager.get_atom_model(
+                        atom_model_name=AtomicModel.WiredTable,
+                        lang=table_res_dict["lang"],
+                    )
+                    table_res_dict["table_res"]["html"] = wired_table_model.predict(
+                        table_res_dict["wired_table_img"],
+                        table_res_dict["ocr_result"],
+                        table_res_dict["table_res"].get("html", None)
+                    )
+
+            # 表格格式清理
+            for table_res_dict in table_res_list_all_page:
+                html_code = table_res_dict["table_res"].get("html", "")
+
+                # 检查html_code是否包含'<table>'和'</table>'
+                if "<table>" in html_code and "</table>" in html_code:
+                    # 选用<table>到</table>的内容,放入table_res_dict['table_res']['html']
+                    start_index = html_code.find("<table>")
+                    end_index = html_code.rfind("</table>") + len("</table>")
+                    table_res_dict["table_res"]["html"] = html_code[start_index:end_index]
+
+        # OCR det
         if self.enable_ocr_det_batch:
             # 批处理模式 - 按语言和分辨率分组
             # 收集所有需要OCR检测的裁剪图像
@@ -105,17 +241,17 @@ class BatchAnalyze:
 
                 for res in ocr_res_list_dict['ocr_res_list']:
                     new_image, useful_list = crop_img(
-                        res, ocr_res_list_dict['pil_img'], crop_paste_x=50, crop_paste_y=50
+                        res, ocr_res_list_dict['np_img'], crop_paste_x=50, crop_paste_y=50
                     )
                     adjusted_mfdetrec_res = get_adjusted_mfdetrec_res(
                         ocr_res_list_dict['single_page_mfdetrec_res'], useful_list
                     )
 
                     # BGR转换
-                    new_image = cv2.cvtColor(np.asarray(new_image), cv2.COLOR_RGB2BGR)
+                    bgr_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)
 
                     all_cropped_images_info.append((
-                        new_image, useful_list, ocr_res_list_dict, res, adjusted_mfdetrec_res, _lang
+                        bgr_image, useful_list, ocr_res_list_dict, res, adjusted_mfdetrec_res, _lang
                     ))
 
             # 按语言分组
@@ -133,20 +269,23 @@ class BatchAnalyze:
 
                 # 获取OCR模型
                 ocr_model = atom_model_manager.get_atom_model(
-                    atom_model_name='ocr',
+                    atom_model_name=AtomicModel.OCR,
                     det_db_box_thresh=0.3,
                     lang=lang
                 )
 
                 # 按分辨率分组并同时完成padding
+                # RESOLUTION_GROUP_STRIDE = 32
+                RESOLUTION_GROUP_STRIDE = 64  # 定义分辨率分组的步进值
+
                 resolution_groups = defaultdict(list)
                 for crop_info in lang_crop_list:
                     cropped_img = crop_info[0]
                     h, w = cropped_img.shape[:2]
                     # 使用更大的分组容差,减少分组数量
                     # 将尺寸标准化到32的倍数
-                    normalized_h = ((h + 32) // 32) * 32  # 向上取整到32的倍数
-                    normalized_w = ((w + 32) // 32) * 32
+                    normalized_h = ((h + RESOLUTION_GROUP_STRIDE) // RESOLUTION_GROUP_STRIDE) * RESOLUTION_GROUP_STRIDE  # 向上取整到32的倍数
+                    normalized_w = ((w + RESOLUTION_GROUP_STRIDE) // RESOLUTION_GROUP_STRIDE) * RESOLUTION_GROUP_STRIDE
                     group_key = (normalized_h, normalized_w)
                     resolution_groups[group_key].append(crop_info)
 
@@ -156,8 +295,8 @@ class BatchAnalyze:
                     # 计算目标尺寸(组内最大尺寸,向上取整到32的倍数)
                     max_h = max(crop_info[0].shape[0] for crop_info in group_crops)
                     max_w = max(crop_info[0].shape[1] for crop_info in group_crops)
-                    target_h = ((max_h + 32 - 1) // 32) * 32
-                    target_w = ((max_w + 32 - 1) // 32) * 32
+                    target_h = ((max_h + RESOLUTION_GROUP_STRIDE - 1) // RESOLUTION_GROUP_STRIDE) * RESOLUTION_GROUP_STRIDE
+                    target_w = ((max_w + RESOLUTION_GROUP_STRIDE - 1) // RESOLUTION_GROUP_STRIDE) * RESOLUTION_GROUP_STRIDE
 
                     # 对所有图像进行padding到统一尺寸
                     batch_images = []
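To make the grouping logic in this hunk easier to follow, here is a simplified, self-contained sketch of the same idea with stub data. `RESOLUTION_GROUP_STRIDE` follows the new value of 64, and the bucketing arithmetic mirrors the diff; for brevity, the sketch pads each group to its bucket size rather than recomputing the group maximum as the real code does:

```python
from collections import defaultdict
import numpy as np

RESOLUTION_GROUP_STRIDE = 64

def group_and_pad(crops):
    # Bucket crops by size rounded to the stride, as in the diff above
    groups = defaultdict(list)
    for img in crops:
        h, w = img.shape[:2]
        key = (((h + RESOLUTION_GROUP_STRIDE) // RESOLUTION_GROUP_STRIDE) * RESOLUTION_GROUP_STRIDE,
               ((w + RESOLUTION_GROUP_STRIDE) // RESOLUTION_GROUP_STRIDE) * RESOLUTION_GROUP_STRIDE)
        groups[key].append(img)
    # Pad each group to one uniform shape so it can run as a single batch
    batches = {}
    for (gh, gw), imgs in groups.items():
        batches[(gh, gw)] = [np.pad(im, ((0, gh - im.shape[0]), (0, gw - im.shape[1]), (0, 0)))
                             for im in imgs]
    return batches

crops = [np.zeros((40, 100, 3), np.uint8),
         np.zeros((50, 90, 3), np.uint8),
         np.zeros((200, 300, 3), np.uint8)]
for shape, batch in group_and_pad(crops).items():
    print(shape, len(batch))  # the first two crops share one (64, 128) batch
```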
@@ -177,13 +316,10 @@ class BatchAnalyze:
 
                     # 处理批处理结果
                     for i, (crop_info, (dt_boxes, elapse)) in enumerate(zip(group_crops, batch_results)):
-                        new_image, useful_list, ocr_res_list_dict, res, adjusted_mfdetrec_res, _lang = crop_info
+                        bgr_image, useful_list, ocr_res_list_dict, res, adjusted_mfdetrec_res, _lang = crop_info
 
                         if dt_boxes is not None and len(dt_boxes) > 0:
                             # 直接应用原始OCR流程中的关键处理步骤
-                            from mineru.utils.ocr_utils import (
-                                merge_det_boxes, update_det_boxes, sorted_boxes
-                            )
 
                             # 1. 排序检测框
                             if len(dt_boxes) > 0:
@@ -208,7 +344,7 @@ class BatchAnalyze:
 
                             if ocr_res:
                                 ocr_result_list = get_ocr_result_list(
-                                    ocr_res, useful_list, ocr_res_list_dict['ocr_enable'], new_image, _lang
+                                    ocr_res, useful_list, ocr_res_list_dict['ocr_enable'], bgr_image, _lang
                                 )
 
                                 ocr_res_list_dict['layout_res'].extend(ocr_result_list)
@@ -219,58 +355,33 @@ class BatchAnalyze:
                 _lang = ocr_res_list_dict['lang']
                 # Get OCR results for this language's images
                 ocr_model = atom_model_manager.get_atom_model(
-                    atom_model_name='ocr',
+                    atom_model_name=AtomicModel.OCR,
                     ocr_show_log=False,
                     det_db_box_thresh=0.3,
                     lang=_lang
                 )
                 for res in ocr_res_list_dict['ocr_res_list']:
                     new_image, useful_list = crop_img(
-                        res, ocr_res_list_dict['pil_img'], crop_paste_x=50, crop_paste_y=50
+                        res, ocr_res_list_dict['np_img'], crop_paste_x=50, crop_paste_y=50
                     )
                     adjusted_mfdetrec_res = get_adjusted_mfdetrec_res(
                         ocr_res_list_dict['single_page_mfdetrec_res'], useful_list
                     )
                     # OCR-det
-                    new_image = cv2.cvtColor(np.asarray(new_image), cv2.COLOR_RGB2BGR)
+                    bgr_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)
                     ocr_res = ocr_model.ocr(
-                        new_image, mfd_res=adjusted_mfdetrec_res, rec=False
+                        bgr_image, mfd_res=adjusted_mfdetrec_res, rec=False
                     )[0]
 
                     # Integration results
                     if ocr_res:
                         ocr_result_list = get_ocr_result_list(
-                            ocr_res, useful_list, ocr_res_list_dict['ocr_enable'],new_image, _lang
+                            ocr_res, useful_list, ocr_res_list_dict['ocr_enable'],bgr_image, _lang
                         )
 
                         ocr_res_list_dict['layout_res'].extend(ocr_result_list)
 
-        # 表格识别 table recognition
-        if self.table_enable:
-            for table_res_dict in tqdm(table_res_list_all_page, desc="Table Predict"):
-                _lang = table_res_dict['lang']
-                table_model = atom_model_manager.get_atom_model(
-                    atom_model_name='table',
-                    lang=_lang,
-                )
-                html_code, table_cell_bboxes, logic_points, elapse = table_model.predict(table_res_dict['table_img'])
-                # 判断是否返回正常
-                if html_code:
-                    # 检查html_code是否包含'<table>'和'</table>'
-                    if '<table>' in html_code and '</table>' in html_code:
-                        # 选用<table>到</table>的内容,放入table_res_dict['table_res']['html']
-                        start_index = html_code.find('<table>')
-                        end_index = html_code.rfind('</table>') + len('</table>')
-                        table_res_dict['table_res']['html'] = html_code[start_index:end_index]
-                    else:
-                        logger.warning(
-                            'table recognition processing fails, not found expected HTML table end'
-                        )
-                else:
-                    logger.warning(
-                        'table recognition processing fails, not get html return'
-                    )
-
+        # OCR rec
         # Create dictionaries to store items by language
         need_ocr_lists_by_lang = {}  # Dict of lists for each language
         img_crop_lists_by_lang = {}  # Dict of lists for each language
@@ -305,7 +416,7 @@ class BatchAnalyze:
                     # Get OCR results for this language's images
 
                     ocr_model = atom_model_manager.get_atom_model(
-                        atom_model_name='ocr',
+                        atom_model_name=AtomicModel.OCR,
                         det_db_box_thresh=0.3,
                         lang=lang
                     )
@@ -327,7 +438,7 @@ class BatchAnalyze:
                                                layout_res_item['poly'][4], layout_res_item['poly'][5]]
                             layout_res_width = layout_res_bbox[2] - layout_res_bbox[0]
                             layout_res_height = layout_res_bbox[3] - layout_res_bbox[1]
-                            if ocr_text in ['(204号', '(20', '(2', '(2号', '(20号'] and ocr_score < 0.8 and layout_res_width < layout_res_height:
+                            if ocr_text in ['(204号', '(20', '(2', '(2号', '(20号', '号', '(204'] and ocr_score < 0.8 and layout_res_width < layout_res_height:
                                 layout_res_item['category_id'] = 16
 
                     total_processed += len(img_crop_list)
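Stepping back from the diff, the new hybrid table flow in this file can be summarized in a short, self-contained sketch. Stub functions stand in for the real orientation/classification/recognition models; the 0.9 confidence threshold, the OCR-result guard, and the HTML trimming all follow the code above:

```python
def classify_table(img):
    """Stub for PaddleTableClsModel: returns (label, confidence)."""
    return "wired_table", 0.95

def wireless_predict(img):
    """Stub for the wireless (SLANet-plus) model: runs on every table first."""
    return "<html><table><tr><td>cell</td></tr></table></html>"

def wired_predict(img, ocr_result, draft_html):
    """Stub for the wired (UNet) model: refines wired or low-confidence tables."""
    return draft_html  # the real model rebuilds the structure from OCR boxes

def parse_table(img, ocr_result):
    label, score = classify_table(img)
    html = wireless_predict(img)
    if label == "wired_table" or (label == "wireless_table" and score < 0.9):
        if ocr_result:  # the wired model is skipped when OCR found nothing
            html = wired_predict(img, ocr_result, html)
    # Final cleanup: keep only the <table>...</table> payload
    if "<table>" in html and "</table>" in html:
        html = html[html.find("<table>"): html.rfind("</table>") + len("</table>")]
    return html

print(parse_table(img=None, ocr_result=[("box", "text", 0.99)]))
```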

+ 84 - 18
mineru/backend/pipeline/model_init.py

@@ -4,22 +4,57 @@ import torch
 from loguru import logger
 
 from .model_list import AtomicModel
-from ...model.layout.doclayout_yolo import DocLayoutYOLOModel
+from ...model.layout.doclayoutyolo import DocLayoutYOLOModel
 from ...model.mfd.yolo_v8 import YOLOv8MFDModel
 from ...model.mfr.unimernet.Unimernet import UnimernetModel
 from ...model.ocr.paddleocr2pytorch.pytorch_paddle import PytorchPaddleOCR
-from ...model.table.rapid_table import RapidTableModel
+from ...model.ori_cls.paddle_ori_cls import PaddleOrientationClsModel
+from ...model.table.cls.paddle_table_cls import PaddleTableClsModel
+# from ...model.table.rec.RapidTable import RapidTableModel
+from ...model.table.rec.slanet_plus.main import RapidTableModel
+from ...model.table.rec.unet_table.main import UnetTableModel
 from ...utils.enum_class import ModelPath
 from ...utils.models_download_utils import auto_download_and_get_model_root_path
 
 
-def table_model_init(lang=None):
+def img_orientation_cls_model_init():
     atom_model_manager = AtomModelSingleton()
     ocr_engine = atom_model_manager.get_atom_model(
-        atom_model_name='ocr',
+        atom_model_name=AtomicModel.OCR,
         det_db_box_thresh=0.5,
         det_db_unclip_ratio=1.6,
-        lang=lang
+        lang="ch_lite",
+        enable_merge_det_boxes=False
+    )
+    cls_model = PaddleOrientationClsModel(ocr_engine)
+    return cls_model
+
+
+def table_cls_model_init():
+    return PaddleTableClsModel()
+
+
+def wired_table_model_init(lang=None):
+    atom_model_manager = AtomModelSingleton()
+    ocr_engine = atom_model_manager.get_atom_model(
+        atom_model_name=AtomicModel.OCR,
+        det_db_box_thresh=0.5,
+        det_db_unclip_ratio=1.6,
+        lang=lang,
+        enable_merge_det_boxes=False
+    )
+    table_model = UnetTableModel(ocr_engine)
+    return table_model
+
+
+def wireless_table_model_init(lang=None):
+    atom_model_manager = AtomModelSingleton()
+    ocr_engine = atom_model_manager.get_atom_model(
+        atom_model_name=AtomicModel.OCR,
+        det_db_box_thresh=0.5,
+        det_db_unclip_ratio=1.6,
+        lang=lang,
+        enable_merge_det_boxes=False
     )
     table_model = RapidTableModel(ocr_engine)
     return table_model
@@ -45,21 +80,23 @@ def doclayout_yolo_model_init(weight, device='cpu'):
 
 def ocr_model_init(det_db_box_thresh=0.3,
                    lang=None,
-                   use_dilation=True,
                    det_db_unclip_ratio=1.8,
+                   enable_merge_det_boxes=True
                    ):
     if lang is not None and lang != '':
         model = PytorchPaddleOCR(
             det_db_box_thresh=det_db_box_thresh,
             lang=lang,
-            use_dilation=use_dilation,
+            use_dilation=True,
             det_db_unclip_ratio=det_db_unclip_ratio,
+            enable_merge_det_boxes=enable_merge_det_boxes,
         )
     else:
         model = PytorchPaddleOCR(
             det_db_box_thresh=det_db_box_thresh,
-            use_dilation=use_dilation,
+            use_dilation=True,
             det_db_unclip_ratio=det_db_unclip_ratio,
+            enable_merge_det_boxes=enable_merge_det_boxes,
         )
     return model
 
@@ -76,12 +113,20 @@ class AtomModelSingleton:
     def get_atom_model(self, atom_model_name: str, **kwargs):
 
         lang = kwargs.get('lang', None)
-        table_model_name = kwargs.get('table_model_name', None)
 
-        if atom_model_name in [AtomicModel.OCR]:
-            key = (atom_model_name, lang)
-        elif atom_model_name in [AtomicModel.Table]:
-            key = (atom_model_name, table_model_name, lang)
+        if atom_model_name in [AtomicModel.WiredTable, AtomicModel.WirelessTable]:
+            key = (
+                atom_model_name,
+                lang
+            )
+        elif atom_model_name in [AtomicModel.OCR]:
+            key = (
+                atom_model_name,
+                kwargs.get('det_db_box_thresh', 0.3),
+                lang,
+                kwargs.get('det_db_unclip_ratio', 1.8),
+                kwargs.get('enable_merge_det_boxes', True)
+            )
         else:
             key = atom_model_name
 
@@ -108,13 +153,23 @@ def atom_model_init(model_name: str, **kwargs):
         )
     elif model_name == AtomicModel.OCR:
         atom_model = ocr_model_init(
-            kwargs.get('det_db_box_thresh'),
+            kwargs.get('det_db_box_thresh', 0.3),
             kwargs.get('lang'),
+            kwargs.get('det_db_unclip_ratio', 1.8),
+            kwargs.get('enable_merge_det_boxes', True)
         )
-    elif model_name == AtomicModel.Table:
-        atom_model = table_model_init(
+    elif model_name == AtomicModel.WirelessTable:
+        atom_model = wireless_table_model_init(
             kwargs.get('lang'),
         )
+    elif model_name == AtomicModel.WiredTable:
+        atom_model = wired_table_model_init(
+            kwargs.get('lang'),
+        )
+    elif model_name == AtomicModel.TableCls:
+        atom_model = table_cls_model_init()
+    elif model_name == AtomicModel.ImgOrientationCls:
+        atom_model = img_orientation_cls_model_init()
     else:
         logger.error('model name not allow')
         exit(1)
@@ -174,8 +229,19 @@ class MineruPipelineModel:
         )
         # init table model
         if self.apply_table:
-            self.table_model = atom_model_manager.get_atom_model(
-                atom_model_name=AtomicModel.Table,
+            self.wired_table_model = atom_model_manager.get_atom_model(
+                atom_model_name=AtomicModel.WiredTable,
+                lang=self.lang,
+            )
+            self.wireless_table_model = atom_model_manager.get_atom_model(
+                atom_model_name=AtomicModel.WirelessTable,
+                lang=self.lang,
+            )
+            self.table_cls_model = atom_model_manager.get_atom_model(
+                atom_model_name=AtomicModel.TableCls,
+            )
+            self.img_orientation_cls_model = atom_model_manager.get_atom_model(
+                atom_model_name=AtomicModel.ImgOrientationCls,
                 lang=self.lang,
             )
 

+ 7 - 3
mineru/backend/pipeline/model_json_to_middle_json.py

@@ -20,14 +20,16 @@ from mineru.utils.ocr_utils import OcrConfidence
 from mineru.utils.span_block_fix import fill_spans_in_blocks, fix_discarded_block, fix_block_spans
 from mineru.utils.span_pre_proc import remove_outside_spans, remove_overlaps_low_confidence_spans, \
     remove_overlaps_min_spans, txt_spans_extract
+from mineru.utils.table_merge import merge_table
 from mineru.version import __version__
-from mineru.utils.hash_utils import str_md5
+from mineru.utils.hash_utils import bytes_md5
 
 
 def page_model_info_to_page_info(page_model_info, image_dict, page, image_writer, page_index, ocr_enable=False, formula_enabled=True):
     scale = image_dict["scale"]
     page_pil_img = image_dict["img_pil"]
-    page_img_md5 = str_md5(image_dict["img_base64"])
+    # page_img_md5 = str_md5(image_dict["img_base64"])
+    page_img_md5 = bytes_md5(page_pil_img.tobytes())
     page_w, page_h = map(int, page.get_size())
     magic_model = MagicModel(page_model_info, scale)
 
@@ -210,7 +212,6 @@ def result_to_middle_json(model_list, images_list, pdf_doc, image_writer, lang=N
         atom_model_manager = AtomModelSingleton()
         ocr_model = atom_model_manager.get_atom_model(
             atom_model_name='ocr',
-            ocr_show_log=False,
             det_db_box_thresh=0.3,
             lang=lang
         )
@@ -229,6 +230,9 @@ def result_to_middle_json(model_list, images_list, pdf_doc, image_writer, lang=N
     """分段"""
     para_split(middle_json["pdf_info"])
 
+    """表格跨页合并"""
+    merge_table(middle_json["pdf_info"])
+
     """llm优化"""
     llm_aided_config = get_llm_aided_config()
 

+ 5 - 1
mineru/backend/pipeline/model_list.py

@@ -3,4 +3,8 @@ class AtomicModel:
     MFD = "mfd"
     MFR = "mfr"
     OCR = "ocr"
-    Table = "table"
+    WirelessTable = "wireless_table"
+    WiredTable = "wired_table"
+    TableCls = "table_cls"
+    ImgOrientationCls = "img_ori_cls"
+

+ 4 - 4
mineru/backend/pipeline/pipeline_analyze.py

@@ -1,11 +1,12 @@
 import os
 import time
 from typing import List, Tuple
-import PIL.Image
+from PIL import Image
 from loguru import logger
 
 from .model_init import MineruPipelineModel
 from mineru.utils.config_reader import get_device
+from ...utils.enum_class import ImageType
 from ...utils.pdf_classify import classify
 from ...utils.pdf_image_tools import load_images_from_pdf
 from ...utils.model_utils import get_vram, clean_memory
@@ -98,7 +99,7 @@ def doc_analyze(
         _lang = lang_list[pdf_idx]
 
         # 收集每个数据集中的页面
-        images_list, pdf_doc = load_images_from_pdf(pdf_bytes)
+        images_list, pdf_doc = load_images_from_pdf(pdf_bytes, image_type=ImageType.PIL)
         all_image_lists.append(images_list)
         all_pdf_docs.append(pdf_doc)
         for page_idx in range(len(images_list)):
@@ -147,10 +148,9 @@ def doc_analyze(
 
 
 def batch_image_analyze(
-        images_with_extra_info: List[Tuple[PIL.Image.Image, bool, str]],
+        images_with_extra_info: List[Tuple[Image.Image, bool, str]],
         formula_enable=True,
         table_enable=True):
-    # os.environ['CUDA_VISIBLE_DEVICES'] = str(idx)
 
     from .batch_analyze import BatchAnalyze
 

+ 14 - 2
mineru/backend/pipeline/pipeline_middle_json_mkcontent.py

@@ -188,7 +188,7 @@ def merge_para_with_text(para_block):
     return para_text
 
 
-def make_blocks_to_content_list(para_block, img_buket_path, page_idx):
+def make_blocks_to_content_list(para_block, img_buket_path, page_idx, page_size):
     para_type = para_block['type']
     para_content = {}
     if para_type in [BlockType.TEXT, BlockType.LIST, BlockType.INDEX]:
@@ -245,6 +245,17 @@ def make_blocks_to_content_list(para_block, img_buket_path, page_idx):
             if block['type'] == BlockType.TABLE_FOOTNOTE:
                 para_content[BlockType.TABLE_FOOTNOTE].append(merge_para_with_text(block))
 
+    page_width, page_height = page_size
+    para_bbox = para_block.get('bbox')
+    if para_bbox:
+        x0, y0, x1, y1 = para_bbox
+        para_content['bbox'] = [
+            int(x0 * 1000 / page_width),
+            int(y0 * 1000 / page_height),
+            int(x1 * 1000 / page_width),
+            int(y1 * 1000 / page_height),
+        ]
+
     para_content['page_idx'] = page_idx
 
     return para_content
@@ -258,6 +269,7 @@ def union_make(pdf_info_dict: list,
     for page_info in pdf_info_dict:
         paras_of_layout = page_info.get('para_blocks')
         page_idx = page_info.get('page_idx')
+        page_size = page_info.get('page_size')
         if not paras_of_layout:
             continue
         if make_mode in [MakeMode.MM_MD, MakeMode.NLP_MD]:
@@ -265,7 +277,7 @@ def union_make(pdf_info_dict: list,
             output_content.extend(page_markdown)
         elif make_mode == MakeMode.CONTENT_LIST:
             for para_block in paras_of_layout:
-                para_content = make_blocks_to_content_list(para_block, img_buket_path, page_idx)
+                para_content = make_blocks_to_content_list(para_block, img_buket_path, page_idx, page_size)
                 if para_content:
                     output_content.append(para_content)
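
The bbox written into each content-list entry is normalized to a 0-1000 grid, so consumers do not need to know the rendered page size. A worked example of the arithmetic above:

    def normalize_bbox(bbox, page_size):
        # map absolute page coordinates onto a resolution-independent 0-1000 grid
        page_width, page_height = page_size
        x0, y0, x1, y1 = bbox
        return [
            int(x0 * 1000 / page_width),
            int(y0 * 1000 / page_height),
            int(x1 * 1000 / page_width),
            int(y1 * 1000 / page_height),
        ]

    print(normalize_bbox([100, 50, 300, 150], (600, 800)))  # [166, 62, 500, 187]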
 

+ 10 - 4
mineru/backend/vlm/hf_predictor.py

@@ -4,7 +4,7 @@ from typing import Iterable, List, Optional, Union
 import torch
 from PIL import Image
 from tqdm import tqdm
-from transformers import AutoTokenizer, BitsAndBytesConfig
+from transformers import AutoTokenizer, BitsAndBytesConfig, __version__
 
 from ...model.vlm_hf_model import Mineru2QwenForCausalLM
 from ...model.vlm_hf_model.image_processing_mineru2 import process_images
@@ -66,7 +66,11 @@ class HuggingfacePredictor(BasePredictor):
                 bnb_4bit_quant_type="nf4",
             )
         else:
-            kwargs["torch_dtype"] = torch_dtype
+            from packaging import version
+            if version.parse(__version__) >= version.parse("4.56.0"):
+                kwargs["dtype"] = torch_dtype
+            else:
+                kwargs["torch_dtype"] = torch_dtype
 
         if use_flash_attn:
             kwargs["attn_implementation"] = "flash_attention_2"
@@ -137,12 +141,14 @@ class HuggingfacePredictor(BasePredictor):
         image_tensor = image_tensor.to(device=self.model.device, dtype=self.model.dtype)
         image_sizes = [[*image_obj.size]]
 
-        input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids
-        input_ids = input_ids.to(device=self.model.device)
+        encoded_inputs = self.tokenizer(prompt, return_tensors="pt")
+        input_ids = encoded_inputs.input_ids.to(device=self.model.device)
+        attention_mask = encoded_inputs.attention_mask.to(device=self.model.device)
 
         with torch.inference_mode():
             output_ids = self.model.generate(
                 input_ids,
+                attention_mask=attention_mask,
                 images=image_tensor,
                 image_sizes=image_sizes,
                 use_cache=True,
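
The version gate above tracks transformers' switch of `from_pretrained`'s `torch_dtype` argument to `dtype` at 4.56, which is what this hunk encodes. Extracted as a standalone helper:

    import torch
    import transformers
    from packaging import version

    def dtype_kwargs(torch_dtype=torch.float16) -> dict:
        # transformers >= 4.56 expects `dtype`; older releases expect `torch_dtype`
        if version.parse(transformers.__version__) >= version.parse("4.56.0"):
            return {"dtype": torch_dtype}
        return {"torch_dtype": torch_dtype}

    print(dtype_kwargs())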

+ 11 - 2
mineru/backend/vlm/token_to_middle_json.py

@@ -1,13 +1,16 @@
+import os
 import time
 from loguru import logger
 import numpy as np
 import cv2
-from mineru.utils.config_reader import get_llm_aided_config
+from mineru.utils.config_reader import get_llm_aided_config, get_table_enable
 from mineru.utils.cut_image import cut_image_and_table
 from mineru.utils.enum_class import ContentType
 from mineru.utils.hash_utils import str_md5
 from mineru.backend.vlm.vlm_magic_model import MagicModel
 from mineru.utils.pdf_image_tools import get_crop_img
+from mineru.utils.pdf_reader import base64_to_pil_image
+from mineru.utils.table_merge import merge_table
 from mineru.version import __version__
 
 heading_level_import_success = False
@@ -32,7 +35,8 @@ def token_to_page_info(token, image_dict, page, image_writer, page_index) -> dic
     # Extract every complete block; each runs from <|box_start|> to <|md_end|> or <|im_end|>
 
     scale = image_dict["scale"]
-    page_pil_img = image_dict["img_pil"]
+    # page_pil_img = image_dict["img_pil"]
+    page_pil_img = base64_to_pil_image(image_dict["img_base64"])
     page_img_md5 = str_md5(image_dict["img_base64"])
     width, height = map(int, page.get_size())
 
@@ -90,6 +94,11 @@ def result_to_middle_json(token_list, images_list, pdf_doc, image_writer):
         page_info = token_to_page_info(token, image_dict, page, image_writer, index)
         middle_json["pdf_info"].append(page_info)
 
+    """Merge tables across pages"""
+    table_enable = get_table_enable(os.getenv('MINERU_VLM_TABLE_ENABLE', 'True').lower() == 'true')
+    if table_enable:
+        merge_table(middle_json["pdf_info"])
+
     """LLM-aided heading-level optimization"""
     if heading_level_import_success:
         llm_aided_title_start_time = time.time()
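
Table merging on the VLM path is gated by an environment variable with a truthy-string default; a minimal sketch of the gate in isolation:

    import os

    def vlm_table_merge_enabled() -> bool:
        # any casing of "true" enables the merge; unset defaults to enabled
        return os.getenv("MINERU_VLM_TABLE_ENABLE", "True").lower() == "true"

    os.environ["MINERU_VLM_TABLE_ENABLE"] = "false"
    print(vlm_table_merge_enabled())  # False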

+ 3 - 2
mineru/backend/vlm/vlm_analyze.py

@@ -8,6 +8,7 @@ from mineru.utils.pdf_image_tools import load_images_from_pdf
 from .base_predictor import BasePredictor
 from .predictor import get_predictor
 from .token_to_middle_json import result_to_middle_json
+from ...utils.enum_class import ImageType
 from ...utils.models_download_utils import auto_download_and_get_model_root_path
 
 
@@ -53,7 +54,7 @@ def doc_analyze(
         predictor = ModelSingleton().get_model(backend, model_path, server_url, **kwargs)
 
     # load_images_start = time.time()
-    images_list, pdf_doc = load_images_from_pdf(pdf_bytes)
+    images_list, pdf_doc = load_images_from_pdf(pdf_bytes, image_type=ImageType.BASE64)
     images_base64_list = [image_dict["img_base64"] for image_dict in images_list]
     # load_images_time = round(time.time() - load_images_start, 2)
     # logger.info(f"load images cost: {load_images_time}, speed: {round(len(images_base64_list)/load_images_time, 3)} images/s")
@@ -80,7 +81,7 @@ async def aio_doc_analyze(
         predictor = ModelSingleton().get_model(backend, model_path, server_url, **kwargs)
 
     # load_images_start = time.time()
-    images_list, pdf_doc = load_images_from_pdf(pdf_bytes)
+    images_list, pdf_doc = load_images_from_pdf(pdf_bytes, image_type=ImageType.BASE64)
     images_base64_list = [image_dict["img_base64"] for image_dict in images_list]
     # load_images_time = round(time.time() - load_images_start, 2)
     # logger.info(f"load images cost: {load_images_time}, speed: {round(len(images_base64_list)/load_images_time, 3)} images/s")

+ 14 - 2
mineru/backend/vlm/vlm_middle_json_mkcontent.py

@@ -125,7 +125,7 @@ def mk_blocks_to_markdown(para_blocks, make_mode, formula_enable, table_enable,
 
 
 
-def make_blocks_to_content_list(para_block, img_buket_path, page_idx):
+def make_blocks_to_content_list(para_block, img_buket_path, page_idx, page_size):
     para_type = para_block['type']
     para_content = {}
     if para_type in [BlockType.TEXT, BlockType.LIST, BlockType.INDEX]:
@@ -179,6 +179,17 @@ def make_blocks_to_content_list(para_block, img_buket_path, page_idx):
             if block['type'] == BlockType.TABLE_FOOTNOTE:
                 para_content[BlockType.TABLE_FOOTNOTE].append(merge_para_with_text(block))
 
+    page_width, page_height = page_size
+    para_bbox = para_block.get('bbox')
+    if para_bbox:
+        x0, y0, x1, y1 = para_bbox
+        para_content['bbox'] = [
+            int(x0 * 1000 / page_width),
+            int(y0 * 1000 / page_height),
+            int(x1 * 1000 / page_width),
+            int(y1 * 1000 / page_height),
+        ]
+
     para_content['page_idx'] = page_idx
 
     return para_content
@@ -195,6 +206,7 @@ def union_make(pdf_info_dict: list,
     for page_info in pdf_info_dict:
         paras_of_layout = page_info.get('para_blocks')
         page_idx = page_info.get('page_idx')
+        page_size = page_info.get('page_size')
         if not paras_of_layout:
             continue
         if make_mode in [MakeMode.MM_MD, MakeMode.NLP_MD]:
@@ -202,7 +214,7 @@ def union_make(pdf_info_dict: list,
             output_content.extend(page_markdown)
         elif make_mode == MakeMode.CONTENT_LIST:
             for para_block in paras_of_layout:
-                para_content = make_blocks_to_content_list(para_block, img_buket_path, page_idx)
+                para_content = make_blocks_to_content_list(para_block, img_buket_path, page_idx, page_size)
                 output_content.append(para_content)
 
     if make_mode in [MakeMode.MM_MD, MakeMode.NLP_MD]:

+ 1 - 1
mineru/cli/client.py

@@ -62,7 +62,7 @@ from .common import do_parse, read_fn, pdf_suffixes, image_suffixes
     '-l',
     '--lang',
     'lang',
-    type=click.Choice(['ch', 'ch_server', 'ch_lite', 'en', 'korean', 'japan', 'chinese_cht', 'ta', 'te', 'ka',
+    type=click.Choice(['ch', 'ch_server', 'ch_lite', 'en', 'korean', 'japan', 'chinese_cht', 'ta', 'te', 'ka', 'th', 'el',
                        'latin', 'arabic', 'east_slavic', 'cyrillic', 'devanagari']),
     help="""
     Input the languages in the pdf (if known) to improve OCR accuracy.  Optional.

+ 5 - 1
mineru/cli/common.py

@@ -9,7 +9,7 @@ import pypdfium2 as pdfium
 from loguru import logger
 
 from mineru.data.data_reader_writer import FileBasedDataWriter
-from mineru.utils.draw_bbox import draw_layout_bbox, draw_span_bbox
+from mineru.utils.draw_bbox import draw_layout_bbox, draw_span_bbox, draw_line_sort_bbox
 from mineru.utils.enum_class import MakeMode
 from mineru.utils.pdf_image_tools import images_bytes_to_pdf_bytes
 from mineru.backend.vlm.vlm_middle_json_mkcontent import union_make as vlm_union_make
@@ -102,6 +102,7 @@ def _process_output(
         model_output=None,
         is_pipeline=True
 ):
+    f_draw_line_sort_bbox = False
     from mineru.backend.pipeline.pipeline_middle_json_mkcontent import union_make as pipeline_union_make
     """处理输出文件"""
     if f_draw_layout_bbox:
@@ -116,6 +117,9 @@ def _process_output(
             pdf_bytes,
         )
 
+    if f_draw_line_sort_bbox:
+        draw_line_sort_bbox(pdf_info, pdf_bytes, local_md_dir, f"{pdf_file_name}_line_sort.pdf")
+
     image_dir = str(os.path.basename(local_image_dir))
 
     if f_dump_md:

+ 122 - 39
mineru/cli/fast_api.py

@@ -1,12 +1,17 @@
 import uuid
 import os
+import re
+import tempfile
+import asyncio
 import uvicorn
 import click
+import zipfile
 from pathlib import Path
-from glob import glob
+import glob
 from fastapi import FastAPI, UploadFile, File, Form
 from fastapi.middleware.gzip import GZipMiddleware
-from fastapi.responses import JSONResponse
+from fastapi.responses import JSONResponse, FileResponse
+from starlette.background import BackgroundTask
 from typing import List, Optional
 from loguru import logger
 from base64 import b64encode
@@ -18,6 +23,27 @@ from mineru.version import __version__
 app = FastAPI()
 app.add_middleware(GZipMiddleware, minimum_size=1000)
 
+
+def sanitize_filename(filename: str) -> str:
+    """
+    Sanitize a filename before it goes into the zip archive.
+    Removes path-traversal characters; keeps Unicode letters, digits, and ._-
+    Hidden files (leading dot) are not allowed.
+    """
+    sanitized = re.sub(r'[/\\\.]{2,}|[/\\]', '', filename)
+    sanitized = re.sub(r'[^\w.-]', '_', sanitized, flags=re.UNICODE)
+    if sanitized.startswith('.'):
+        sanitized = '_' + sanitized[1:]
+    return sanitized or 'unnamed'
+
+def cleanup_file(file_path: str) -> None:
+    """Clean up the temporary zip file."""
+    try:
+        if os.path.exists(file_path):
+            os.remove(file_path)
+    except Exception as e:
+        logger.warning(f"fail clean file {file_path}: {e}")
+
 def encode_image(image_path: str) -> str:
     """Encode image using base64"""
     with open(image_path, "rb") as f:
@@ -48,6 +74,7 @@ async def parse_pdf(
         return_model_output: bool = Form(False),
         return_content_list: bool = Form(False),
         return_images: bool = Form(False),
+        response_format_zip: bool = Form(False),
         start_page_id: int = Form(0),
         end_page_id: int = Form(99999),
 ):
@@ -121,45 +148,101 @@ async def parse_pdf(
             **config
         )
 
-        # Build result paths
-        result_dict = {}
-        for pdf_name in pdf_file_names:
-            result_dict[pdf_name] = {}
-            data = result_dict[pdf_name]
-
-            if backend.startswith("pipeline"):
-                parse_dir = os.path.join(unique_dir, pdf_name, parse_method)
-            else:
-                parse_dir = os.path.join(unique_dir, pdf_name, "vlm")
-
-            if os.path.exists(parse_dir):
-                if return_md:
-                    data["md_content"] = get_infer_result(".md", pdf_name, parse_dir)
-                if return_middle_json:
-                    data["middle_json"] = get_infer_result("_middle.json", pdf_name, parse_dir)
-                if return_model_output:
+        # Choose the response type based on response_format_zip
+        if response_format_zip:
+            zip_fd, zip_path = tempfile.mkstemp(suffix=".zip", prefix="mineru_results_")
+            os.close(zip_fd) 
+            with zipfile.ZipFile(zip_path, "w", compression=zipfile.ZIP_DEFLATED) as zf:
+                for pdf_name in pdf_file_names:
+                    safe_pdf_name = sanitize_filename(pdf_name)
                     if backend.startswith("pipeline"):
-                        data["model_output"] = get_infer_result("_model.json", pdf_name, parse_dir)
+                        parse_dir = os.path.join(unique_dir, pdf_name, parse_method)
                     else:
-                        data["model_output"] = get_infer_result("_model_output.txt", pdf_name, parse_dir)
-                if return_content_list:
-                    data["content_list"] = get_infer_result("_content_list.json", pdf_name, parse_dir)
-                if return_images:
-                    image_paths = glob(f"{parse_dir}/images/*.jpg")
-                    data["images"] = {
-                        os.path.basename(
-                            image_path
-                        ): f"data:image/jpeg;base64,{encode_image(image_path)}"
-                        for image_path in image_paths
-                    }
-        return JSONResponse(
-            status_code=200,
-            content={
-                "backend": backend,
-                "version": __version__,
-                "results": result_dict
-            }
-        )
+                        parse_dir = os.path.join(unique_dir, pdf_name, "vlm")
+
+                    if not os.path.exists(parse_dir):
+                        continue
+
+                    # Write text-based outputs
+                    if return_md:
+                        path = os.path.join(parse_dir, f"{pdf_name}.md")
+                        if os.path.exists(path):
+                            zf.write(path, arcname=os.path.join(safe_pdf_name, f"{safe_pdf_name}.md"))
+
+                    if return_middle_json:
+                        path = os.path.join(parse_dir, f"{pdf_name}_middle.json")
+                        if os.path.exists(path):
+                            zf.write(path, arcname=os.path.join(safe_pdf_name, f"{safe_pdf_name}_middle.json"))
+
+                    if return_model_output:
+                        if backend.startswith("pipeline"):
+                            path = os.path.join(parse_dir, f"{pdf_name}_model.json")
+                        else:
+                            path = os.path.join(parse_dir, f"{pdf_name}_model_output.txt")
+                        if os.path.exists(path): 
+                            zf.write(path, arcname=os.path.join(safe_pdf_name, os.path.basename(path)))
+
+                    if return_content_list:
+                        path = os.path.join(parse_dir, f"{pdf_name}_content_list.json")
+                        if os.path.exists(path):
+                            zf.write(path, arcname=os.path.join(safe_pdf_name, f"{safe_pdf_name}_content_list.json"))
+
+                    # Write images
+                    if return_images:
+                        images_dir = os.path.join(parse_dir, "images")
+                        image_paths = glob.glob(os.path.join(glob.escape(images_dir), "*.jpg"))
+                        for image_path in image_paths:
+                            zf.write(image_path, arcname=os.path.join(safe_pdf_name, "images", os.path.basename(image_path)))
+
+            return FileResponse(
+                path=zip_path,
+                media_type="application/zip",
+                filename="results.zip",
+                background=BackgroundTask(cleanup_file, zip_path)
+            )
+        else:
+            # Build the JSON result
+            result_dict = {}
+            for pdf_name in pdf_file_names:
+                result_dict[pdf_name] = {}
+                data = result_dict[pdf_name]
+
+                if backend.startswith("pipeline"):
+                    parse_dir = os.path.join(unique_dir, pdf_name, parse_method)
+                else:
+                    parse_dir = os.path.join(unique_dir, pdf_name, "vlm")
+
+                if os.path.exists(parse_dir):
+                    if return_md:
+                        data["md_content"] = get_infer_result(".md", pdf_name, parse_dir)
+                    if return_middle_json:
+                        data["middle_json"] = get_infer_result("_middle.json", pdf_name, parse_dir)
+                    if return_model_output:
+                        if backend.startswith("pipeline"):
+                            data["model_output"] = get_infer_result("_model.json", pdf_name, parse_dir)
+                        else:
+                            data["model_output"] = get_infer_result("_model_output.txt", pdf_name, parse_dir)
+                    if return_content_list:
+                        data["content_list"] = get_infer_result("_content_list.json", pdf_name, parse_dir)
+                    if return_images:
+                        images_dir = os.path.join(parse_dir, "images")
+                        safe_pattern = os.path.join(glob.escape(images_dir), "*.jpg")
+                        image_paths = glob.glob(safe_pattern)
+                        data["images"] = {
+                            os.path.basename(
+                                image_path
+                            ): f"data:image/jpeg;base64,{encode_image(image_path)}"
+                            for image_path in image_paths
+                        }
+
+            return JSONResponse(
+                status_code=200,
+                content={
+                    "backend": backend,
+                    "version": __version__,
+                    "results": result_dict
+                }
+            )
     except Exception as e:
         logger.exception(e)
         return JSONResponse(
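
A hedged client-side sketch of the new zip response path. The boolean form fields come from this diff; the `/file_parse` route, the `files` field name, and the host/port are assumptions about the running service, not guaranteed by the hunk above:

    import requests

    with open("demo.pdf", "rb") as f:  # any local PDF
        resp = requests.post(
            "http://127.0.0.1:8000/file_parse",  # assumed route and default port
            files={"files": f},
            data={"return_md": "true", "return_images": "true", "response_format_zip": "true"},
            timeout=600,
        )
    with open("results.zip", "wb") as out:
        out.write(resp.content)  # one folder per PDF inside the archive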

+ 1 - 1
mineru/cli/gradio_app.py

@@ -145,7 +145,7 @@ devanagari_lang = [
         'hi', 'mr', 'ne', 'bh', 'mai', 'ang', 'bho', 'mah', 'sck', 'new', 'gom',  # noqa: E126
         'sa', 'bgc'
 ]
-other_lang = ['ch', 'ch_lite', 'ch_server', 'en', 'korean', 'japan', 'chinese_cht', 'ta', 'te', 'ka']
+other_lang = ['ch', 'ch_lite', 'ch_server', 'en', 'korean', 'japan', 'chinese_cht', 'ta', 'te', 'ka', 'el', 'th']
 add_lang = ['latin', 'arabic', 'east_slavic', 'cyrillic', 'devanagari']
 
 # all_lang = ['', 'auto']

+ 49 - 3
mineru/model/layout/doclayout_yolo.py → mineru/model/layout/doclayoutyolo.py

@@ -1,8 +1,13 @@
+import os
 from typing import List, Dict, Union
+
 from doclayout_yolo import YOLOv10
 from tqdm import tqdm
 import numpy as np
-from PIL import Image
+from PIL import Image, ImageDraw
+
+from mineru.utils.enum_class import ModelPath
+from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
 
 
 class DocLayoutYOLOModel:
@@ -60,14 +65,55 @@ class DocLayoutYOLOModel:
         with tqdm(total=len(images), desc="Layout Predict") as pbar:
             for idx in range(0, len(images), batch_size):
                 batch = images[idx: idx + batch_size]
+                if batch_size == 1:
+                    conf = 0.9 * self.conf
+                else:
+                    conf = self.conf
                 predictions = self.model.predict(
                     batch,
                     imgsz=self.imgsz,
-                    conf=self.conf,
+                    conf=conf,
                     iou=self.iou,
                     verbose=False,
                 )
                 for pred in predictions:
                     results.append(self._parse_prediction(pred))
                 pbar.update(len(batch))
-        return results
+        return results
+
+    def visualize(
+            self,
+            image: Union[np.ndarray, Image.Image],
+            results: List
+    ) -> Image.Image:
+
+        if isinstance(image, np.ndarray):
+            image = Image.fromarray(image)
+
+        draw = ImageDraw.Draw(image)
+        for res in results:
+            poly = res['poly']
+            xmin, ymin, xmax, ymax = poly[0], poly[1], poly[4], poly[5]
+            print(
+                f"Detected box: {xmin}, {ymin}, {xmax}, {ymax}, Category ID: {res['category_id']}, Score: {res['score']}")
+            # Draw the box on the image with PIL
+            draw.rectangle([xmin, ymin, xmax, ymax], outline="red", width=2)
+            # Draw the confidence score next to the box
+            draw.text((xmax + 10, ymin + 10), f"{res['score']:.2f}", fill="red", font_size=22)
+        return image
+
+
+if __name__ == '__main__':
+    image_path = r"C:\Users\zhaoxiaomeng\Downloads\下载1.jpg"
+    doclayout_yolo_weights = os.path.join(auto_download_and_get_model_root_path(ModelPath.doclayout_yolo), ModelPath.doclayout_yolo)
+    device = 'cuda'
+    model = DocLayoutYOLOModel(
+        weight=doclayout_yolo_weights,
+        device=device,
+    )
+    image = Image.open(image_path)
+    results = model.predict(image)
+
+    image = model.visualize(image, results)
+
+    image.show()  # display the image

+ 55 - 2
mineru/model/mfd/yolo_v8.py

@@ -1,8 +1,12 @@
+import os
 from typing import List, Union
 from tqdm import tqdm
 from ultralytics import YOLO
 import numpy as np
-from PIL import Image
+from PIL import Image, ImageDraw
+
+from mineru.utils.enum_class import ModelPath
+from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
 
 
 class YOLOv8MFDModel:
@@ -50,4 +54,53 @@ class YOLOv8MFDModel:
                 batch_preds = self._run_predict(batch, is_batch=True)
                 results.extend(batch_preds)
                 pbar.update(len(batch))
-        return results
+        return results
+
+    def visualize(
+        self,
+        image: Union[np.ndarray, Image.Image],
+        results: List
+    ) -> Image.Image:
+
+        if isinstance(image, np.ndarray):
+            image = Image.fromarray(image)
+
+        formula_list = []
+        for xyxy, conf, cla in zip(
+                results.boxes.xyxy.cpu(), results.boxes.conf.cpu(), results.boxes.cls.cpu()
+        ):
+            xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy]
+            new_item = {
+                "category_id": 13 + int(cla.item()),
+                "poly": [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax],
+                "score": round(float(conf.item()), 2),
+            }
+            formula_list.append(new_item)
+
+        draw = ImageDraw.Draw(image)
+        for res in formula_list:
+            poly = res['poly']
+            xmin, ymin, xmax, ymax = poly[0], poly[1], poly[4], poly[5]
+            print(
+                f"Detected box: {xmin}, {ymin}, {xmax}, {ymax}, Category ID: {res['category_id']}, Score: {res['score']}")
+            # Draw the box on the image with PIL
+            draw.rectangle([xmin, ymin, xmax, ymax], outline="red", width=2)
+            # Draw the confidence score next to the box
+            draw.text((xmax + 10, ymin + 10), f"{res['score']:.2f}", fill="red", font_size=22)
+        return image
+
+if __name__ == '__main__':
+    image_path = r"C:\Users\zhaoxiaomeng\Downloads\screenshot-20250821-192948.png"
+    yolo_v8_mfd_weights = os.path.join(auto_download_and_get_model_root_path(ModelPath.yolo_v8_mfd),
+                                          ModelPath.yolo_v8_mfd)
+    device = 'cuda'
+    model = YOLOv8MFDModel(
+        weight=yolo_v8_mfd_weights,
+        device=device,
+    )
+    image = Image.open(image_path)
+    results = model.predict(image)
+
+    image = model.visualize(image, results)
+
+    image.show()  # display the image

+ 2 - 2
mineru/model/mfr/unimernet/Unimernet.py

@@ -70,7 +70,7 @@ class UnimernetModel(object):
         # Collect images with their original indices
         for image_index in range(len(images_mfd_res)):
             mfd_res = images_mfd_res[image_index]
-            pil_img = images[image_index]
+            image = images[image_index]
             formula_list = []
 
             for idx, (xyxy, conf, cla) in enumerate(zip(
@@ -84,7 +84,7 @@ class UnimernetModel(object):
                     "latex": "",
                 }
                 formula_list.append(new_item)
-                bbox_img = pil_img.crop((xmin, ymin, xmax, ymax))
+                bbox_img = image[ymin:ymax, xmin:xmax]
                 area = (xmax - xmin) * (ymax - ymin)
 
                 curr_idx = len(mf_image_list)
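
The crop above assumes `images` now holds numpy arrays (HWC) rather than PIL images; the slice `image[ymin:ymax, xmin:xmax]` is equivalent to PIL's `crop((xmin, ymin, xmax, ymax))`, both exclusive on the right/bottom edge:

    import numpy as np
    from PIL import Image

    img = np.arange(100 * 100 * 3, dtype=np.uint8).reshape(100, 100, 3)
    xmin, ymin, xmax, ymax = 10, 20, 60, 80
    np_crop = img[ymin:ymax, xmin:xmax]  # rows (y) first, then columns (x)
    pil_crop = np.asarray(Image.fromarray(img).crop((xmin, ymin, xmax, ymax)))
    assert (np_crop == pil_crop).all()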

+ 8 - 4
mineru/model/ocr/paddleocr2pytorch/pytorch_paddle.py

@@ -56,6 +56,7 @@ class PytorchPaddleOCR(TextSystem):
         args = parser.parse_args(args)
 
         self.lang = kwargs.get('lang', 'ch')
+        self.enable_merge_det_boxes = kwargs.get("enable_merge_det_boxes", True)
 
         device = get_device()
         if device == 'cpu' and self.lang in ['ch', 'ch_server', 'japan', 'chinese_cht']:
@@ -88,7 +89,7 @@ class PytorchPaddleOCR(TextSystem):
         kwargs['det_model_path'] = det_model_path
         kwargs['rec_model_path'] = rec_model_path
         kwargs['rec_char_dict_path'] = os.path.join(root_dir, 'pytorchocr', 'utils', 'resources', 'dict', dict_file)
-        kwargs['rec_batch_num'] = 16
+        kwargs['rec_batch_num'] = 8
 
         kwargs['device'] = device
 
@@ -104,6 +105,7 @@ class PytorchPaddleOCR(TextSystem):
             rec=True,
             mfd_res=None,
             tqdm_enable=False,
+            tqdm_desc="OCR-rec Predict",
             ):
         assert isinstance(img, (np.ndarray, list, str, bytes))
         if isinstance(img, list) and det == True:
@@ -135,7 +137,8 @@ class PytorchPaddleOCR(TextSystem):
                         continue
                     dt_boxes = sorted_boxes(dt_boxes)
                     # merge_det_boxes and update_det_boxes both convert polys to bboxes and back, so heavily skewed text boxes must be filtered out first
-                    dt_boxes = merge_det_boxes(dt_boxes)
+                    if self.enable_merge_det_boxes:
+                        dt_boxes = merge_det_boxes(dt_boxes)
                     if mfd_res:
                         dt_boxes = update_det_boxes(dt_boxes, mfd_res)
                     tmp_res = [box.tolist() for box in dt_boxes]
@@ -147,7 +150,7 @@ class PytorchPaddleOCR(TextSystem):
                     if not isinstance(img, list):
                         img = preprocess_image(img)
                         img = [img]
-                    rec_res, elapse = self.text_recognizer(img, tqdm_enable=tqdm_enable)
+                    rec_res, elapse = self.text_recognizer(img, tqdm_enable=tqdm_enable, tqdm_desc=tqdm_desc)
                     # logger.debug("rec_res num  : {}, elapsed : {}".format(len(rec_res), elapse))
                     ocr_res.append(rec_res)
                 return ocr_res
@@ -172,7 +175,8 @@ class PytorchPaddleOCR(TextSystem):
         dt_boxes = sorted_boxes(dt_boxes)
 
         # merge_det_boxes and update_det_boxes both convert polys to bboxes and back, so heavily skewed text boxes must be filtered out first
-        dt_boxes = merge_det_boxes(dt_boxes)
+        if self.enable_merge_det_boxes:
+            dt_boxes = merge_det_boxes(dt_boxes)
 
         if mfd_res:
             dt_boxes = update_det_boxes(dt_boxes, mfd_res)
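
Callers that need raw per-line detection boxes (table cell matching, for instance) can now opt out of box merging. A usage sketch, assuming the constructor accepts these kwargs directly, as the `kwargs.get` calls above suggest:

    import numpy as np
    from mineru.model.ocr.paddleocr2pytorch.pytorch_paddle import PytorchPaddleOCR

    ocr = PytorchPaddleOCR(lang="ch", enable_merge_det_boxes=False)
    img = np.full((64, 256, 3), 255, dtype=np.uint8)  # blank placeholder image
    print(ocr.ocr(img, rec=False))  # det-only: unmerged text boxes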

+ 76 - 0
mineru/model/ocr/paddleocr2pytorch/pytorchocr/utils/resources/arch_config.yaml

@@ -568,4 +568,80 @@ eslav_PP-OCRv5_rec_infer:
           nrtr_dim: 384
           max_text_length: 25
 
+el_PP-OCRv5_rec_infer:
+  model_type: rec
+  algorithm: SVTR_LCNet
+  Transform:
+  Backbone:
+    name: PPLCNetV3
+    scale: 0.95
+  Head:
+    name: MultiHead
+    out_channels_list:
+      CTCLabelDecode: 356
+    head_list:
+      - CTCHead:
+          Neck:
+            name: svtr
+            dims: 120
+            depth: 2
+            hidden_dims: 120
+            kernel_size: [ 1, 3 ]
+            use_guide: True
+          Head:
+            fc_decay: 0.00001
+      - NRTRHead:
+          nrtr_dim: 384
+          max_text_length: 25
 
+th_PP-OCRv5_rec_infer:
+  model_type: rec
+  algorithm: SVTR_LCNet
+  Transform:
+  Backbone:
+    name: PPLCNetV3
+    scale: 0.95
+  Head:
+    name: MultiHead
+    out_channels_list:
+      CTCLabelDecode: 526
+    head_list:
+      - CTCHead:
+          Neck:
+            name: svtr
+            dims: 120
+            depth: 2
+            hidden_dims: 120
+            kernel_size: [ 1, 3 ]
+            use_guide: True
+          Head:
+            fc_decay: 0.00001
+      - NRTRHead:
+          nrtr_dim: 384
+          max_text_length: 25
+
+en_PP-OCRv5_rec_infer:
+  model_type: rec
+  algorithm: SVTR_LCNet
+  Transform:
+  Backbone:
+    name: PPLCNetV3
+    scale: 0.95
+  Head:
+    name: MultiHead
+    out_channels_list:
+      CTCLabelDecode: 438
+    head_list:
+      - CTCHead:
+          Neck:
+            name: svtr
+            dims: 120
+            depth: 2
+            hidden_dims: 120
+            kernel_size: [ 1, 3 ]
+            use_guide: True
+          Head:
+            fc_decay: 0.00001
+      - NRTRHead:
+          nrtr_dim: 384
+          max_text_length: 25

+ 354 - 0
mineru/model/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/ppocrv5_el_dict.txt

@@ -0,0 +1,354 @@
+!
+"
+#
+$
+%
+&
+'
+(
+)
+*
++
+,
+-
+.
+/
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+:
+;
+<
+=
+>
+?
+@
+[
+]
+_
+`
+^
+~
+™
+¿
+‖
+‗
+‘
+’
+‚
+‛
+“
+”
+„
+†
+‡
+…
+‰
+′
+″
+‴
+‵
+‶
+‷
+‹
+›
+‼
+‽
+‾
+€
+₤
+₡
+₽
+₴
+₹
+₿
+∓
+≈
+≠
+≡
+≤
+≥
+∑
+∏
+∫
+∬
+∭
+∮
+∯
+∰
+√
+∛
+∜
+∝
+∞
+∂
+∇
+∴
+∵
+∷
+∠
+∡
+∢
+∟
+∦
+∩
+∪
+∗
+∖
+∥
+∧
+∨
+⊂
+⊃
+⊥
+∈
+∋
+∉
+∅
+↑
+→
+↓
+↔
+↕
+←
+⇒
+⇐
+⇔
+∀
+∃
+∄
+Ⅰ
+Ⅱ
+Ⅲ
+Ⅳ
+Ⅴ
+Ⅵ
+Ⅶ
+Ⅷ
+Ⅸ
+Ⅹ
+Ⅺ
+Ⅻ
+ⅰ
+ⅱ
+ⅲ
+ⅳ
+ⅴ
+ⅵ
+ⅶ
+ⅷ
+ⅸ
+ⅹ
+ⅺ
+ⅻ
+①
+②
+③
+④
+⑤
+⑥
+⑦
+⑧
+⑨
+⑩
+❶
+❷
+❸
+❹
+❺
+❻
+❼
+❽
+❾
+❿
+A
+B
+C
+D
+E
+F
+G
+H
+I
+J
+K
+L
+M
+N
+O
+P
+Q
+R
+S
+T
+U
+V
+W
+X
+Y
+Z
+a
+b
+c
+d
+e
+f
+g
+h
+i
+j
+k
+l
+m
+n
+o
+p
+q
+r
+s
+t
+u
+v
+w
+x
+y
+z
+ο

+ 436 - 0
mineru/model/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/ppocrv5_en_dict.txt

@@ -0,0 +1,436 @@
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+A
+B
+C
+D
+E
+F
+G
+H
+I
+J
+K
+L
+M
+N
+O
+P
+Q
+R
+S
+T
+U
+V
+W
+X
+Y
+Z
+a
+b
+c
+d
+e
+f
+g
+h
+i
+j
+k
+l
+m
+n
+o
+p
+q
+r
+s
+t
+u
+v
+w
+x
+y
+z
+!
+"
+#
+$
+%
+&
+'
+(
+)
+*
++
+,
+-
+.
+/
+:
+;
+<
+=
+>
+?
+@
+[
+\
+]
+_
+`
+{
+|
+}
+^
+~
+℉
+№
+Ω
+℮
+™
+∆
+✓
+✔
+✗
+✘
+✕
+☑
+☒
+●
+▪
+▫
+◼
+▶
+◀
+⬆
+¿
+‐
+‑
+‒
+—
+―
+‖
+‗
+‘
+’
+‚
+‛
+“
+”
+„
+‟
+†
+‡
+‣
+․
+…
+‧
+‰
+‴
+‵
+‶
+‷
+‸
+‹
+›
+※
+‼
+‽
+‾
+−
+₤
+₡
+₹
+₽
+₴
+₿
+€
+Ⅰ
+Ⅱ
+Ⅲ
+Ⅳ
+Ⅴ
+Ⅵ
+Ⅶ
+Ⅷ
+Ⅸ
+Ⅹ
+Ⅺ
+Ⅻ
+ⅰ
+ⅱ
+ⅲ
+ⅳ
+ⅴ
+ⅵ
+ⅶ
+ⅷ
+ⅸ
+ⅹ
+ⅺ
+ⅻ
+➀
+➁
+➂
+➃
+➄
+➅
+➆
+➇
+➈
+➉
+➊
+➋
+➌
+➍
+➎
+➏
+➐
+➑
+➒
+➓
+❶
+❷
+❸
+❹
+❺
+❻
+❼
+❽
+❾
+❿
+①
+②
+③
+④
+⑤
+⑥
+⑦
+⑧
+⑨
+⑩
+↑
+→
+↓
+↕
+←
+↔
+⇒
+⇐
+⇔
+∀
+∃
+∄
+∴
+∵
+∝
+∞
+∩
+∪
+∂
+∫
+∬
+∭
+∮
+∯
+∰
+∑
+∏
+√
+∛
+∜
+∱
+∲
+∳
+∶
+∷
+∼
+∖
+∗
+≈
+≠
+≡
+≤
+≥
+⊂
+⊃
+⊥
+⊾
+⊿
+□
+∥
+∋
+′
+″
+ÿ
+ο
+Å
+ℏ
+⌀
+⍺
+⍵
+𝑢
+𝜓
+०
+‥
+︽
+﹥
+•
+∕
+∙
+⋅
+∓
+∟
+∠
+∡
+∢
+℧
+☺

+ 524 - 0
mineru/model/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/ppocrv5_th_dict.txt

@@ -0,0 +1,524 @@
+!
+"
+#
+$
+%
+&
+'
+(
+)
+*
++
+,
+-
+.
+/
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+:
+;
+<
+=
+>
+?
+A
+B
+C
+D
+E
+F
+G
+H
+I
+J
+K
+L
+M
+N
+O
+P
+Q
+R
+S
+T
+U
+V
+W
+X
+Y
+Z
+[
+]
+_
+`
+a
+b
+c
+d
+e
+f
+g
+h
+i
+j
+k
+l
+m
+n
+o
+p
+q
+r
+s
+t
+u
+v
+w
+x
+y
+z
+‥
+{
+}
+\
+|
+@
+^
+~
+∕
+∙
+⋅
+∓
+∩
+∪
+□
+←
+↔
+⇒
+⇐
+⇔
+∀
+∃
+∄
+∴
+∵
+∝
+∞
+⊥
+∟
+∠
+∡
+∢
+′
+″
+∥
+⊾
+⊿
+∂
+∫
+∬
+∭
+∮
+∯
+∰
+∑
+∏
+√
+∛
+∜
+∱
+∲
+∳
+∶
+∷
+∼
+℉
+℧
+Å
+⌀
+ℏ
+⅀
+⍺
+⍵
+€
+₿
+Ⅰ
+Ⅱ
+Ⅲ
+Ⅳ
+Ⅴ
+Ⅵ
+Ⅶ
+Ⅷ
+Ⅸ
+Ⅹ
+Ⅺ
+Ⅻ
+ⅰ
+ⅱ
+ⅲ
+ⅳ
+ⅴ
+ⅵ
+ⅶ
+ⅷ
+ⅸ
+ⅹ
+ⅺ
+ⅻ
+➀
+➁
+➂
+➃
+➄
+➅
+➆
+➇
+➈
+➉
+➊
+➋
+➌
+➍
+➎
+➏
+➐
+➑
+➒
+➓
+❶
+❷
+❸
+❹
+❺
+❻
+❼
+❽
+❾
+❿
+①
+②
+③
+④
+⑤
+⑥
+⑦
+⑧
+⑨
+⑩
+●
+▶
+𝑢
+︽
+–
+﹥
+𝜓
+•
+∋
+०
+⬆
+Ạ
+◀
+
+▫
+︾
+ÿ
+¿
+‐
+‑
+‒
+—
+―
+‖
+‗
+‘
+’
+‚
+‛
+“
+”
+„
+‟
+†
+‡
+‣
+․
+…
+‧
+‰
+‴
+‵
+‶
+‷
+‸
+‹
+›
+※
+‼
+‽
+‾
+₤
+₡
+₹
+−
+∖
+∗
+≈
+≠
+≡
+≤
+≥
+⊂
+⊃
+↑
+→
+↓
+↕
+™
+Ω
+℮
+∆
+✓
+✗
+✘
+▪
+◼
+✔
+✕
+☑
+☒
+№
+₽
+₴
+ο
+ก
+ข
+ค
+ฅ
+ฆ
+ง
+จ
+ฉ
+ช
+ซ
+ฌ
+ญ
+ฎ
+ฏ
+ฐ
+ฑ
+ฒ
+ณ
+ด
+ต
+ถ
+ท
+ธ
+น
+บ
+ป
+ผ
+ฝ
+พ
+ฟ
+ภ
+ม
+ย
+ร
+ฤ
+ล
+ฦ
+ว
+ศ
+ษ
+ส
+ห
+ฬ
+อ
+ฮ
+ฯ
+ะ
+ั
+า
+ำ
+ิ
+ี
+ึ
+ื
+ุ
+ู
+ฺ
+฿
+เ
+แ
+โ
+ใ
+ไ
+ๅ
+ๆ
+็
+่
+้
+๊
+๋
+์
+ํ
+๐
+๑
+๒
+๓
+๔
+๕
+๖
+๗
+๘
+๙
+๚

+ 14 - 2
mineru/model/ocr/paddleocr2pytorch/pytorchocr/utils/resources/models_config.yml

@@ -19,7 +19,7 @@ lang:
     det: ch_PP-OCRv5_det_infer.pth
     rec: ch_PP-OCRv4_rec_server_doc_infer.pth
     dict: ppocrv4_doc_dict.txt
-  en:
+  en_v4:
     det: en_PP-OCRv3_det_infer.pth
     rec: en_PP-OCRv4_rec_infer.pth
     dict: en_dict.txt
@@ -66,4 +66,16 @@ lang:
   east_slavic:
     det: ch_PP-OCRv5_det_infer.pth
     rec: eslav_PP-OCRv5_rec_infer.pth
-    dict: ppocrv5_eslav_dict.txt
+    dict: ppocrv5_eslav_dict.txt
+  el:
+    det: ch_PP-OCRv5_det_infer.pth
+    rec: el_PP-OCRv5_rec_infer.pth
+    dict: ppocrv5_el_dict.txt
+  th:
+    det: ch_PP-OCRv5_det_infer.pth
+    rec: th_PP-OCRv5_rec_infer.pth
+    dict: ppocrv5_th_dict.txt
+  en:
+    det: ch_PP-OCRv5_det_infer.pth
+    rec: en_PP-OCRv5_rec_infer.pth
+    dict: ppocrv5_en_dict.txt

+ 2 - 2
mineru/model/ocr/paddleocr2pytorch/tools/infer/predict_rec.py

@@ -288,7 +288,7 @@ class TextRecognizer(BaseOCRV20):
 
         return img
 
-    def __call__(self, img_list, tqdm_enable=False):
+    def __call__(self, img_list, tqdm_enable=False, tqdm_desc="OCR-rec Predict"):
         img_num = len(img_list)
         # Calculate the aspect ratio of all text bars
         width_list = []
@@ -302,7 +302,7 @@ class TextRecognizer(BaseOCRV20):
         batch_num = self.rec_batch_num
         elapse = 0
         # for beg_img_no in range(0, img_num, batch_num):
-        with tqdm(total=img_num, desc='OCR-rec Predict', disable=not tqdm_enable) as pbar:
+        with tqdm(total=img_num, desc=tqdm_desc, disable=not tqdm_enable) as pbar:
             index = 0
             for beg_img_no in range(0, img_num, batch_num):
                 end_img_no = min(img_num, beg_img_no + batch_num)

+ 1 - 0
mineru/model/ori_cls/__init__.py

@@ -0,0 +1 @@
+# Copyright (c) Opendatalab. All rights reserved.

+ 279 - 0
mineru/model/ori_cls/paddle_ori_cls.py

@@ -0,0 +1,279 @@
+# Copyright (c) Opendatalab. All rights reserved.
+import os
+
+from PIL import Image
+from collections import defaultdict
+from typing import List, Dict
+from tqdm import tqdm
+import cv2
+import numpy as np
+import onnxruntime
+
+from mineru.utils.enum_class import ModelPath
+from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
+
+
+class PaddleOrientationClsModel:
+    def __init__(self, ocr_engine):
+        self.sess = onnxruntime.InferenceSession(
+            os.path.join(auto_download_and_get_model_root_path(ModelPath.paddle_orientation_classification), ModelPath.paddle_orientation_classification)
+        )
+        self.ocr_engine = ocr_engine
+        self.less_length = 256
+        self.cw, self.ch = 224, 224
+        self.std = [0.229, 0.224, 0.225]
+        self.scale = 0.00392156862745098
+        self.mean = [0.485, 0.456, 0.406]
+        self.labels = ["0", "90", "180", "270"]
+
+    def preprocess(self, input_img):
+        # Upscale the image so its shorter side is 256
+        h, w = input_img.shape[:2]
+        scale = 256 / min(h, w)
+        h_resize = round(h * scale)
+        w_resize = round(w * scale)
+        img = cv2.resize(input_img, (w_resize, h_resize), interpolation=1)
+        # Center-crop to a 224x224 square
+        h, w = img.shape[:2]
+        cw, ch = 224, 224
+        x1 = max(0, (w - cw) // 2)
+        y1 = max(0, (h - ch) // 2)
+        x2 = min(w, x1 + cw)
+        y2 = min(h, y1 + ch)
+        if w < cw or h < ch:
+            raise ValueError(
+                f"Input image ({w}, {h}) smaller than the target size ({cw}, {ch})."
+            )
+        img = img[y1:y2, x1:x2, ...]
+        # Normalize with ImageNet mean/std
+        split_im = list(cv2.split(img))
+        std = [0.229, 0.224, 0.225]
+        scale = 0.00392156862745098
+        mean = [0.485, 0.456, 0.406]
+        alpha = [scale / std[i] for i in range(len(std))]
+        beta = [-mean[i] / std[i] for i in range(len(std))]
+        for c in range(img.shape[2]):
+            split_im[c] = split_im[c].astype(np.float32)
+            split_im[c] *= alpha[c]
+            split_im[c] += beta[c]
+        img = cv2.merge(split_im)
+        # Convert to CHW format
+        img = img.transpose((2, 0, 1))
+        imgs = [img]
+        x = np.stack(imgs, axis=0).astype(dtype=np.float32, copy=False)
+        return x
+
+    def predict(self, input_img):
+        rotate_label = "0"  # Default to 0 if no rotation detected or not portrait
+        if isinstance(input_img, Image.Image):
+            np_img = np.asarray(input_img)
+        elif isinstance(input_img, np.ndarray):
+            np_img = input_img
+        else:
+            raise ValueError("Input must be a pillow object or a numpy array.")
+        bgr_image = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)
+        # First check the overall image aspect ratio (height/width)
+        img_height, img_width = bgr_image.shape[:2]
+        img_aspect_ratio = img_height / img_width if img_width > 0 else 1.0
+        img_is_portrait = img_aspect_ratio > 1.2
+
+        if img_is_portrait:
+
+            det_res = self.ocr_engine.ocr(bgr_image, rec=False)[0]
+            # Check if table is rotated by analyzing text box aspect ratios
+            if det_res:
+                vertical_count = 0
+                is_rotated = False
+
+                for box_ocr_res in det_res:
+                    p1, p2, p3, p4 = box_ocr_res
+
+                    # Calculate width and height
+                    width = p3[0] - p1[0]
+                    height = p3[1] - p1[1]
+
+                    aspect_ratio = width / height if height > 0 else 1.0
+
+                    # Count vertical vs horizontal text boxes
+                    if aspect_ratio < 0.8:  # Taller than wide - vertical text
+                        vertical_count += 1
+                    # elif aspect_ratio > 1.2:  # Wider than tall - horizontal text
+                    #     horizontal_count += 1
+
+                if vertical_count >= len(det_res) * 0.28 and vertical_count >= 3:
+                    is_rotated = True
+                # logger.debug(f"Text orientation analysis: vertical={vertical_count}, det_res={len(det_res)}, rotated={is_rotated}")
+
+                # If we have more vertical text boxes than horizontal ones,
+                # and vertical ones are significant, table might be rotated
+                if is_rotated:
+                    x = self.preprocess(np_img)
+                    (result,) = self.sess.run(None, {"x": x})
+                    rotate_label = self.labels[np.argmax(result)]
+                    # logger.debug(f"Orientation classification result: {label}")
+
+        return rotate_label
+
+    def list_2_batch(self, img_list, batch_size=16):
+        """
+        Split a list of arbitrary length into batches of the given batch size.
+
+        Args:
+            img_list: the input list
+            batch_size: size of each batch, 16 by default
+
+        Returns:
+            A list of batches, each batch being a sub-list of the original list.
+        """
+        batches = []
+        for i in range(0, len(img_list), batch_size):
+            batch = img_list[i : min(i + batch_size, len(img_list))]
+            batches.append(batch)
+        return batches
+
+    def batch_preprocess(self, imgs):
+        res_imgs = []
+        for img_info in imgs:
+            img = np.asarray(img_info["table_img"])
+            # Upscale the image so its shorter side is 256
+            h, w = img.shape[:2]
+            scale = 256 / min(h, w)
+            h_resize = round(h * scale)
+            w_resize = round(w * scale)
+            img = cv2.resize(img, (w_resize, h_resize), interpolation=1)
+            # Center-crop to a 224x224 square
+            h, w = img.shape[:2]
+            cw, ch = 224, 224
+            x1 = max(0, (w - cw) // 2)
+            y1 = max(0, (h - ch) // 2)
+            x2 = min(w, x1 + cw)
+            y2 = min(h, y1 + ch)
+            if w < cw or h < ch:
+                raise ValueError(
+                    f"Input image ({w}, {h}) smaller than the target size ({cw}, {ch})."
+                )
+            img = img[y1:y2, x1:x2, ...]
+            # Normalize with ImageNet mean/std
+            split_im = list(cv2.split(img))
+            std = [0.229, 0.224, 0.225]
+            scale = 0.00392156862745098
+            mean = [0.485, 0.456, 0.406]
+            alpha = [scale / std[i] for i in range(len(std))]
+            beta = [-mean[i] / std[i] for i in range(len(std))]
+            for c in range(img.shape[2]):
+                split_im[c] = split_im[c].astype(np.float32)
+                split_im[c] *= alpha[c]
+                split_im[c] += beta[c]
+            img = cv2.merge(split_im)
+            # Convert to CHW format
+            img = img.transpose((2, 0, 1))
+            res_imgs.append(img)
+        x = np.stack(res_imgs, axis=0).astype(dtype=np.float32, copy=False)
+        return x
+
+    def batch_predict(
+        self, imgs: List[Dict], det_batch_size: int, batch_size: int = 16
+    ) -> None:
+        """
+        Batch-predict the rotation of the images in the given list of
+        image-info dicts, and rotate any rotated images back to upright.
+        """
+        import torch
+        from packaging import version
+        if version.parse(torch.__version__) >= version.parse("2.8.0"):
+            return None
+
+        RESOLUTION_GROUP_STRIDE = 128
+        # Skip images with an aspect ratio below 1.2
+        resolution_groups = defaultdict(list)
+        for img in imgs:
+            # Convert the RGB image to BGR
+            bgr_img: np.ndarray = cv2.cvtColor(np.asarray(img["table_img"]), cv2.COLOR_RGB2BGR)
+            img["table_img_bgr"] = bgr_img
+            img_height, img_width = bgr_img.shape[:2]
+            img_aspect_ratio = img_height / img_width if img_width > 0 else 1.0
+            if img_aspect_ratio > 1.2:
+                # Normalize dimensions to multiples of RESOLUTION_GROUP_STRIDE
+                normalized_h = ((img_height + RESOLUTION_GROUP_STRIDE) // RESOLUTION_GROUP_STRIDE) * RESOLUTION_GROUP_STRIDE  # round up to a multiple of RESOLUTION_GROUP_STRIDE
+                normalized_w = ((img_width + RESOLUTION_GROUP_STRIDE) // RESOLUTION_GROUP_STRIDE) * RESOLUTION_GROUP_STRIDE
+                group_key = (normalized_h, normalized_w)
+                resolution_groups[group_key].append(img)
+
+        # Process each resolution group as a batch
+        rotated_imgs = []
+        for group_key, group_imgs in tqdm(resolution_groups.items(), desc="Table-ori cls stage1 predict", disable=True):
+            # Compute the target size (group max, rounded up to a multiple of RESOLUTION_GROUP_STRIDE)
+            max_h = max(img["table_img_bgr"].shape[0] for img in group_imgs)
+            max_w = max(img["table_img_bgr"].shape[1] for img in group_imgs)
+            target_h = ((max_h + RESOLUTION_GROUP_STRIDE - 1) // RESOLUTION_GROUP_STRIDE) * RESOLUTION_GROUP_STRIDE
+            target_w = ((max_w + RESOLUTION_GROUP_STRIDE - 1) // RESOLUTION_GROUP_STRIDE) * RESOLUTION_GROUP_STRIDE
+
+            # Pad all images to the uniform target size
+            batch_images = []
+            for img in group_imgs:
+                bgr_img = img["table_img_bgr"]
+                h, w = bgr_img.shape[:2]
+                # Create a white background of the target size
+                padded_img = np.ones((target_h, target_w, 3), dtype=np.uint8) * 255
+                # Paste the original image at the top-left corner
+                padded_img[:h, :w] = bgr_img
+                batch_images.append(padded_img)
+
+            # Run detection on the whole batch
+            batch_results = self.ocr_engine.text_detector.batch_predict(
+                batch_images, min(len(batch_images), det_batch_size)
+            )
+
+            # From the batch results, decide whether each image is rotated; rotated images are queued for rotation-angle prediction
+
+            for index, (img_info, (dt_boxes, elapse)) in enumerate(
+                zip(group_imgs, batch_results)
+            ):
+                vertical_count = 0
+                for box_ocr_res in dt_boxes:
+                    p1, p2, p3, p4 = box_ocr_res
+
+                    # Calculate width and height
+                    width = p3[0] - p1[0]
+                    height = p3[1] - p1[1]
+
+                    aspect_ratio = width / height if height > 0 else 1.0
+
+                    # Count vertical text boxes
+                    if aspect_ratio < 0.8:  # Taller than wide - vertical text
+                        vertical_count += 1
+
+                if vertical_count >= len(dt_boxes) * 0.28 and vertical_count >= 3:
+                    rotated_imgs.append(img_info)
+
+        # Predict the rotation angle of the rotated images
+        if len(rotated_imgs) > 0:
+            imgs = self.list_2_batch(rotated_imgs, batch_size=batch_size)
+            with tqdm(total=len(rotated_imgs), desc="Table-ori cls stage2 predict", disable=True) as pbar:
+                for img_batch in imgs:
+                    x = self.batch_preprocess(img_batch)
+                    results = self.sess.run(None, {"x": x})
+                    for img_info, res in zip(img_batch, results[0]):  # pair each result with its own batch item
+                        label = self.labels[np.argmax(res)]
+                        if label == "270":
+                            img_info["table_img"] = cv2.rotate(
+                                np.asarray(img_info["table_img"]),
+                                cv2.ROTATE_90_CLOCKWISE,
+                            )
+                            img_info["wired_table_img"] = cv2.rotate(
+                                np.asarray(img_info["wired_table_img"]),
+                                cv2.ROTATE_90_CLOCKWISE,
+                            )
+                        elif label == "90":
+                            img_info["table_img"] = cv2.rotate(
+                                np.asarray(img_info["table_img"]),
+                                cv2.ROTATE_90_COUNTERCLOCKWISE,
+                            )
+                            img_info["wired_table_img"] = cv2.rotate(
+                                np.asarray(img_info["wired_table_img"]),
+                                cv2.ROTATE_90_COUNTERCLOCKWISE,
+                            )
+                        else:
+                            # 180度和0度不做处理
+                            # 180° and 0° need no handling
+                        pbar.update(1)
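
The preprocessing in this file is the usual PP-LCNet classification recipe: shortest side to 256, center-crop 224, ImageNet normalization (the alpha/beta pair above is algebraically `(x/255 - mean) / std`), then CHW. A compact single-image equivalent:

    import cv2
    import numpy as np

    def cls_preprocess(img: np.ndarray) -> np.ndarray:
        h, w = img.shape[:2]
        s = 256 / min(h, w)
        img = cv2.resize(img, (round(w * s), round(h * s)))  # shorter side -> 256
        h, w = img.shape[:2]
        y, x = (h - 224) // 2, (w - 224) // 2
        img = img[y:y + 224, x:x + 224].astype(np.float32) / 255.0  # center crop
        mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
        std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
        img = (img - mean) / std  # ImageNet normalization
        return img.transpose(2, 0, 1)[None]  # NCHW batch of one

    print(cls_preprocess(np.zeros((300, 400, 3), np.uint8)).shape)  # (1, 3, 224, 224)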

+ 1 - 0
mineru/model/table/cls/__init__.py

@@ -0,0 +1 @@
+# Copyright (c) Opendatalab. All rights reserved.

+ 148 - 0
mineru/model/table/cls/paddle_table_cls.py

@@ -0,0 +1,148 @@
+import os
+
+from PIL import Image
+import cv2
+import numpy as np
+import onnxruntime
+from loguru import logger
+from tqdm import tqdm
+
+from mineru.backend.pipeline.model_list import AtomicModel
+from mineru.utils.enum_class import ModelPath
+from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
+
+
+class PaddleTableClsModel:
+    def __init__(self):
+        self.sess = onnxruntime.InferenceSession(
+            os.path.join(auto_download_and_get_model_root_path(ModelPath.paddle_table_cls), ModelPath.paddle_table_cls)
+        )
+        self.less_length = 256
+        self.cw, self.ch = 224, 224
+        self.std = [0.229, 0.224, 0.225]
+        self.scale = 0.00392156862745098
+        self.mean = [0.485, 0.456, 0.406]
+        self.labels = [AtomicModel.WiredTable, AtomicModel.WirelessTable]
+
+    def preprocess(self, input_img):
+        # Upscale the image so its shorter side is 256
+        h, w = input_img.shape[:2]
+        scale = 256 / min(h, w)
+        h_resize = round(h * scale)
+        w_resize = round(w * scale)
+        img = cv2.resize(input_img, (w_resize, h_resize), interpolation=1)
+        # Center-crop to a 224x224 square
+        h, w = img.shape[:2]
+        cw, ch = 224, 224
+        x1 = max(0, (w - cw) // 2)
+        y1 = max(0, (h - ch) // 2)
+        x2 = min(w, x1 + cw)
+        y2 = min(h, y1 + ch)
+        if w < cw or h < ch:
+            raise ValueError(
+                f"Input image ({w}, {h}) smaller than the target size ({cw}, {ch})."
+            )
+        img = img[y1:y2, x1:x2, ...]
+        # Normalize with ImageNet mean/std
+        split_im = list(cv2.split(img))
+        std = [0.229, 0.224, 0.225]
+        scale = 0.00392156862745098
+        mean = [0.485, 0.456, 0.406]
+        alpha = [scale / std[i] for i in range(len(std))]
+        beta = [-mean[i] / std[i] for i in range(len(std))]
+        for c in range(img.shape[2]):
+            split_im[c] = split_im[c].astype(np.float32)
+            split_im[c] *= alpha[c]
+            split_im[c] += beta[c]
+        img = cv2.merge(split_im)
+        # Convert to CHW format
+        img = img.transpose((2, 0, 1))
+        imgs = [img]
+        x = np.stack(imgs, axis=0).astype(dtype=np.float32, copy=False)
+        return x
+
+    def predict(self, input_img):
+        if isinstance(input_img, Image.Image):
+            np_img = np.asarray(input_img)
+        elif isinstance(input_img, np.ndarray):
+            np_img = input_img
+        else:
+            raise ValueError("Input must be a pillow object or a numpy array.")
+        x = self.preprocess(np_img)
+        result = self.sess.run(None, {"x": x})
+        idx = np.argmax(result)
+        conf = float(np.max(result))
+        return self.labels[idx], conf
+
+    def list_2_batch(self, img_list, batch_size=16):
+        """
+        Split a list of arbitrary length into batches of the given batch size.
+
+        Args:
+            img_list: the input list
+            batch_size: size of each batch, 16 by default
+
+        Returns:
+            A list of batches, each batch being a sub-list of the original list.
+        """
+        batches = []
+        for i in range(0, len(img_list), batch_size):
+            batch = img_list[i : min(i + batch_size, len(img_list))]
+            batches.append(batch)
+        return batches
+
+    def batch_preprocess(self, imgs):
+        res_imgs = []
+        for img in imgs:
+            img = np.asarray(img)
+            # Upscale the image so its shorter side is 256
+            h, w = img.shape[:2]
+            scale = 256 / min(h, w)
+            h_resize = round(h * scale)
+            w_resize = round(w * scale)
+            img = cv2.resize(img, (w_resize, h_resize), interpolation=1)
+            # Center-crop to a 224x224 square
+            h, w = img.shape[:2]
+            cw, ch = 224, 224
+            x1 = max(0, (w - cw) // 2)
+            y1 = max(0, (h - ch) // 2)
+            x2 = min(w, x1 + cw)
+            y2 = min(h, y1 + ch)
+            if w < cw or h < ch:
+                raise ValueError(
+                    f"Input image ({w}, {h}) smaller than the target size ({cw}, {ch})."
+                )
+            img = img[y1:y2, x1:x2, ...]
+            # Normalize with ImageNet mean/std
+            split_im = list(cv2.split(img))
+            std = [0.229, 0.224, 0.225]
+            scale = 0.00392156862745098
+            mean = [0.485, 0.456, 0.406]
+            alpha = [scale / std[i] for i in range(len(std))]
+            beta = [-mean[i] / std[i] for i in range(len(std))]
+            for c in range(img.shape[2]):
+                split_im[c] = split_im[c].astype(np.float32)
+                split_im[c] *= alpha[c]
+                split_im[c] += beta[c]
+            img = cv2.merge(split_im)
+            # Convert to CHW format
+            img = img.transpose((2, 0, 1))
+            res_imgs.append(img)
+        x = np.stack(res_imgs, axis=0).astype(dtype=np.float32, copy=False)
+        return x
+
+    def batch_predict(self, img_info_list, batch_size=16):
+        imgs = [item["wired_table_img"] for item in img_info_list]
+        imgs = self.list_2_batch(imgs, batch_size=batch_size)
+        label_res = []
+        with tqdm(total=len(img_info_list), desc="Table-wired/wireless cls predict", disable=True) as pbar:
+            for img_batch in imgs:
+                x = self.batch_preprocess(img_batch)
+                result = self.sess.run(None, {"x": x})
+                for img_res in result[0]:
+                    idx = np.argmax(img_res)
+                    conf = float(np.max(img_res))
+                    label_res.append((self.labels[idx], conf))
+                pbar.update(len(img_batch))
+            for img_info, (label, conf) in zip(img_info_list, label_res):
+                img_info['table_res']["cls_label"] = label
+                img_info['table_res']["cls_score"] = round(conf, 3)
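
A hedged usage sketch of the single-image path above (constructing the model triggers the ONNX download on first use; the input is a synthetic stand-in for a real table crop):

    import numpy as np
    from mineru.model.table.cls.paddle_table_cls import PaddleTableClsModel

    cls_model = PaddleTableClsModel()
    table_img = np.full((400, 600, 3), 255, dtype=np.uint8)  # placeholder crop
    label, conf = cls_model.predict(table_img)
    print(label, conf)  # "wired_table" or "wireless_table" plus a confidence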

+ 0 - 89
mineru/model/table/rapid_table.py

@@ -1,89 +0,0 @@
-import os
-import html
-import cv2
-import numpy as np
-from loguru import logger
-from rapid_table import RapidTable, RapidTableInput
-
-from mineru.utils.enum_class import ModelPath
-from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
-
-
-def escape_html(input_string):
-    """Escape HTML Entities."""
-    return html.escape(input_string)
-
-
-class RapidTableModel(object):
-    def __init__(self, ocr_engine):
-        slanet_plus_model_path = os.path.join(auto_download_and_get_model_root_path(ModelPath.slanet_plus), ModelPath.slanet_plus)
-        input_args = RapidTableInput(model_type='slanet_plus', model_path=slanet_plus_model_path)
-        self.table_model = RapidTable(input_args)
-        self.ocr_engine = ocr_engine
-
-
-    def predict(self, image):
-        bgr_image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
-
-        # First check the overall image aspect ratio (height/width)
-        img_height, img_width = bgr_image.shape[:2]
-        img_aspect_ratio = img_height / img_width if img_width > 0 else 1.0
-        img_is_portrait = img_aspect_ratio > 1.2
-
-        if img_is_portrait:
-
-            det_res = self.ocr_engine.ocr(bgr_image, rec=False)[0]
-            # Check if table is rotated by analyzing text box aspect ratios
-            is_rotated = False
-            if det_res:
-                vertical_count = 0
-
-                for box_ocr_res in det_res:
-                    p1, p2, p3, p4 = box_ocr_res
-
-                    # Calculate width and height
-                    width = p3[0] - p1[0]
-                    height = p3[1] - p1[1]
-
-                    aspect_ratio = width / height if height > 0 else 1.0
-
-                    # Count vertical vs horizontal text boxes
-                    if aspect_ratio < 0.8:  # Taller than wide - vertical text
-                        vertical_count += 1
-                    # elif aspect_ratio > 1.2:  # Wider than tall - horizontal text
-                    #     horizontal_count += 1
-
-                # If we have more vertical text boxes than horizontal ones,
-                # and vertical ones are significant, table might be rotated
-                if vertical_count >= len(det_res) * 0.3:
-                    is_rotated = True
-
-                # logger.debug(f"Text orientation analysis: vertical={vertical_count}, det_res={len(det_res)}, rotated={is_rotated}")
-
-            # Rotate image if necessary
-            if is_rotated:
-                # logger.debug("Table appears to be in portrait orientation, rotating 90 degrees clockwise")
-                image = cv2.rotate(np.asarray(image), cv2.ROTATE_90_CLOCKWISE)
-                bgr_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-
-        # Continue with OCR on potentially rotated image
-        ocr_result = self.ocr_engine.ocr(bgr_image)[0]
-        if ocr_result:
-            ocr_result = [[item[0], escape_html(item[1][0]), item[1][1]] for item in ocr_result if
-                      len(item) == 2 and isinstance(item[1], tuple)]
-        else:
-            ocr_result = None
-
-
-        if ocr_result:
-            try:
-                table_results = self.table_model(np.asarray(image), ocr_result)
-                html_code = table_results.pred_html
-                table_cell_bboxes = table_results.cell_bboxes
-                logic_points = table_results.logic_points
-                elapse = table_results.elapse
-                return html_code, table_cell_bboxes, logic_points, elapse
-            except Exception as e:
-                logger.exception(e)
-
-        return None, None, None, None

+ 154 - 0
mineru/model/table/rec/RapidTable.py

@@ -0,0 +1,154 @@
+import html
+import os
+import time
+from pathlib import Path
+from typing import List
+
+import cv2
+import numpy as np
+from loguru import logger
+from rapid_table import ModelType, RapidTable, RapidTableInput
+from rapid_table.utils import RapidTableOutput
+from tqdm import tqdm
+
+from mineru.model.ocr.paddleocr2pytorch.pytorch_paddle import PytorchPaddleOCR
+from mineru.utils.enum_class import ModelPath
+from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
+
+
+def escape_html(input_string):
+    """Escape HTML Entities."""
+    return html.escape(input_string)
+
+
+class CustomRapidTable(RapidTable):
+    def __init__(self, cfg: RapidTableInput):
+        import logging
+        # Silence INFO-level logging emitted by rapid_table
+        logging.disable(logging.INFO)
+        super().__init__(cfg)
+
+    def __call__(self, img_contents, ocr_results=None, batch_size=1):
+        if not isinstance(img_contents, list):
+            img_contents = [img_contents]
+
+        s = time.perf_counter()
+
+        results = RapidTableOutput()
+
+        total_nums = len(img_contents)
+
+        with tqdm(total=total_nums, desc="Table-wireless Predict") as pbar:
+            for start_i in range(0, total_nums, batch_size):
+                end_i = min(total_nums, start_i + batch_size)
+
+                imgs = self._load_imgs(img_contents[start_i:end_i])
+
+                pred_structures, cell_bboxes = self.table_structure(imgs)
+                logic_points = self.table_matcher.decode_logic_points(pred_structures)
+
+                dt_boxes, rec_res = self.get_ocr_results(imgs, start_i, end_i, ocr_results)
+                pred_htmls = self.table_matcher(
+                    pred_structures, cell_bboxes, dt_boxes, rec_res
+                )
+
+                results.pred_htmls.extend(pred_htmls)
+                # Update the progress bar
+                pbar.update(end_i - start_i)
+
+        elapse = time.perf_counter() - s
+        results.elapse = elapse / total_nums
+        return results
+
+
+class RapidTableModel:
+    def __init__(self, ocr_engine):
+        slanet_plus_model_path = os.path.join(
+            auto_download_and_get_model_root_path(ModelPath.slanet_plus),
+            ModelPath.slanet_plus,
+        )
+        input_args = RapidTableInput(
+            model_type=ModelType.SLANETPLUS,
+            model_dir_or_path=slanet_plus_model_path,
+            use_ocr=False
+        )
+        self.table_model = CustomRapidTable(input_args)
+        self.ocr_engine = ocr_engine
+
+    def predict(self, image, ocr_result=None):
+        bgr_image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
+        # Run OCR only when no result was supplied
+
+        if not ocr_result:
+            raw_ocr_result = self.ocr_engine.ocr(bgr_image)[0]
+            # Split out bounding boxes, texts, and confidence scores
+            boxes = []
+            texts = []
+            scores = []
+            for item in raw_ocr_result:
+                if len(item) == 3:
+                    boxes.append(item[0])
+                    texts.append(escape_html(item[1]))
+                    scores.append(item[2])
+                elif len(item) == 2 and isinstance(item[1], tuple):
+                    boxes.append(item[0])
+                    texts.append(escape_html(item[1][0]))
+                    scores.append(item[1][1])
+            # Build ocr_result in the format rapid_table expects
+            ocr_result = [(boxes, texts, scores)]
+
+        if ocr_result:
+            try:
+                table_results = self.table_model(img_contents=np.asarray(image), ocr_results=ocr_result)
+                html_code = table_results.pred_htmls
+                table_cell_bboxes = table_results.cell_bboxes
+                logic_points = table_results.logic_points
+                elapse = table_results.elapse
+                return html_code, table_cell_bboxes, logic_points, elapse
+            except Exception as e:
+                logger.exception(e)
+
+        return None, None, None, None
+
+    def batch_predict(self, table_res_list: List[dict], batch_size: int = 4):
+        not_none_table_res_list = []
+        for table_res in table_res_list:
+            if table_res.get("ocr_result", None):
+                not_none_table_res_list.append(table_res)
+
+        if not_none_table_res_list:
+            img_contents = [table_res["table_img"] for table_res in not_none_table_res_list]
+            ocr_results = []
+            # ocr_results must be built in the format rapid_table expects
+            for table_res in not_none_table_res_list:
+                raw_ocr_result = table_res["ocr_result"]
+                boxes = []
+                texts = []
+                scores = []
+                for item in raw_ocr_result:
+                    if len(item) == 3:
+                        boxes.append(item[0])
+                        texts.append(escape_html(item[1]))
+                        scores.append(item[2])
+                    elif len(item) == 2 and isinstance(item[1], tuple):
+                        boxes.append(item[0])
+                        texts.append(escape_html(item[1][0]))
+                        scores.append(item[1][1])
+                ocr_results.append((boxes, texts, scores))
+            table_results = self.table_model(img_contents=img_contents, ocr_results=ocr_results, batch_size=batch_size)
+
+            for i, result in enumerate(table_results.pred_htmls):
+                if result:
+                    not_none_table_res_list[i]['table_res']['html'] = result
+
+if __name__ == '__main__':
+    ocr_engine = PytorchPaddleOCR(
+            det_db_box_thresh=0.5,
+            det_db_unclip_ratio=1.6,
+            enable_merge_det_boxes=False,
+    )
+    table_model = RapidTableModel(ocr_engine)
+    img_path = Path(r"D:\project\20240729ocrtest\pythonProject\images\601c939cc6dabaf07af763e2f935f54896d0251f37cc47beb7fc6b069353455d.jpg")
+    image = cv2.imread(str(img_path))
+    html_code, table_cell_bboxes, logic_points, elapse = table_model.predict(image)
+    print(html_code)
+
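Note: CustomRapidTable.__call__ above consumes OCR results as one (boxes, texts, scores) tuple per image, which is exactly the shape predict builds before delegating. A minimal usage sketch, assuming the table_model and image from the __main__ block (the box, text, and score values are made up for illustration):

    boxes = [[[10, 10], [90, 10], [90, 40], [10, 40]]]  # one 4-point quad per text line
    texts = ["Cell A1"]
    scores = [0.98]
    # Passing a pre-built OCR result skips the internal PytorchPaddleOCR call
    html_code, cell_bboxes, logic_points, elapse = table_model.predict(
        image, ocr_result=[(boxes, texts, scores)]
    )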

+ 1 - 0
mineru/model/table/rec/__init__.py

@@ -0,0 +1 @@
+# Copyright (c) Opendatalab. All rights reserved.

+ 0 - 0
mineru/model/table/rec/slanet_plus/__init__.py


+ 212 - 0
mineru/model/table/rec/slanet_plus/main.py

@@ -0,0 +1,212 @@
+import os
+import copy
+import time
+import html
+from dataclasses import asdict, dataclass
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple, Union
+
+import cv2
+import numpy as np
+from loguru import logger
+from tqdm import tqdm
+
+from .matcher import TableMatch
+from .table_structure import TableStructurer
+from mineru.utils.enum_class import ModelPath
+from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
+
+
+@dataclass
+class RapidTableInput:
+    model_type: Optional[str] = "slanet_plus"
+    model_path: Union[str, Path, None, Dict[str, str]] = None
+    use_cuda: bool = False
+    device: str = "cpu"
+
+
+@dataclass
+class RapidTableOutput:
+    pred_html: Optional[str] = None
+    cell_bboxes: Optional[np.ndarray] = None
+    logic_points: Optional[np.ndarray] = None
+    elapse: Optional[float] = None
+
+
+class RapidTable:
+    def __init__(self, config: RapidTableInput):
+        self.table_structure = TableStructurer(asdict(config))
+        self.table_matcher = TableMatch()
+
+    def predict(
+        self,
+        img: np.ndarray,
+        ocr_result: Optional[List[Union[List[List[float]], str, float]]] = None,
+    ) -> RapidTableOutput:
+        if ocr_result is None:
+            raise ValueError("OCR result is None")
+
+        s = time.perf_counter()
+        h, w = img.shape[:2]
+
+        dt_boxes, rec_res = self.get_boxes_recs(ocr_result, h, w)
+
+        pred_structures, cell_bboxes, _ = self.table_structure.process(
+            copy.deepcopy(img)
+        )
+
+        # Rescale boxes from the slanet-plus model back to the original image size
+        cell_bboxes = self.adapt_slanet_plus(img, cell_bboxes)
+
+        pred_html = self.table_matcher(pred_structures, cell_bboxes, dt_boxes, rec_res)
+
+        # Filter out placeholder bboxes
+        mask = ~np.all(cell_bboxes == 0, axis=1)
+        cell_bboxes = cell_bboxes[mask]
+
+        logic_points = self.table_matcher.decode_logic_points(pred_structures)
+        elapse = time.perf_counter() - s
+        return RapidTableOutput(pred_html, cell_bboxes, logic_points, elapse)
+
+    def batch_predict(
+        self,
+        images: List[np.ndarray],
+        ocr_results: List[List[Union[List[List[float]], str, float]]],
+        batch_size: int = 4,
+    ) -> List[RapidTableOutput]:
+        """批量处理图像"""
+        s = time.perf_counter()
+
+        batch_dt_boxes = []
+        batch_rec_res = []
+
+        for i, img in enumerate(images):
+            h, w = img.shape[:2]
+            dt_boxes, rec_res = self.get_boxes_recs(ocr_results[i], h, w)
+            batch_dt_boxes.append(dt_boxes)
+            batch_rec_res.append(rec_res)
+
+        # Batch table-structure recognition
+        batch_results = self.table_structure.batch_process(images)
+
+        output_results = []
+        for i, (img, ocr_result, (pred_structures, cell_bboxes, _)) in enumerate(
+            zip(images, ocr_results, batch_results)
+        ):
+            # Rescale boxes from the slanet-plus model back to the original image size
+            cell_bboxes = self.adapt_slanet_plus(img, cell_bboxes)
+            pred_html = self.table_matcher(
+                pred_structures, cell_bboxes, batch_dt_boxes[i], batch_rec_res[i]
+            )
+            # Filter out placeholder bboxes
+            mask = ~np.all(cell_bboxes == 0, axis=1)
+            cell_bboxes = cell_bboxes[mask]
+
+            logic_points = self.table_matcher.decode_logic_points(pred_structures)
+            result = RapidTableOutput(pred_html, cell_bboxes, logic_points, 0)
+            output_results.append(result)
+
+        total_elapse = time.perf_counter() - s
+        for result in output_results:
+            result.elapse = total_elapse / len(output_results)
+
+        return output_results
+
+    def get_boxes_recs(
+        self, ocr_result: List[Union[List[List[float]], str, float]], h: int, w: int
+    ) -> Tuple[np.ndarray, List[Tuple[str, float]]]:
+        dt_boxes, rec_res, scores = list(zip(*ocr_result))
+        rec_res = list(zip(rec_res, scores))
+
+        r_boxes = []
+        for box in dt_boxes:
+            box = np.array(box)
+            x_min = max(0, box[:, 0].min() - 1)
+            x_max = min(w, box[:, 0].max() + 1)
+            y_min = max(0, box[:, 1].min() - 1)
+            y_max = min(h, box[:, 1].max() + 1)
+            box = [x_min, y_min, x_max, y_max]
+            r_boxes.append(box)
+        dt_boxes = np.array(r_boxes)
+        return dt_boxes, rec_res
+
+    def adapt_slanet_plus(self, img: np.ndarray, cell_bboxes: np.ndarray) -> np.ndarray:
+        h, w = img.shape[:2]
+        resized = 488
+        ratio = min(resized / h, resized / w)
+        w_ratio = resized / (w * ratio)
+        h_ratio = resized / (h * ratio)
+        cell_bboxes[:, 0::2] *= w_ratio
+        cell_bboxes[:, 1::2] *= h_ratio
+        return cell_bboxes
+
+
+def escape_html(input_string):
+    """Escape HTML Entities."""
+    return html.escape(input_string)
+
+
+class RapidTableModel:
+    def __init__(self, ocr_engine):
+        slanet_plus_model_path = os.path.join(
+            auto_download_and_get_model_root_path(ModelPath.slanet_plus),
+            ModelPath.slanet_plus,
+        )
+        input_args = RapidTableInput(
+            model_type="slanet_plus", model_path=slanet_plus_model_path
+        )
+        self.table_model = RapidTable(input_args)
+        self.ocr_engine = ocr_engine
+
+    def predict(self, image, ocr_result=None):
+        bgr_image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
+        # Run OCR only when no result was supplied
+
+        if not ocr_result:
+            ocr_result = self.ocr_engine.ocr(bgr_image)[0]
+            ocr_result = [
+                [item[0], escape_html(item[1][0]), item[1][1]]
+                for item in ocr_result
+                if len(item) == 2 and isinstance(item[1], tuple)
+            ]
+
+        if ocr_result:
+            try:
+                table_results = self.table_model.predict(np.asarray(image), ocr_result)
+                html_code = table_results.pred_html
+                table_cell_bboxes = table_results.cell_bboxes
+                logic_points = table_results.logic_points
+                elapse = table_results.elapse
+                return html_code, table_cell_bboxes, logic_points, elapse
+            except Exception as e:
+                logger.exception(e)
+
+        return None, None, None, None
+
+    def batch_predict(self, table_res_list: List[Dict], batch_size: int = 4) -> None:
+        """对传入的字典列表进行批量预测,无返回值"""
+
+        not_none_table_res_list = []
+        for table_res in table_res_list:
+            if table_res.get("ocr_result", None):
+                not_none_table_res_list.append(table_res)
+
+        with tqdm(total=len(not_none_table_res_list), desc="Table-wireless Predict") as pbar:
+            for index in range(0, len(not_none_table_res_list), batch_size):
+                batch_imgs = [
+                    cv2.cvtColor(np.asarray(not_none_table_res_list[i]["table_img"]), cv2.COLOR_RGB2BGR)
+                    for i in range(index, min(index + batch_size, len(not_none_table_res_list)))
+                ]
+                batch_ocrs = [
+                    not_none_table_res_list[i]["ocr_result"]
+                    for i in range(index, min(index + batch_size, len(not_none_table_res_list)))
+                ]
+                results = self.table_model.batch_predict(
+                    batch_imgs, batch_ocrs, batch_size=batch_size
+                )
+                for i, result in enumerate(results):
+                    if result.pred_html:
+                        not_none_table_res_list[index + i]['table_res']['html'] = result.pred_html
+
+                # Update the progress bar
+                pbar.update(len(results))
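For reference, adapt_slanet_plus above undoes the model's 488-pixel input resize. A worked example of the ratios it computes for a 1000x500 image (dimensions chosen for illustration):

    h, w, resized = 1000, 500, 488
    ratio = min(resized / h, resized / w)  # 0.488
    w_ratio = resized / (w * ratio)        # 488 / 244 = 2.0
    h_ratio = resized / (h * ratio)        # 488 / 488 = 1.0
    # cell_bboxes[:, 0::2] (x-coordinates) are stretched by 2.0,
    # cell_bboxes[:, 1::2] (y-coordinates) are left unchanged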

+ 198 - 0
mineru/model/table/rec/slanet_plus/matcher.py

@@ -0,0 +1,198 @@
+# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import numpy as np
+
+from .matcher_utils import compute_iou, distance
+
+
+class TableMatch:
+    def __init__(self, filter_ocr_result=True, use_master=False):
+        self.filter_ocr_result = filter_ocr_result
+        self.use_master = use_master
+
+    def __call__(self, pred_structures, cell_bboxes, dt_boxes, rec_res):
+        if self.filter_ocr_result:
+            dt_boxes, rec_res = self._filter_ocr_result(cell_bboxes, dt_boxes, rec_res)
+        matched_index = self.match_result(dt_boxes, cell_bboxes)
+        pred_html, pred = self.get_pred_html(pred_structures, matched_index, rec_res)
+        return pred_html
+
+    def match_result(self, dt_boxes, cell_bboxes, min_iou=0.1**8):
+        matched = {}
+        for i, gt_box in enumerate(dt_boxes):
+            distances = []
+            for j, pred_box in enumerate(cell_bboxes):
+                if len(pred_box) == 8:
+                    pred_box = [
+                        np.min(pred_box[0::2]),
+                        np.min(pred_box[1::2]),
+                        np.max(pred_box[0::2]),
+                        np.max(pred_box[1::2]),
+                    ]
+                distances.append(
+                    (distance(gt_box, pred_box), 1.0 - compute_iou(gt_box, pred_box))
+                )  # compute iou and l1 distance
+            sorted_distances = distances.copy()
+            # select det box by iou and l1 distance
+            sorted_distances = sorted(
+                sorted_distances, key=lambda item: (item[1], item[0])
+            )
+            # must > min_iou
+            if sorted_distances[0][1] >= 1 - min_iou:
+                continue
+
+            if distances.index(sorted_distances[0]) not in matched:
+                matched[distances.index(sorted_distances[0])] = [i]
+            else:
+                matched[distances.index(sorted_distances[0])].append(i)
+        return matched
+
+    def get_pred_html(self, pred_structures, matched_index, ocr_contents):
+        end_html = []
+        td_index = 0
+        for tag in pred_structures:
+            if "</td>" not in tag:
+                end_html.append(tag)
+                continue
+
+            if "<td></td>" == tag:
+                end_html.extend("<td>")
+
+            if td_index in matched_index.keys():
+                b_with = False
+                if (
+                    "<b>" in ocr_contents[matched_index[td_index][0]]
+                    and len(matched_index[td_index]) > 1
+                ):
+                    b_with = True
+                    end_html.extend("<b>")
+
+                for i, td_index_index in enumerate(matched_index[td_index]):
+                    content = ocr_contents[td_index_index][0]
+                    if len(matched_index[td_index]) > 1:
+                        if len(content) == 0:
+                            continue
+
+                        if content[0] == " ":
+                            content = content[1:]
+
+                        if "<b>" in content:
+                            content = content[3:]
+
+                        if "</b>" in content:
+                            content = content[:-4]
+
+                        if len(content) == 0:
+                            continue
+
+                        if i != len(matched_index[td_index]) - 1 and " " != content[-1]:
+                            content += " "
+                    end_html.extend(content)
+
+                if b_with:
+                    end_html.extend("</b>")
+
+            if "<td></td>" == tag:
+                end_html.append("</td>")
+            else:
+                end_html.append(tag)
+
+            td_index += 1
+
+        # Filter <thead></thead><tbody></tbody> elements
+        filter_elements = ["<thead>", "</thead>", "<tbody>", "</tbody>"]
+        end_html = [v for v in end_html if v not in filter_elements]
+        return "".join(end_html), end_html
+
+    def decode_logic_points(self, pred_structures):
+        logic_points = []
+        current_row = 0
+        current_col = 0
+        max_rows = 0
+        max_cols = 0
+        occupied_cells = {}  # cells already taken by a row/col span
+
+        def is_occupied(row, col):
+            return (row, col) in occupied_cells
+
+        def mark_occupied(row, col, rowspan, colspan):
+            for r in range(row, row + rowspan):
+                for c in range(col, col + colspan):
+                    occupied_cells[(r, c)] = True
+
+        i = 0
+        while i < len(pred_structures):
+            token = pred_structures[i]
+
+            if token == "<tr>":
+                current_col = 0  # reset the current column at the start of each row
+            elif token == "</tr>":
+                current_row += 1  # row finished; move to the next row
+            elif token.startswith("<td"):
+                colspan = 1
+                rowspan = 1
+                j = i
+                if token != "<td></td>":
+                    j += 1
+                    # extract the colspan and rowspan attributes
+                    while j < len(pred_structures) and not pred_structures[
+                        j
+                    ].startswith(">"):
+                        if "colspan=" in pred_structures[j]:
+                            colspan = int(pred_structures[j].split("=")[1].strip("\"'"))
+                        elif "rowspan=" in pred_structures[j]:
+                            rowspan = int(pred_structures[j].split("=")[1].strip("\"'"))
+                        j += 1
+
+                # skip the attribute tokens that were just consumed
+                i = j
+
+                # find the next unoccupied column
+                while is_occupied(current_row, current_col):
+                    current_col += 1
+
+                # compute the logical coordinates
+                r_start = current_row
+                r_end = current_row + rowspan - 1
+                col_start = current_col
+                col_end = current_col + colspan - 1
+
+                # record the logical coordinates
+                logic_points.append([r_start, r_end, col_start, col_end])
+
+                # mark the occupied cells
+                mark_occupied(r_start, col_start, rowspan, colspan)
+
+                # advance the current column
+                current_col += colspan
+
+                # update the max row/column counts
+                max_rows = max(max_rows, r_end + 1)
+                max_cols = max(max_cols, col_end + 1)
+
+            i += 1
+
+        return logic_points
+
+    def _filter_ocr_result(self, cell_bboxes, dt_boxes, rec_res):
+        y1 = cell_bboxes[:, 1::2].min()
+        new_dt_boxes = []
+        new_rec_res = []
+
+        for box, rec in zip(dt_boxes, rec_res):
+            if np.max(box[1::2]) < y1:
+                continue
+            new_dt_boxes.append(box)
+            new_rec_res.append(rec)
+        return new_dt_boxes, new_rec_res
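To illustrate decode_logic_points above: for a single-row table whose second cell spans two columns, each <td> yields one entry of the form [row_start, row_end, col_start, col_end]. A minimal sketch (the token list mimics the structure-token stream this decoder expects):

    matcher = TableMatch()
    tokens = ["<tr>", "<td></td>", "<td", ' colspan="2"', ">", "</td>", "</tr>"]
    print(matcher.decode_logic_points(tokens))
    # [[0, 0, 0, 0], [0, 0, 1, 2]]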

+ 246 - 0
mineru/model/table/rec/slanet_plus/matcher_utils.py

@@ -0,0 +1,246 @@
+# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import copy
+import re
+
+
+def deal_isolate_span(thead_part):
+    """
+    Deal with isolated span cases in this function.
+    These are caused by wrong predictions from the structure recognition model,
+    e.g. predicting <td rowspan="2"></td> as <td></td> rowspan="2"></b></td>.
+    :param thead_part:
+    :return:
+    """
+    # 1. find out isolate span tokens.
+    isolate_pattern = (
+        r"<td></td> rowspan='(\d)+' colspan='(\d)+'></b></td>|"
+        r"<td></td> colspan='(\d)+' rowspan='(\d)+'></b></td>|"
+        r"<td></td> rowspan='(\d)+'></b></td>|"
+        r"<td></td> colspan='(\d)+'></b></td>"
+    )
+    isolate_iter = re.finditer(isolate_pattern, thead_part)
+    isolate_list = [i.group() for i in isolate_iter]
+
+    # 2. find out span number, by step 1 result.
+    span_pattern = (
+        r" rowspan='(\d)+' colspan='(\d)+'|"
+        r" colspan='(\d)+' rowspan='(\d)+'|"
+        r" rowspan='(\d)+'|"
+        r" colspan='(\d)+'"
+    )
+    corrected_list = []
+    for isolate_item in isolate_list:
+        span_part = re.search(span_pattern, isolate_item)
+        spanStr_in_isolateItem = span_part.group()
+        # 3. merge the span number into the span token format string.
+        if spanStr_in_isolateItem is not None:
+            corrected_item = f"<td{spanStr_in_isolateItem}></td>"
+            corrected_list.append(corrected_item)
+        else:
+            corrected_list.append(None)
+
+    # 4. replace original isolated token.
+    for corrected_item, isolate_item in zip(corrected_list, isolate_list):
+        if corrected_item is not None:
+            thead_part = thead_part.replace(isolate_item, corrected_item)
+        else:
+            pass
+    return thead_part
+
+
+def deal_duplicate_bb(thead_part):
+    """
+    Deal duplicate <b> or </b> after replace.
+    Keep one <b></b> in a <td></td> token.
+    :param thead_part:
+    :return:
+    """
+    # 1. find out <td></td> in <thead></thead>.
+    td_pattern = (
+        r"<td rowspan='(\d)+' colspan='(\d)+'>(.+?)</td>|"
+        r"<td colspan='(\d)+' rowspan='(\d)+'>(.+?)</td>|"
+        r"<td rowspan='(\d)+'>(.+?)</td>|"
+        r"<td colspan='(\d)+'>(.+?)</td>|"
+        r"<td>(.*?)</td>"
+    )
+    td_iter = re.finditer(td_pattern, thead_part)
+    td_list = [t.group() for t in td_iter]
+
+    # 2. check whether a <td></td> contains multiple <b></b> pairs
+    new_td_list = []
+    for td_item in td_list:
+        if td_item.count("<b>") > 1 or td_item.count("</b>") > 1:
+            # case: multiple <b></b> pairs inside one <td></td>
+            # 1. remove all <b></b>
+            td_item = td_item.replace("<b>", "").replace("</b>", "")
+            # 2. replace <td> -> <td><b>, </td> -> </b></td>.
+            td_item = td_item.replace("<td>", "<td><b>").replace("</td>", "</b></td>")
+            new_td_list.append(td_item)
+        else:
+            new_td_list.append(td_item)
+
+    # 3. replace original thead part.
+    for td_item, new_td_item in zip(td_list, new_td_list):
+        thead_part = thead_part.replace(td_item, new_td_item)
+    return thead_part
+
+
+def deal_bb(result_token):
+    """
+    In our experience, <b></b> always occurs within the <thead></thead> context.
+    This function finds all tokens inside <thead></thead> and inserts <b></b> manually.
+    :param result_token:
+    :return:
+    """
+    # find out <thead></thead> parts.
+    thead_pattern = "<thead>(.*?)</thead>"
+    if re.search(thead_pattern, result_token) is None:
+        return result_token
+    thead_part = re.search(thead_pattern, result_token).group()
+    origin_thead_part = copy.deepcopy(thead_part)
+
+    # check whether "rowspan" or "colspan" occurs in the <thead></thead> part
+    span_pattern = r"<td rowspan='(\d)+' colspan='(\d)+'>|<td colspan='(\d)+' rowspan='(\d)+'>|<td rowspan='(\d)+'>|<td colspan='(\d)+'>"
+    span_iter = re.finditer(span_pattern, thead_part)
+    span_list = [s.group() for s in span_iter]
+    has_span_in_head = len(span_list) > 0
+
+    if not has_span_in_head:
+        # branch 1: <thead></thead> contains no "rowspan" or "colspan".
+        # 1. replace <td> with <td><b>, and </td> with </b></td>
+        # 2. text-line recognition may itself emit <b> or </b>,
+        #    so collapse <b><b> to <b>, and </b></b> to </b>
+        thead_part = (
+            thead_part.replace("<td>", "<td><b>")
+            .replace("</td>", "</b></td>")
+            .replace("<b><b>", "<b>")
+            .replace("</b></b>", "</b>")
+        )
+    else:
+        # branch 2: <thead></thead> contains "rowspan" or "colspan".
+        # First, handle the rowspan/colspan cases:
+        # 1. replace > with ><b>
+        # 2. replace </td> with </b></td>
+        # 3. text-line recognition may itself emit <b> or </b>,
+        #    so collapse <b><b> to <b>, and </b></b> to </b>
+
+        # Second, handle ordinary cases as in branch 1
+
+        # replace ">" with "><b>"
+        replaced_span_list = []
+        for sp in span_list:
+            replaced_span_list.append(sp.replace(">", "><b>"))
+        for sp, rsp in zip(span_list, replaced_span_list):
+            thead_part = thead_part.replace(sp, rsp)
+
+        # replace "</td>" to "</b></td>"
+        thead_part = thead_part.replace("</td>", "</b></td>")
+
+        # remove duplicated <b> by re.sub
+        mb_pattern = "(<b>)+"
+        single_b_string = "<b>"
+        thead_part = re.sub(mb_pattern, single_b_string, thead_part)
+
+        mgb_pattern = "(</b>)+"
+        single_gb_string = "</b>"
+        thead_part = re.sub(mgb_pattern, single_gb_string, thead_part)
+
+        # ordinary cases like branch 1
+        thead_part = thead_part.replace("<td>", "<td><b>").replace("<b><b>", "<b>")
+
+    # convert <td><b></b></td> back to <td></td>: an empty cell gets no <b></b>,
+    # but a space-only cell (<td> </td>) is kept as <td><b> </b></td>
+    thead_part = thead_part.replace("<td><b></b></td>", "<td></td>")
+    # deal with duplicated <b></b>
+    thead_part = deal_duplicate_bb(thead_part)
+    # deal with isolated span tokens caused by wrong structure predictions,
+    # e.g. PMC5994107_011_00.png
+    thead_part = deal_isolate_span(thead_part)
+    # replace original result with new thead part.
+    result_token = result_token.replace(origin_thead_part, thead_part)
+    return result_token
+
+
+def deal_eb_token(master_token):
+    """
+    Post-process the empty-bbox tokens <eb></eb>, <eb1></eb1>, ...
+    emptyBboxTokenDict = {
+        "[]": '<eb></eb>',
+        "[' ']": '<eb1></eb1>',
+        "['<b>', ' ', '</b>']": '<eb2></eb2>',
+        "['\\u2028', '\\u2028']": '<eb3></eb3>',
+        "['<sup>', ' ', '</sup>']": '<eb4></eb4>',
+        "['<b>', '</b>']": '<eb5></eb5>',
+        "['<i>', ' ', '</i>']": '<eb6></eb6>',
+        "['<b>', '<i>', '</i>', '</b>']": '<eb7></eb7>',
+        "['<b>', '<i>', ' ', '</i>', '</b>']": '<eb8></eb8>',
+        "['<i>', '</i>']": '<eb9></eb9>',
+        "['<b>', ' ', '\\u2028', ' ', '\\u2028', ' ', '</b>']": '<eb10></eb10>',
+    }
+    :param master_token:
+    :return:
+    """
+    master_token = master_token.replace("<eb></eb>", "<td></td>")
+    master_token = master_token.replace("<eb1></eb1>", "<td> </td>")
+    master_token = master_token.replace("<eb2></eb2>", "<td><b> </b></td>")
+    master_token = master_token.replace("<eb3></eb3>", "<td>\u2028\u2028</td>")
+    master_token = master_token.replace("<eb4></eb4>", "<td><sup> </sup></td>")
+    master_token = master_token.replace("<eb5></eb5>", "<td><b></b></td>")
+    master_token = master_token.replace("<eb6></eb6>", "<td><i> </i></td>")
+    master_token = master_token.replace("<eb7></eb7>", "<td><b><i></i></b></td>")
+    master_token = master_token.replace("<eb8></eb8>", "<td><b><i> </i></b></td>")
+    master_token = master_token.replace("<eb9></eb9>", "<td><i></i></td>")
+    master_token = master_token.replace(
+        "<eb10></eb10>", "<td><b> \u2028 \u2028 </b></td>"
+    )
+    return master_token
+
+
+def distance(box_1, box_2):
+    x1, y1, x2, y2 = box_1
+    x3, y3, x4, y4 = box_2
+    dis = abs(x3 - x1) + abs(y3 - y1) + abs(x4 - x2) + abs(y4 - y2)
+    dis_2 = abs(x3 - x1) + abs(y3 - y1)
+    dis_3 = abs(x4 - x2) + abs(y4 - y2)
+    return dis + min(dis_2, dis_3)
+
+
+def compute_iou(rec1, rec2):
+    """
+    computing IoU
+    :param rec1: (y0, x0, y1, x1), which reflects
+            (top, left, bottom, right)
+    :param rec2: (y0, x0, y1, x1)
+    :return: scala value of IoU
+    """
+    # computing area of each rectangles
+    S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
+    S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
+
+    # computing the sum_area
+    sum_area = S_rec1 + S_rec2
+
+    # find the each edge of intersect rectangle
+    left_line = max(rec1[1], rec2[1])
+    right_line = min(rec1[3], rec2[3])
+    top_line = max(rec1[0], rec2[0])
+    bottom_line = min(rec1[2], rec2[2])
+
+    # judge if there is an intersect
+    if left_line >= right_line or top_line >= bottom_line:
+        return 0.0
+
+    intersect = (right_line - left_line) * (bottom_line - top_line)
+    return (intersect / (sum_area - intersect)) * 1.0
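As a quick sanity check of the two geometry helpers above, take box_1 = [0, 0, 10, 10] and box_2 = [1, 1, 11, 11] (both in the same corner order):

    b1, b2 = [0, 0, 10, 10], [1, 1, 11, 11]
    print(distance(b1, b2))     # 4 + min(2, 2) = 6
    print(compute_iou(b1, b2))  # 81 / (200 - 81) ≈ 0.6807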

+ 109 - 0
mineru/model/table/rec/slanet_plus/table_structure.py

@@ -0,0 +1,109 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import time
+from typing import Any, Dict, List, Tuple
+
+import numpy as np
+
+from .table_structure_utils import (
+    OrtInferSession,
+    TableLabelDecode,
+    TablePreprocess,
+    BatchTablePreprocess,
+)
+
+
+class TableStructurer:
+    def __init__(self, config: Dict[str, Any]):
+        self.preprocess_op = TablePreprocess()
+        self.batch_preprocess_op = BatchTablePreprocess()
+
+        self.session = OrtInferSession(config)
+
+        self.character = self.session.get_metadata()
+        self.postprocess_op = TableLabelDecode(self.character)
+
+    def process(self, img):
+        starttime = time.time()
+        data = {"image": img}
+        data = self.preprocess_op(data)
+        img = data[0]
+        if img is None:
+            # match the arity of the normal (tokens, bboxes, elapse) return
+            return None, None, 0
+        img = np.expand_dims(img, axis=0)
+        img = img.copy()
+
+        outputs = self.session([img])
+
+        preds = {"loc_preds": outputs[0], "structure_probs": outputs[1]}
+
+        shape_list = np.expand_dims(data[-1], axis=0)
+        post_result = self.postprocess_op(preds, [shape_list])
+
+        bbox_list = post_result["bbox_batch_list"][0]
+
+        structure_str_list = post_result["structure_batch_list"][0]
+        structure_str_list = structure_str_list[0]
+        structure_str_list = (
+            ["<html>", "<body>", "<table>"]
+            + structure_str_list
+            + ["</table>", "</body>", "</html>"]
+        )
+        elapse = time.time() - starttime
+        return structure_str_list, bbox_list, elapse
+
+    def batch_process(
+        self, img_list: List[np.ndarray]
+    ) -> List[Tuple[List[str], np.ndarray, float]]:
+        """批量处理图像列表
+        Args:
+            img_list: 图像列表
+        Returns:
+            结果列表,每个元素包含 (table_struct_str, cell_bboxes, elapse)
+        """
+        starttime = time.perf_counter()
+
+        batch_data = self.batch_preprocess_op(img_list)
+        preprocessed_images = batch_data[0]
+        shape_lists = batch_data[1]
+
+        preprocessed_images = np.array(preprocessed_images)
+        bbox_preds, struct_probs = self.session([preprocessed_images])
+
+        batch_size = preprocessed_images.shape[0]
+        results = []
+        for bbox_pred, struct_prob, shape_list in zip(
+            bbox_preds, struct_probs, shape_lists
+        ):
+            preds = {
+                "loc_preds": np.expand_dims(bbox_pred, axis=0),
+                "structure_probs": np.expand_dims(struct_prob, axis=0),
+            }
+            shape_list = np.expand_dims(shape_list, axis=0)
+            post_result = self.postprocess_op(preds, [shape_list])
+            bbox_list = post_result["bbox_batch_list"][0]
+            structure_str_list = post_result["structure_batch_list"][0]
+            structure_str_list = structure_str_list[0]
+            structure_str_list = (
+                ["<html>", "<body>", "<table>"]
+                + structure_str_list
+                + ["</table>", "</body>", "</html>"]
+            )
+            results.append((structure_str_list, bbox_list, 0))
+
+        total_elapse = time.perf_counter() - starttime
+        for i in range(len(results)):
+            results[i] = (results[i][0], results[i][1], total_elapse / batch_size)
+
+        return results
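A minimal sketch of the single-image path through TableStructurer; the ONNX file name and image path below are placeholders (in MinerU the real model path is resolved via auto_download_and_get_model_root_path):

    import cv2
    # "slanet-plus.onnx" and "table.jpg" are hypothetical paths for illustration
    structurer = TableStructurer({"model_path": "slanet-plus.onnx", "use_cuda": False})
    img = cv2.imread("table.jpg")
    tokens, cell_bboxes, elapse = structurer.process(img)
    # tokens is wrapped as ["<html>", "<body>", "<table>", ..., "</table>", "</body>", "</html>"]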

+ 570 - 0
mineru/model/table/rec/slanet_plus/table_structure_utils.py

@@ -0,0 +1,570 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import platform
+import traceback
+from enum import Enum
+from pathlib import Path
+from typing import Any, Dict, List, Tuple, Union
+
+import cv2
+import numpy as np
+from onnxruntime import (
+    GraphOptimizationLevel,
+    InferenceSession,
+    SessionOptions,
+    get_available_providers,
+    get_device,
+)
+
+from loguru import logger
+
+
+class EP(Enum):
+    CPU_EP = "CPUExecutionProvider"
+    CUDA_EP = "CUDAExecutionProvider"
+    DIRECTML_EP = "DmlExecutionProvider"
+
+
+class OrtInferSession:
+    def __init__(self, config: Dict[str, Any]):
+        self.logger = logger
+
+        model_path = config.get("model_path", None)
+        self._verify_model(model_path)
+
+        self.cfg_use_cuda = config.get("use_cuda", None)
+        self.cfg_use_dml = config.get("use_dml", None)
+
+        self.had_providers: List[str] = get_available_providers()
+        EP_list = self._get_ep_list()
+
+        sess_opt = self._init_sess_opts(config)
+        self.session = InferenceSession(
+            model_path,
+            sess_options=sess_opt,
+            providers=EP_list,
+        )
+        self._verify_providers()
+
+    @staticmethod
+    def _init_sess_opts(config: Dict[str, Any]) -> SessionOptions:
+        sess_opt = SessionOptions()
+        sess_opt.log_severity_level = 4
+        sess_opt.enable_cpu_mem_arena = False
+        sess_opt.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL
+
+        cpu_nums = os.cpu_count()
+        intra_op_num_threads = config.get("intra_op_num_threads", -1)
+        if intra_op_num_threads != -1 and 1 <= intra_op_num_threads <= cpu_nums:
+            sess_opt.intra_op_num_threads = intra_op_num_threads
+
+        inter_op_num_threads = config.get("inter_op_num_threads", -1)
+        if inter_op_num_threads != -1 and 1 <= inter_op_num_threads <= cpu_nums:
+            sess_opt.inter_op_num_threads = inter_op_num_threads
+
+        return sess_opt
+
+    def get_metadata(self, key: str = "character") -> list:
+        meta_dict = self.session.get_modelmeta().custom_metadata_map
+        content_list = meta_dict[key].splitlines()
+        return content_list
+
+    def _get_ep_list(self) -> List[Tuple[str, Dict[str, Any]]]:
+        cpu_provider_opts = {
+            "arena_extend_strategy": "kSameAsRequested",
+        }
+        EP_list = [(EP.CPU_EP.value, cpu_provider_opts)]
+
+        cuda_provider_opts = {
+            "device_id": 0,
+            "arena_extend_strategy": "kNextPowerOfTwo",
+            "cudnn_conv_algo_search": "EXHAUSTIVE",
+            "do_copy_in_default_stream": True,
+        }
+        self.use_cuda = self._check_cuda()
+        if self.use_cuda:
+            EP_list.insert(0, (EP.CUDA_EP.value, cuda_provider_opts))
+
+        self.use_directml = self._check_dml()
+        if self.use_directml:
+            self.logger.info(
+                "Windows 10 or above detected, try to use DirectML as primary provider"
+            )
+            directml_options = (
+                cuda_provider_opts if self.use_cuda else cpu_provider_opts
+            )
+            EP_list.insert(0, (EP.DIRECTML_EP.value, directml_options))
+        return EP_list
+
+    def _check_cuda(self) -> bool:
+        if not self.cfg_use_cuda:
+            return False
+
+        cur_device = get_device()
+        if cur_device == "GPU" and EP.CUDA_EP.value in self.had_providers:
+            return True
+
+        # loguru formats messages with str.format, so use f-strings rather
+        # than printf-style "%s" placeholders
+        self.logger.warning(
+            f"{EP.CUDA_EP.value} is not in available providers ({self.had_providers}). "
+            f"Use {self.had_providers[0]} inference by default."
+        )
+        self.logger.info("!!!Recommend to use rapidocr_paddle for inference on GPU.")
+        self.logger.info(
+            "(For reference only) If you want to use GPU acceleration, you must do:"
+        )
+        self.logger.info(
+            "First, uninstall all onnxruntime packages in the current environment."
+        )
+        self.logger.info(
+            "Second, install onnxruntime-gpu by `pip install onnxruntime-gpu`."
+        )
+        self.logger.info(
+            "\tNote the onnxruntime-gpu version must match your cuda and cudnn version."
+        )
+        self.logger.info(
+            "\tYou can refer to this link: https://onnxruntime.ai/docs/execution-providers/CUDA-EP.html"
+        )
+        self.logger.info(
+            f"Third, ensure {EP.CUDA_EP.value} is in the available providers list, "
+            "e.g. ['CUDAExecutionProvider', 'CPUExecutionProvider']"
+        )
+        return False
+
+    def _check_dml(self) -> bool:
+        if not self.cfg_use_dml:
+            return False
+
+        cur_os = platform.system()
+        if cur_os != "Windows":
+            self.logger.warning(
+                f"DirectML is only supported on Windows. The current OS is {cur_os}. "
+                f"Use {self.had_providers[0]} inference by default."
+            )
+            return False
+
+        cur_window_version = int(platform.release().split(".")[0])
+        if cur_window_version < 10:
+            self.logger.warning(
+                "DirectML is only supported on Windows 10 and above. "
+                f"The current Windows version is {cur_window_version}. "
+                f"Use {self.had_providers[0]} inference by default."
+            )
+            return False
+
+        if EP.DIRECTML_EP.value in self.had_providers:
+            return True
+
+        self.logger.warning(
+            f"{EP.DIRECTML_EP.value} is not in available providers ({self.had_providers}). "
+            f"Use {self.had_providers[0]} inference by default."
+        )
+        self.logger.info("If you want to use DirectML acceleration, you must do:")
+        self.logger.info(
+            "First, uninstall all onnxruntime packages in the current environment."
+        )
+        self.logger.info(
+            "Second, install onnxruntime-directml by `pip install onnxruntime-directml`"
+        )
+        self.logger.info(
+            f"Third, ensure {EP.DIRECTML_EP.value} is in the available providers list, "
+            "e.g. ['DmlExecutionProvider', 'CPUExecutionProvider']"
+        )
+        return False
+
+    def _verify_providers(self):
+        session_providers = self.session.get_providers()
+        first_provider = session_providers[0]
+
+        if self.use_cuda and first_provider != EP.CUDA_EP.value:
+            self.logger.warning(
+                f"{EP.CUDA_EP.value} is not available in the current env; "
+                f"inference automatically falls back to {first_provider}."
+            )
+
+        if self.use_directml and first_provider != EP.DIRECTML_EP.value:
+            self.logger.warning(
+                f"{EP.DIRECTML_EP.value} is not available in the current env; "
+                f"inference automatically falls back to {first_provider}."
+            )
+
+    def __call__(self, input_content: List[np.ndarray]) -> np.ndarray:
+        input_dict = dict(zip(self.get_input_names(), input_content))
+        try:
+            return self.session.run(None, input_dict)
+        except Exception as e:
+            error_info = traceback.format_exc()
+            raise ONNXRuntimeError(error_info) from e
+
+    def get_input_names(self) -> List[str]:
+        return [v.name for v in self.session.get_inputs()]
+
+    def get_output_names(self) -> List[str]:
+        return [v.name for v in self.session.get_outputs()]
+
+    def get_character_list(self, key: str = "character") -> List[str]:
+        meta_dict = self.session.get_modelmeta().custom_metadata_map
+        return meta_dict[key].splitlines()
+
+    def have_key(self, key: str = "character") -> bool:
+        meta_dict = self.session.get_modelmeta().custom_metadata_map
+        return key in meta_dict
+
+    @staticmethod
+    def _verify_model(model_path: Union[str, Path, None]):
+        if model_path is None:
+            raise ValueError("model_path is None!")
+
+        model_path = Path(model_path)
+        if not model_path.exists():
+            raise FileNotFoundError(f"{model_path} does not exist.")
+
+        if not model_path.is_file():
+            raise FileExistsError(f"{model_path} is not a file.")
+
+
+class ONNXRuntimeError(Exception):
+    pass
+
+
+class TableLabelDecode:
+    def __init__(self, dict_character, merge_no_span_structure=True, **kwargs):
+        if merge_no_span_structure:
+            if "<td></td>" not in dict_character:
+                dict_character.append("<td></td>")
+            if "<td>" in dict_character:
+                dict_character.remove("<td>")
+
+        dict_character = self.add_special_char(dict_character)
+        self.dict = {}
+        for i, char in enumerate(dict_character):
+            self.dict[char] = i
+        self.character = dict_character
+        self.td_token = ["<td>", "<td", "<td></td>"]
+
+    def __call__(self, preds, batch=None):
+        structure_probs = preds["structure_probs"]
+        bbox_preds = preds["loc_preds"]
+        shape_list = batch[-1]
+        result = self.decode(structure_probs, bbox_preds, shape_list)
+        if len(batch) == 1:  # only contains shape
+            return result
+
+        label_decode_result = self.decode_label(batch)
+        return result, label_decode_result
+
+    def decode(self, structure_probs, bbox_preds, shape_list):
+        """convert text-label into text-index."""
+        ignored_tokens = self.get_ignored_tokens()
+        end_idx = self.dict[self.end_str]
+
+        structure_idx = structure_probs.argmax(axis=2)
+        structure_probs = structure_probs.max(axis=2)
+
+        structure_batch_list = []
+        bbox_batch_list = []
+        batch_size = len(structure_idx)
+        for batch_idx in range(batch_size):
+            structure_list = []
+            bbox_list = []
+            score_list = []
+            for idx in range(len(structure_idx[batch_idx])):
+                char_idx = int(structure_idx[batch_idx][idx])
+                if idx > 0 and char_idx == end_idx:
+                    break
+
+                if char_idx in ignored_tokens:
+                    continue
+
+                text = self.character[char_idx]
+                if text in self.td_token:
+                    bbox = bbox_preds[batch_idx, idx]
+                    bbox = self._bbox_decode(bbox, shape_list[batch_idx])
+                    bbox_list.append(bbox)
+                structure_list.append(text)
+                score_list.append(structure_probs[batch_idx, idx])
+            structure_batch_list.append([structure_list, np.mean(score_list)])
+            bbox_batch_list.append(np.array(bbox_list))
+        result = {
+            "bbox_batch_list": bbox_batch_list,
+            "structure_batch_list": structure_batch_list,
+        }
+        return result
+
+    def decode_label(self, batch):
+        """convert text-label into text-index."""
+        structure_idx = batch[1]
+        gt_bbox_list = batch[2]
+        shape_list = batch[-1]
+        ignored_tokens = self.get_ignored_tokens()
+        end_idx = self.dict[self.end_str]
+
+        structure_batch_list = []
+        bbox_batch_list = []
+        batch_size = len(structure_idx)
+        for batch_idx in range(batch_size):
+            structure_list = []
+            bbox_list = []
+            for idx in range(len(structure_idx[batch_idx])):
+                char_idx = int(structure_idx[batch_idx][idx])
+                if idx > 0 and char_idx == end_idx:
+                    break
+
+                if char_idx in ignored_tokens:
+                    continue
+
+                structure_list.append(self.character[char_idx])
+
+                bbox = gt_bbox_list[batch_idx][idx]
+                if bbox.sum() != 0:
+                    bbox = self._bbox_decode(bbox, shape_list[batch_idx])
+                    bbox_list.append(bbox)
+
+            structure_batch_list.append(structure_list)
+            bbox_batch_list.append(bbox_list)
+        result = {
+            "bbox_batch_list": bbox_batch_list,
+            "structure_batch_list": structure_batch_list,
+        }
+        return result
+
+    def _bbox_decode(self, bbox, shape):
+        h, w = shape[:2]
+        bbox[0::2] *= w
+        bbox[1::2] *= h
+        return bbox
+
+    def get_ignored_tokens(self):
+        beg_idx = self.get_beg_end_flag_idx("beg")
+        end_idx = self.get_beg_end_flag_idx("end")
+        return [beg_idx, end_idx]
+
+    def get_beg_end_flag_idx(self, beg_or_end):
+        if beg_or_end == "beg":
+            return np.array(self.dict[self.beg_str])
+
+        if beg_or_end == "end":
+            return np.array(self.dict[self.end_str])
+
+        raise TypeError(f"unsupported type {beg_or_end} in get_beg_end_flag_idx")
+
+    def add_special_char(self, dict_character):
+        self.beg_str = "sos"
+        self.end_str = "eos"
+        dict_character = [self.beg_str] + dict_character + [self.end_str]
+        return dict_character
+
+
+class TablePreprocess:
+    def __init__(self):
+        self.table_max_len = 488
+        self.build_pre_process_list()
+        self.ops = self.create_operators()
+
+    def __call__(self, data):
+        """transform"""
+        if self.ops is None:
+            self.ops = []
+
+        for op in self.ops:
+            data = op(data)
+            if data is None:
+                return None
+        return data
+
+    def create_operators(
+        self,
+    ):
+        """
+        create operators based on the config
+
+        Args:
+            params(list): a dict list, used to create some operators
+        """
+        assert isinstance(
+            self.pre_process_list, list
+        ), "operator config should be a list"
+        ops = []
+        for operator in self.pre_process_list:
+            assert (
+                isinstance(operator, dict) and len(operator) == 1
+            ), "yaml format error"
+            op_name = list(operator)[0]
+            param = {} if operator[op_name] is None else operator[op_name]
+            op = eval(op_name)(**param)
+            ops.append(op)
+        return ops
+
+    def build_pre_process_list(self):
+        resize_op = {
+            "ResizeTableImage": {
+                "max_len": self.table_max_len,
+            }
+        }
+        pad_op = {
+            "PaddingTableImage": {"size": [self.table_max_len, self.table_max_len]}
+        }
+        normalize_op = {
+            "NormalizeImage": {
+                "std": [0.229, 0.224, 0.225],
+                "mean": [0.485, 0.456, 0.406],
+                "scale": "1./255.",
+                "order": "hwc",
+            }
+        }
+        to_chw_op = {"ToCHWImage": None}
+        keep_keys_op = {"KeepKeys": {"keep_keys": ["image", "shape"]}}
+        self.pre_process_list = [
+            resize_op,
+            normalize_op,
+            pad_op,
+            to_chw_op,
+            keep_keys_op,
+        ]
+
+
+class BatchTablePreprocess:
+
+    def __init__(self):
+        self.preprocess = TablePreprocess()
+
+    def __call__(
+        self, img_list: List[np.ndarray]
+    ) -> Tuple[List[np.ndarray], List[List[float]]]:
+        """批量处理图像
+
+        Args:
+            img_list: 图像列表
+
+        Returns:
+            预处理后的图像列表和形状信息列表
+        """
+        processed_imgs = []
+        shape_lists = []
+
+        for img in img_list:
+            if img is None:
+                continue
+            data = {"image": img}
+            img_processed, shape_list = self.preprocess(data)
+            processed_imgs.append(img_processed)
+            shape_lists.append(shape_list)
+        return processed_imgs, shape_lists
+
+
+class ResizeTableImage:
+    def __init__(self, max_len, resize_bboxes=False, infer_mode=False):
+        super(ResizeTableImage, self).__init__()
+        self.max_len = max_len
+        self.resize_bboxes = resize_bboxes
+        self.infer_mode = infer_mode
+
+    def __call__(self, data):
+        img = data["image"]
+        height, width = img.shape[0:2]
+        ratio = self.max_len / (max(height, width) * 1.0)
+        resize_h = int(height * ratio)
+        resize_w = int(width * ratio)
+        resize_img = cv2.resize(img, (resize_w, resize_h))
+        if self.resize_bboxes and not self.infer_mode:
+            data["bboxes"] = data["bboxes"] * ratio
+        data["image"] = resize_img
+        data["src_img"] = img
+        data["shape"] = np.array([height, width, ratio, ratio])
+        data["max_len"] = self.max_len
+        return data
+
+
+class PaddingTableImage:
+    def __init__(self, size, **kwargs):
+        super(PaddingTableImage, self).__init__()
+        self.size = size
+
+    def __call__(self, data):
+        img = data["image"]
+        pad_h, pad_w = self.size
+        padding_img = np.zeros((pad_h, pad_w, 3), dtype=np.float32)
+        height, width = img.shape[0:2]
+        padding_img[0:height, 0:width, :] = img.copy()
+        data["image"] = padding_img
+        shape = data["shape"].tolist()
+        shape.extend([pad_h, pad_w])
+        data["shape"] = np.array(shape)
+        return data
+
+
+class NormalizeImage:
+    """normalize image such as substract mean, divide std"""
+
+    def __init__(self, scale=None, mean=None, std=None, order="chw", **kwargs):
+        if isinstance(scale, str):
+            scale = eval(scale)
+        self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
+        mean = mean if mean is not None else [0.485, 0.456, 0.406]
+        std = std if std is not None else [0.229, 0.224, 0.225]
+
+        shape = (3, 1, 1) if order == "chw" else (1, 1, 3)
+        self.mean = np.array(mean).reshape(shape).astype("float32")
+        self.std = np.array(std).reshape(shape).astype("float32")
+
+    def __call__(self, data):
+        img = np.array(data["image"])
+        assert isinstance(img, np.ndarray), "invalid input 'img' in NormalizeImage"
+        data["image"] = (img.astype("float32") * self.scale - self.mean) / self.std
+        return data
+
+
+class ToCHWImage:
+    """convert hwc image to chw image"""
+
+    def __init__(self, **kwargs):
+        pass
+
+    def __call__(self, data):
+        img = np.array(data["image"])
+        data["image"] = img.transpose((2, 0, 1))
+        return data
+
+
+class KeepKeys:
+    def __init__(self, keep_keys, **kwargs):
+        self.keep_keys = keep_keys
+
+    def __call__(self, data):
+        data_list = []
+        for key in self.keep_keys:
+            data_list.append(data[key])
+        return data_list
+
+
+def trans_char_ocr_res(ocr_res):
+    word_result = []
+    for res in ocr_res:
+        score = res[2]
+        for word_box, word in zip(res[3], res[4]):
+            word_res = []
+            word_res.append(word_box)
+            word_res.append(word)
+            word_res.append(score)
+            word_result.append(word_res)
+    return word_result
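For reference, TablePreprocess chains ResizeTableImage(max_len=488) -> NormalizeImage -> PaddingTableImage([488, 488]) -> ToCHWImage -> KeepKeys(["image", "shape"]). A minimal sketch of the resulting shapes, assuming a 600x400 input (dimensions chosen for illustration):

    import numpy as np
    pre = TablePreprocess()
    img = np.zeros((600, 400, 3), dtype=np.uint8)  # HWC input
    image, shape = pre({"image": img})
    # image: float32 CHW array of shape (3, 488, 488)
    # shape: np.array([600, 400, 488/600, 488/600, 488, 488])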

+ 0 - 0
mineru/model/table/rec/unet_table/__init__.py


+ 341 - 0
mineru/model/table/rec/unet_table/main.py

@@ -0,0 +1,341 @@
+import html
+import logging
+import os
+import time
+import traceback
+from dataclasses import dataclass, asdict
+
+from typing import List, Optional, Union, Dict, Any
+import numpy as np
+import cv2
+from PIL import Image
+from loguru import logger
+from bs4 import BeautifulSoup
+
+from .table_structure_unet import TSRUnet
+
+from mineru.utils.enum_class import ModelPath
+from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
+from .table_recover import TableRecover
+from .utils import InputType, LoadImage, VisTable
+from .utils_table_recover import (
+    match_ocr_cell,
+    plot_html_table,
+    box_4_2_poly_to_box_4_1,
+    sorted_ocr_boxes,
+    gather_ocr_list_by_row,
+)
+
+
+@dataclass
+class WiredTableInput:
+    model_path: str
+    device: str = "cpu"
+
+
+@dataclass
+class WiredTableOutput:
+    pred_html: Optional[str] = None
+    cell_bboxes: Optional[np.ndarray] = None
+    logic_points: Optional[np.ndarray] = None
+    elapse: Optional[float] = None
+
+
+class WiredTableRecognition:
+    def __init__(self, config: WiredTableInput, ocr_engine=None):
+        self.table_structure = TSRUnet(asdict(config))
+        self.load_img = LoadImage()
+        self.table_recover = TableRecover()
+        self.ocr_engine = ocr_engine
+
+    def __call__(
+        self,
+        img: InputType,
+        ocr_result: Optional[List[Union[List[List[float]], str, float]]] = None,
+        **kwargs,
+    ) -> WiredTableOutput:
+        s = time.perf_counter()
+        need_ocr = True
+        col_threshold = 15
+        row_threshold = 10
+        if kwargs:
+            need_ocr = kwargs.get("need_ocr", True)
+            col_threshold = kwargs.get("col_threshold", 15)
+            row_threshold = kwargs.get("row_threshold", 10)
+        img = self.load_img(img)
+        polygons, rotated_polygons = self.table_structure(img, **kwargs)
+        if polygons is None:
+            # logging.warning("polygons is None.")
+            return WiredTableOutput("", None, None, 0.0)
+
+        try:
+            table_res, logi_points = self.table_recover(
+                rotated_polygons, row_threshold, col_threshold
+            )
+            # Convert coordinates from counter-clockwise to clockwise order so
+            # later steps match the wireless-table path
+            polygons[:, 1, :], polygons[:, 3, :] = (
+                polygons[:, 3, :].copy(),
+                polygons[:, 1, :].copy(),
+            )
+            if not need_ocr:
+                sorted_polygons, idx_list = sorted_ocr_boxes(
+                    [box_4_2_poly_to_box_4_1(box) for box in polygons]
+                )
+                return WiredTableOutput(
+                    "",
+                    sorted_polygons,
+                    logi_points[idx_list],
+                    time.perf_counter() - s,
+                )
+            cell_box_det_map, not_match_ocr_boxes = match_ocr_cell(ocr_result, polygons)
+            # If any detected cell has no OCR result, run recognition on it directly to fill the gap
+            cell_box_det_map = self.fill_blank_rec(img, polygons, cell_box_det_map)
+            # Convert to an intermediate format: fix box coordinates and gather the physical boxes, logical boxes, and OCR boxes into dicts for easier downstream processing
+            t_rec_ocr_list = self.transform_res(cell_box_det_map, polygons, logi_points)
+            # Sort the OCR results inside each cell and merge same-row items, so the output HTML preserves the text's line breaks
+            t_rec_ocr_list = self.sort_and_gather_ocr_res(t_rec_ocr_list)
+
+            logi_points = [t_box_ocr["t_logic_box"] for t_box_ocr in t_rec_ocr_list]
+            cell_box_det_map = {
+                i: [ocr_box_and_text[1] for ocr_box_and_text in t_box_ocr["t_ocr_res"]]
+                for i, t_box_ocr in enumerate(t_rec_ocr_list)
+            }
+            pred_html = plot_html_table(logi_points, cell_box_det_map)
+            polygons = np.array(polygons).reshape(-1, 8)
+            logi_points = np.array(logi_points)
+            elapse = time.perf_counter() - s
+
+        except Exception:
+            logging.warning(traceback.format_exc())
+            return WiredTableOutput("", None, None, 0.0)
+        return WiredTableOutput(pred_html, polygons, logi_points, elapse)
+
+    def transform_res(
+        self,
+        cell_box_det_map: Dict[int, List[Any]],
+        polygons: np.ndarray,
+        logi_points: List[np.ndarray],
+    ) -> List[Dict[str, Any]]:
+        res = []
+        for i in range(len(polygons)):
+            ocr_res_list = cell_box_det_map.get(i)
+            if not ocr_res_list:
+                continue
+            xmin = min([ocr_box[0][0][0] for ocr_box in ocr_res_list])
+            ymin = min([ocr_box[0][0][1] for ocr_box in ocr_res_list])
+            xmax = max([ocr_box[0][2][0] for ocr_box in ocr_res_list])
+            ymax = max([ocr_box[0][2][1] for ocr_box in ocr_res_list])
+            dict_res = {
+                # xmin, ymin, xmax, ymax
+                "t_box": [xmin, ymin, xmax, ymax],
+                # row_start,row_end,col_start,col_end
+                "t_logic_box": logi_points[i].tolist(),
+                # [[xmin, ymin, xmax, ymax], text]
+                "t_ocr_res": [
+                    [box_4_2_poly_to_box_4_1(ocr_det[0]), ocr_det[1]]
+                    for ocr_det in ocr_res_list
+                ],
+            }
+            res.append(dict_res)
+        return res
+
+    def sort_and_gather_ocr_res(self, res):
+        for i, dict_res in enumerate(res):
+            _, sorted_idx = sorted_ocr_boxes(
+                [ocr_det[0] for ocr_det in dict_res["t_ocr_res"]], threhold=0.3
+            )
+            dict_res["t_ocr_res"] = [dict_res["t_ocr_res"][i] for i in sorted_idx]
+            dict_res["t_ocr_res"] = gather_ocr_list_by_row(
+                dict_res["t_ocr_res"], threhold=0.3
+            )
+        return res
+
+    def fill_blank_rec(
+        self,
+        img: np.ndarray,
+        sorted_polygons: np.ndarray,
+        cell_box_map: Dict[int, List[str]],
+    ) -> Dict[int, List[Any]]:
+        """找到poly对应为空的框,尝试将直接将poly框直接送到识别中"""
+        bgr_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+        img_crop_info_list = []
+        img_crop_list = []
+        for i in range(sorted_polygons.shape[0]):
+            if cell_box_map.get(i):
+                continue
+            box = sorted_polygons[i]
+            if self.ocr_engine is None:
+                logger.warning(f"No OCR engine provided for box {i}: {box}")
+                continue
+            # Crop the corresponding region from the image
+            x1, y1, x2, y2 = int(box[0][0])+1, int(box[0][1])+1, int(box[2][0])-1, int(box[2][1])-1
+            if x1 >= x2 or y1 >= y2:
+                # logger.warning(f"Invalid box coordinates: {x1, y1, x2, y2}")
+                continue
+            # Skip boxes with extreme aspect ratios
+            if (x2 - x1) / (y2 - y1) > 20 or (y2 - y1) / (x2 - x1) > 20:
+                # logger.warning(f"Box {i} has invalid aspect ratio: {x1, y1, x2, y2}")
+                continue
+            img_crop = bgr_img[int(y1):int(y2), int(x1):int(x2)]
+            img_crop_list.append(img_crop)
+            img_crop_info_list.append([i, box])
+
+        if len(img_crop_list) > 0:
+            # Run OCR recognition on all crops in one batch
+            ocr_result = self.ocr_engine.ocr(img_crop_list, det=False)
+
+            if not ocr_result or not isinstance(ocr_result, list) or len(ocr_result) == 0:
+                logger.warning("OCR engine returned no results or invalid result for image crops.")
+                return cell_box_map
+            ocr_res_list = ocr_result[0]
+            if not isinstance(ocr_res_list, list) or len(ocr_res_list) != len(img_crop_list):
+                logger.warning("OCR result list length does not match image crop list length.")
+                return cell_box_map
+            for j, ocr_res in enumerate(ocr_res_list):
+                img_crop_info_list[j].append(ocr_res)
+
+            for i, box, ocr_res in img_crop_info_list:
+                # Process the OCR result
+                ocr_text, ocr_score = ocr_res
+                # logger.debug(f"OCR result for box {i}: {ocr_text} with score {ocr_score}")
+                if ocr_score < 0.6 or ocr_text in ['1','口','■','(204号', '(20', '(2', '(2号', '(20号', '号', '(204']:
+                    # logger.warning(f"Low confidence OCR result for box {i}: {ocr_text} with score {ocr_score}")
+                    box = sorted_polygons[i]
+                    cell_box_map[i] = [[box, "", 0.1]]
+                    continue
+                cell_box_map[i] = [[box, ocr_text, ocr_score]]
+
+        return cell_box_map
+
+
+def escape_html(input_string):
+    """Escape HTML Entities."""
+    return html.escape(input_string)
+
+
+def count_table_cells_physical(html_code):
+    """计算表格的物理单元格数量(合并单元格算一个)"""
+    if not html_code:
+        return 0
+
+    # Simply count the <td> and <th> tags
+    html_lower = html_code.lower()
+    td_count = html_lower.count('<td')
+    th_count = html_lower.count('<th')
+    return td_count + th_count
+
+
+class UnetTableModel:
+    def __init__(self, ocr_engine):
+        model_path = os.path.join(auto_download_and_get_model_root_path(ModelPath.unet_structure), ModelPath.unet_structure)
+        wired_input_args = WiredTableInput(model_path=model_path)
+        self.wired_table_model = WiredTableRecognition(wired_input_args, ocr_engine)
+        self.ocr_engine = ocr_engine
+
+    def predict(self, input_img, ocr_result, wireless_html_code):
+        if isinstance(input_img, Image.Image):
+            np_img = np.asarray(input_img)
+        elif isinstance(input_img, np.ndarray):
+            np_img = input_img
+        else:
+            raise ValueError("Input must be a pillow object or a numpy array.")
+        bgr_img = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)
+
+        if ocr_result is None:
+            ocr_result = self.ocr_engine.ocr(bgr_img)[0]
+            ocr_result = [
+                [item[0], escape_html(item[1][0]), item[1][1]]
+                for item in ocr_result
+                if len(item) == 2 and isinstance(item[1], tuple)
+            ]
+
+        try:
+            wired_table_results = self.wired_table_model(np_img, ocr_result)
+
+            # viser = VisTable()
+            # save_html_path = f"outputs/output.html"
+            # save_drawed_path = f"outputs/output_table_vis.jpg"
+            # save_logic_path = (
+            #     f"outputs/output_table_vis_logic.jpg"
+            # )
+            # vis_imged = viser(
+            #     np_img, wired_table_results, save_html_path, save_drawed_path, save_logic_path
+            # )
+
+            wired_html_code = wired_table_results.pred_html
+            wired_len = count_table_cells_physical(wired_html_code)
+            wireless_len = count_table_cells_physical(wireless_html_code)
+            # Difference in the number of cells detected by the two models
+            gap_of_len = wireless_len - wired_len
+            # logger.debug(f"wired table cell bboxes: {wired_len}, wireless table cell bboxes: {wireless_len}")
+
+            # Use the OCR results to count how much text each model filled in
+            wireless_text_count = 0
+            wired_text_count = 0
+            for ocr_res in ocr_result:
+                if ocr_res[1] in wireless_html_code:
+                    wireless_text_count += 1
+                if ocr_res[1] in wired_html_code:
+                    wired_text_count += 1
+            # logger.debug(f"wireless table ocr text count: {wireless_text_count}, wired table ocr text count: {wired_text_count}")
+
+            # Use an HTML parser to count empty cells
+            wireless_soup = BeautifulSoup(wireless_html_code, 'html.parser') if wireless_html_code else BeautifulSoup("", 'html.parser')
+            wired_soup = BeautifulSoup(wired_html_code, 'html.parser') if wired_html_code else BeautifulSoup("", 'html.parser')
+            # Count empty cells (no text content, or whitespace only)
+            wireless_blank_count = sum(1 for cell in wireless_soup.find_all(['td', 'th']) if not cell.text.strip())
+            wired_blank_count = sum(1 for cell in wired_soup.find_all(['td', 'th']) if not cell.text.strip())
+            # logger.debug(f"wireless table blank cell count: {wireless_blank_count}, wired table blank cell count: {wired_blank_count}")
+
+            # Count non-empty cells
+            wireless_non_blank_count = wireless_len - wireless_blank_count
+            wired_non_blank_count = wired_len - wired_blank_count
+            # Only consider switching when the wireless table has more non-empty cells than the wired table
+            switch_flag = False
+            if wireless_non_blank_count > wired_non_blank_count:
+                # Assume a non-empty table is roughly square; use the square root of the non-empty cell count as an estimate of the table scale
+                wired_table_scale = round(wired_non_blank_count ** 0.5)
+                # logger.debug(f"wireless non-blank cell count: {wireless_non_blank_count}, wired non-blank cell count: {wired_non_blank_count}, wired table scale: {wired_table_scale}")
+                # If the wireless table has at least one extra column's worth of non-empty cells, switch to the wireless result
+                wired_scale_plus_2_cols = wired_non_blank_count + (wired_table_scale * 2)
+                wired_scale_squared_plus_2_rows = wired_table_scale * (wired_table_scale + 2)
+                if (wireless_non_blank_count + 3) >= max(wired_scale_plus_2_cols, wired_scale_squared_plus_2_rows):
+                    switch_flag = True
+
+            # Decide whether to use the wireless table model's result
+            if (
+                switch_flag
+                or (0 <= gap_of_len <= 5 and wired_len <= round(wireless_len * 0.75))  # 两者相差不大但有线模型结果较少
+                or (gap_of_len == 0 and wired_len <= 4)  # 单元格数量完全相等且总量小于等于4
+                or (wired_text_count <= wireless_text_count * 0.6 and  wireless_text_count >=10) # 有线模型填入的文字明显少于无线模型
+            ):
+                # logger.debug("fall back to wireless table model")
+                html_code = wireless_html_code
+            else:
+                html_code = wired_html_code
+
+            return html_code
+        except Exception as e:
+            logger.exception(e)
+            return None
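
The wired/wireless arbitration in `UnetTableModel.predict` is heuristic: it compares physical cell counts, matched-text counts, and blank-cell counts before deciding which HTML to keep. A standalone sketch of the cell-count branch (the HTML snippets and the helper name `count_cells` are invented for illustration):

```python
def count_cells(html_code: str) -> int:
    """Count <td>/<th> tags; a merged cell counts once."""
    if not html_code:
        return 0
    lower = html_code.lower()
    return lower.count("<td") + lower.count("<th")

wired = "<table><tr><td>a</td><td></td></tr></table>"                # 2 cells
wireless = "<table><tr><td>a</td><td>b</td><td>c</td></tr></table>"  # 3 cells

wired_len, wireless_len = count_cells(wired), count_cells(wireless)
gap_of_len = wireless_len - wired_len  # 1

# One fallback condition from predict(): similar counts, but the wired
# model recovered noticeably fewer cells, so prefer the wireless HTML.
print(0 <= gap_of_len <= 5 and wired_len <= round(wireless_len * 0.75))  # True
```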

+ 214 - 0
mineru/model/table/rec/unet_table/table_recover.py

@@ -0,0 +1,214 @@
+from typing import Dict, List, Tuple
+
+import numpy as np
+
+
+class TableRecover:
+    def __init__(
+        self,
+    ):
+        pass
+
+    def __call__(
+        self, polygons: np.ndarray, rows_thresh=10, col_thresh=15
+    ) -> Dict[int, Dict]:
+        rows = self.get_rows(polygons, rows_thresh)
+        longest_col, each_col_widths, col_nums = self.get_benchmark_cols(
+            rows, polygons, col_thresh
+        )
+        each_row_heights, row_nums = self.get_benchmark_rows(rows, polygons)
+        table_res, logic_points_dict = self.get_merge_cells(
+            polygons,
+            rows,
+            row_nums,
+            col_nums,
+            longest_col,
+            each_col_widths,
+            each_row_heights,
+        )
+        logic_points = np.array(
+            [logic_points_dict[i] for i in range(len(polygons))]
+        ).astype(np.int32)
+        return table_res, logic_points
+
+    @staticmethod
+    def get_rows(polygons: np.ndarray, rows_thresh=10) -> Dict[int, List[int]]:
+        """对每个框进行行分类,框定哪个是一行的"""
+        y_axis = polygons[:, 0, 1]
+        if y_axis.size == 1:
+            return {0: [0]}
+
+        concat_y = np.array(list(zip(y_axis, y_axis[1:])))
+        minus_res = concat_y[:, 1] - concat_y[:, 0]
+
+        result = {}
+        split_idxs = np.argwhere(abs(minus_res) > rows_thresh).squeeze()
+        # If everything is on one row, assign all indices to that row
+        if split_idxs.size == 0:
+            return {0: [i for i in range(len(y_axis))]}
+        if split_idxs.ndim == 0:
+            split_idxs = split_idxs[None, ...]
+
+        if max(split_idxs) != len(minus_res):
+            split_idxs = np.append(split_idxs, len(minus_res))
+
+        start_idx = 0
+        for row_num, idx in enumerate(split_idxs):
+            if row_num != 0:
+                start_idx = split_idxs[row_num - 1] + 1
+            result.setdefault(row_num, []).extend(range(start_idx, idx + 1))
+
+        # Compute the IoU of adjacent cells within each row; merge them into one cell if it exceeds 0.2
+        return result
+
+    def get_benchmark_cols(
+        self, rows: Dict[int, List], polygons: np.ndarray, col_thresh=15
+    ) -> Tuple[np.ndarray, List[float], int]:
+        longest_col = max(rows.values(), key=lambda x: len(x))
+        longest_col_points = polygons[longest_col]
+        longest_x_start = list(longest_col_points[:, 0, 0])
+        longest_x_end = list(longest_col_points[:, 2, 0])
+        min_x = longest_x_start[0]
+        max_x = longest_x_end[-1]
+
+        # Update the column boundaries from each cell's starting x coordinate
+        # 2025.2.22 --- fix: the benchmark (longest) row could miss the last column
+        def update_longest_col(col_x_list, cur_v, min_x_, max_x_, insert_last):
+            for i, v in enumerate(col_x_list):
+                if cur_v - col_thresh <= v <= cur_v + col_thresh:
+                    break
+                if cur_v < min_x_:
+                    col_x_list.insert(0, cur_v)
+                    min_x_ = cur_v
+                    break
+                if cur_v > max_x_:
+                    if insert_last:
+                        col_x_list.append(cur_v)
+                    max_x_ = cur_v
+                    break
+                if cur_v < v:
+                    col_x_list.insert(i, cur_v)
+                    break
+            return min_x_, max_x_
+
+        for row_value in rows.values():
+            cur_row_start = list(polygons[row_value][:, 0, 0])
+            cur_row_end = list(polygons[row_value][:, 2, 0])
+            for idx, (cur_v_start, cur_v_end) in enumerate(
+                zip(cur_row_start, cur_row_end)
+            ):
+                min_x, max_x = update_longest_col(
+                    longest_x_start, cur_v_start, min_x, max_x, True
+                )
+                min_x, max_x = update_longest_col(
+                    longest_x_start, cur_v_end, min_x, max_x, False
+                )
+
+        longest_x_start = np.array(longest_x_start)
+        each_col_widths = (longest_x_start[1:] - longest_x_start[:-1]).tolist()
+        each_col_widths.append(max_x - longest_x_start[-1])
+        col_nums = longest_x_start.shape[0]
+        return longest_x_start, each_col_widths, col_nums
+
+    def get_benchmark_rows(
+        self, rows: Dict[int, List], polygons: np.ndarray
+    ) -> Tuple[np.ndarray, List[float], int]:
+        leftmost_cell_idxs = [v[0] for v in rows.values()]
+        benchmark_x = polygons[leftmost_cell_idxs][:, 0, 1]
+
+        each_row_heights = (benchmark_x[1:] - benchmark_x[:-1]).tolist()
+
+        # Use the largest cell height in the bottom row as that row's height
+        bottommost_idxs = list(rows.values())[-1]
+        bottommost_boxes = polygons[bottommost_idxs]
+        # fix: was self.compute_L2(v[3, :], v[0, :]); v is counter-clockwise, so v[3] is top-right, v[0] is top-left, and v[1] is bottom-left
+        max_height = max([self.compute_L2(v[1, :], v[0, :]) for v in bottommost_boxes])
+        each_row_heights.append(max_height)
+
+        row_nums = benchmark_x.shape[0]
+        return each_row_heights, row_nums
+
+    @staticmethod
+    def compute_L2(a1: np.ndarray, a2: np.ndarray) -> float:
+        return np.linalg.norm(a2 - a1)
+
+    def get_merge_cells(
+        self,
+        polygons: np.ndarray,
+        rows: Dict,
+        row_nums: int,
+        col_nums: int,
+        longest_col: np.ndarray,
+        each_col_widths: List[float],
+        each_row_heights: List[float],
+    ) -> Dict[int, Dict[int, int]]:
+        col_res_merge, row_res_merge = {}, {}
+        logic_points = {}
+        merge_thresh = 10
+        for cur_row, col_list in rows.items():
+            one_col_result, one_row_result = {}, {}
+            for one_col in col_list:
+                box = polygons[one_col]
+                box_width = self.compute_L2(box[3, :], box[0, :])
+
+                # The start is not necessarily 0; combine the values already assigned with the x coordinate to pick the starting column
+                loc_col_idx = np.argmin(np.abs(longest_col - box[0, 0]))
+                col_start = max(sum(one_col_result.values()), loc_col_idx)
+
+                # Work out how many cells to merge in the column direction
+                for i in range(col_start, col_nums):
+                    col_cum_sum = sum(each_col_widths[col_start : i + 1])
+                    if i == col_start and col_cum_sum > box_width:
+                        one_col_result[one_col] = 1
+                        break
+                    elif abs(col_cum_sum - box_width) <= merge_thresh:
+                        one_col_result[one_col] = i + 1 - col_start
+                        break
+                    # A correction is required here, otherwise columns interleave once past the threshold
+                    elif col_cum_sum > box_width:
+                        idx = (
+                            i
+                            if abs(col_cum_sum - box_width)
+                            < abs(col_cum_sum - each_col_widths[i] - box_width)
+                            else i - 1
+                        )
+                        one_col_result[one_col] = idx + 1 - col_start
+                        break
+                else:
+                    one_col_result[one_col] = col_nums - col_start
+                col_end = one_col_result[one_col] + col_start - 1
+                box_height = self.compute_L2(box[1, :], box[0, :])
+                row_start = cur_row
+                for j in range(row_start, row_nums):
+                    row_cum_sum = sum(each_row_heights[row_start : j + 1])
+                    # box_height may span several rows, so test row by row for the closest cumulative height
+                    # if row_cum_sum already exceeds box_height on the first try, a row was probably lost
+                    if j == row_start and row_cum_sum > box_height:
+                        one_row_result[one_col] = 1
+                        break
+                    elif abs(box_height - row_cum_sum) <= merge_thresh:
+                        one_row_result[one_col] = j + 1 - row_start
+                        break
+                    # A correction is required here, otherwise rows interleave once past the threshold
+                    elif row_cum_sum > box_height:
+                        idx = (
+                            j
+                            if abs(row_cum_sum - box_height)
+                            < abs(row_cum_sum - each_row_heights[j] - box_height)
+                            else j - 1
+                        )
+                        one_row_result[one_col] = idx + 1 - row_start
+                        break
+                else:
+                    one_row_result[one_col] = row_nums - row_start
+                row_end = one_row_result[one_col] + row_start - 1
+                logic_points[one_col] = np.array(
+                    [row_start, row_end, col_start, col_end]
+                )
+            col_res_merge[cur_row] = one_col_result
+            row_res_merge[cur_row] = one_row_result
+
+        res = {}
+        for i, (c, r) in enumerate(zip(col_res_merge.values(), row_res_merge.values())):
+            res[i] = {k: [cc, r[k]] for k, cc in c.items()}
+        return res, logic_points
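
`get_rows` assigns boxes to rows by scanning the (already top-to-bottom sorted) top-edge y coordinates and splitting wherever consecutive values jump by more than `rows_thresh`. A simplified standalone sketch of that grouping (it skips the duplicate-index guard in the method above; the sample coordinates are invented):

```python
import numpy as np

# Top-left y coordinate of each cell polygon, already sorted top to bottom.
y_axis = np.array([10, 12, 11, 48, 50, 90])

rows_thresh = 10
gaps = y_axis[1:] - y_axis[:-1]                                 # [2, -1, 37, 2, 40]
split_idxs = np.argwhere(np.abs(gaps) > rows_thresh).squeeze()  # [2, 4]

rows, start = {}, 0
split_idxs = np.append(split_idxs, len(gaps))  # make sure the last row closes
for row_num, idx in enumerate(split_idxs):
    rows.setdefault(row_num, []).extend(range(start, idx + 1))
    start = idx + 1

print(rows)  # {0: [0, 1, 2], 1: [3, 4], 2: [5]}
```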

+ 206 - 0
mineru/model/table/rec/unet_table/table_structure_unet.py

@@ -0,0 +1,206 @@
+import copy
+import math
+from typing import Optional, Dict, Any, Tuple
+
+import cv2
+import numpy as np
+from skimage import measure
+from .utils import OrtInferSession, resize_img
+from .utils_table_line_rec import (
+    get_table_line,
+    final_adjust_lines,
+    min_area_rect_box,
+    draw_lines,
+    adjust_lines,
+)
+from .utils_table_recover import (
+    sorted_ocr_boxes,
+    box_4_2_poly_to_box_4_1,
+)
+
+
+class TSRUnet:
+    def __init__(self, config: Dict):
+        self.K = 1000
+        self.MK = 4000
+        self.mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
+        self.std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
+        self.inp_height = 1024
+        self.inp_width = 1024
+
+        self.session = OrtInferSession(config)
+
+    def __call__(
+        self, img: np.ndarray, **kwargs
+    ) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
+        img_info = self.preprocess(img)
+        pred = self.infer(img_info)
+        polygons, rotated_polygons = self.postprocess(img, pred, **kwargs)
+        if polygons.size == 0:
+            return None, None
+        polygons = polygons.reshape(polygons.shape[0], 4, 2)
+        polygons[:, 3, :], polygons[:, 1, :] = (
+            polygons[:, 1, :].copy(),
+            polygons[:, 3, :].copy(),
+        )
+        rotated_polygons = rotated_polygons.reshape(rotated_polygons.shape[0], 4, 2)
+        rotated_polygons[:, 3, :], rotated_polygons[:, 1, :] = (
+            rotated_polygons[:, 1, :].copy(),
+            rotated_polygons[:, 3, :].copy(),
+        )
+        _, idx = sorted_ocr_boxes(
+            [box_4_2_poly_to_box_4_1(poly_box) for poly_box in rotated_polygons],
+            threhold=0.4,
+        )
+        polygons = polygons[idx]
+        rotated_polygons = rotated_polygons[idx]
+        return polygons, rotated_polygons
+
+    def preprocess(self, img) -> Dict[str, Any]:
+        scale = (self.inp_height, self.inp_width)
+        img, _, _ = resize_img(img, scale, True)
+        img = img.copy().astype(np.float32)
+        assert img.dtype != np.uint8
+        mean = np.float64(self.mean.reshape(1, -1))
+        stdinv = 1 / np.float64(self.std.reshape(1, -1))
+        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)  # inplace
+        cv2.subtract(img, mean, img)  # inplace
+        cv2.multiply(img, stdinv, img)  # inplace
+        img = img.transpose(2, 0, 1)
+        images = img[None, :]
+        return {"img": images}
+
+    def infer(self, input):
+        result = self.session(input["img"][None, ...])[0][0]
+        result = result[0].astype(np.uint8)
+        return result
+
+    def postprocess(self, img, pred, **kwargs):
+        row = kwargs.get("row", 50) if kwargs else 50
+        col = kwargs.get("col", 30) if kwargs else 30
+        h_lines_threshold = kwargs.get("h_lines_threshold", 100) if kwargs else 100
+        v_lines_threshold = kwargs.get("v_lines_threshold", 15) if kwargs else 15
+        angle = kwargs.get("angle", 50) if kwargs else 50
+        enhance_box_line = kwargs.get("enhance_box_line", True) if kwargs else True
+        morph_close = (
+            kwargs.get("morph_close", enhance_box_line) if kwargs else enhance_box_line
+        )  # whether to apply a morphological close to find more small boxes
+        more_h_lines = (
+            kwargs.get("more_h_lines", enhance_box_line) if kwargs else enhance_box_line
+        )  # whether to adjust lines to find more horizontal lines
+        more_v_lines = (
+            kwargs.get("more_v_lines", enhance_box_line) if kwargs else enhance_box_line
+        )  # whether to adjust lines to find more vertical lines
+        extend_line = (
+            kwargs.get("extend_line", enhance_box_line) if kwargs else enhance_box_line
+        )  # whether to extend segments so their endpoints connect
+        # whether to apply rotation correction
+        rotated_fix = kwargs.get("rotated_fix", True) if kwargs else True
+        ori_shape = img.shape
+        pred = np.uint8(pred)
+        hpred = copy.deepcopy(pred)  # horizontal lines
+        vpred = copy.deepcopy(pred)  # vertical lines
+        whereh = np.where(hpred == 1)
+        wherev = np.where(vpred == 2)
+        hpred[wherev] = 0
+        vpred[whereh] = 0
+
+        hpred = cv2.resize(hpred, (ori_shape[1], ori_shape[0]))
+        vpred = cv2.resize(vpred, (ori_shape[1], ori_shape[0]))
+
+        h, w = pred.shape
+        hors_k = int(math.sqrt(w) * 1.2)
+        vert_k = int(math.sqrt(h) * 1.2)
+        hkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (hors_k, 1))
+        vkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vert_k))
+        vpred = cv2.morphologyEx(
+            vpred, cv2.MORPH_CLOSE, vkernel, iterations=1
+        )  # closing: dilation followed by erosion
+        if morph_close:
+            hpred = cv2.morphologyEx(hpred, cv2.MORPH_CLOSE, hkernel, iterations=1)
+        colboxes = get_table_line(vpred, axis=1, lineW=col)  # vertical lines
+        rowboxes = get_table_line(hpred, axis=0, lineW=row)  # horizontal lines
+        rboxes_row_, rboxes_col_ = [], []
+        if more_h_lines:
+            rboxes_row_ = adjust_lines(rowboxes, alph=h_lines_threshold, angle=angle)
+        if more_v_lines:
+            rboxes_col_ = adjust_lines(colboxes, alph=v_lines_threshold, angle=angle)
+        rowboxes += rboxes_row_
+        colboxes += rboxes_col_
+        if extend_line:
+            rowboxes, colboxes = final_adjust_lines(rowboxes, colboxes)
+        line_img = np.zeros(img.shape[:2], dtype="uint8")
+        line_img = draw_lines(line_img, rowboxes + colboxes, color=255, lineW=2)
+        rotated_angle = self.cal_rotate_angle(line_img)
+        if rotated_fix and abs(rotated_angle) > 0.3:
+            rotated_line_img = self.rotate_image(line_img, rotated_angle)
+            rotated_polygons = self.cal_region_boxes(rotated_line_img)
+            polygons = self.unrotate_polygons(
+                rotated_polygons, rotated_angle, line_img.shape
+            )
+        else:
+            polygons = self.cal_region_boxes(line_img)
+            rotated_polygons = polygons.copy()
+        return polygons, rotated_polygons
+
+    def cal_region_boxes(self, tmp):
+        labels = measure.label(tmp < 255, connectivity=2)  # 8连通区域标记
+        regions = measure.regionprops(labels)
+        ceilboxes = min_area_rect_box(
+            regions,
+            False,
+            tmp.shape[1],
+            tmp.shape[0],
+            filtersmall=True,
+            adjust_box=False,
+        )  # last argument changed to False
+        return np.array(ceilboxes)
+
+    def cal_rotate_angle(self, tmp):
+        # Compute the outermost rotated bounding box
+        contours, _ = cv2.findContours(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+        if not contours:
+            return 0
+        largest_contour = max(contours, key=cv2.contourArea)
+        rect = cv2.minAreaRect(largest_contour)
+        # Compute the rotation angle
+        angle = rect[2]
+        if angle < -45:
+            angle += 90
+        elif angle > 45:
+            angle -= 90
+        return angle
+
+    def rotate_image(self, image, angle):
+        # Get the image center
+        (h, w) = image.shape[:2]
+        center = (w // 2, h // 2)
+
+        # Compute the rotation matrix
+        M = cv2.getRotationMatrix2D(center, angle, 1.0)
+
+        # Apply the rotation
+        rotated_image = cv2.warpAffine(
+            image, M, (w, h), flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_REPLICATE
+        )
+
+        return rotated_image
+
+    def unrotate_polygons(
+        self, polygons: np.ndarray, angle: float, img_shape: tuple
+    ) -> np.ndarray:
+        # Rotate the polygons back to their original positions
+        (h, w) = img_shape
+        center = (w // 2, h // 2)
+        M_inv = cv2.getRotationMatrix2D(center, -angle, 1.0)
+
+        # Reshape (N, 8) to (N, 4, 2)
+        polygons_reshaped = polygons.reshape(-1, 4, 2)
+
+        # Batch inverse rotation
+        unrotated_polygons = cv2.transform(polygons_reshaped, M_inv)
+
+        # Reshape (N, 4, 2) back to (N, 8)
+        unrotated_polygons = unrotated_polygons.reshape(-1, 8)
+
+        return unrotated_polygons
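
`rotate_image` and `unrotate_polygons` form a plain affine round trip around the image center; a quick sanity check using the same cv2 calls (sample polygon and angle are invented):

```python
import cv2
import numpy as np

h, w = 100, 200
center = (w // 2, h // 2)
angle = 5.0

# One quadrilateral, shape (N, 4, 2) as in unrotate_polygons.
poly = np.array([[[20, 20], [60, 20], [60, 40], [20, 40]]], dtype=np.float32)

M = cv2.getRotationMatrix2D(center, angle, 1.0)       # forward rotation
M_inv = cv2.getRotationMatrix2D(center, -angle, 1.0)  # inverse rotation

rotated = cv2.transform(poly, M)
restored = cv2.transform(rotated, M_inv)

print(np.allclose(poly, restored, atol=1e-3))  # True: round trip recovers the box
```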

+ 492 - 0
mineru/model/table/rec/unet_table/utils.py

@@ -0,0 +1,492 @@
+import os
+import traceback
+from enum import Enum
+from io import BytesIO
+from pathlib import Path
+from typing import List, Union, Dict, Any, Tuple, Optional
+
+import cv2
+import loguru
+import numpy as np
+from onnxruntime import (
+    GraphOptimizationLevel,
+    InferenceSession,
+    SessionOptions,
+    get_available_providers,
+)
+from PIL import Image, UnidentifiedImageError
+
+
+root_dir = Path(__file__).resolve().parent
+InputType = Union[str, np.ndarray, bytes, Path]
+
+
+class EP(Enum):
+    CPU_EP = "CPUExecutionProvider"
+
+
+class OrtInferSession:
+    def __init__(self, config: Dict[str, Any]):
+        self.logger = loguru.logger
+
+        model_path = config.get("model_path", None)
+
+        self.had_providers: List[str] = get_available_providers()
+        EP_list = self._get_ep_list()
+
+        sess_opt = self._init_sess_opts(config)
+        self.session = InferenceSession(
+            model_path,
+            sess_options=sess_opt,
+            providers=EP_list,
+        )
+
+    @staticmethod
+    def _init_sess_opts(config: Dict[str, Any]) -> SessionOptions:
+        sess_opt = SessionOptions()
+        sess_opt.log_severity_level = 4
+        sess_opt.enable_cpu_mem_arena = False
+        sess_opt.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL
+
+        cpu_nums = os.cpu_count()
+        intra_op_num_threads = config.get("intra_op_num_threads", -1)
+        if intra_op_num_threads != -1 and 1 <= intra_op_num_threads <= cpu_nums:
+            sess_opt.intra_op_num_threads = intra_op_num_threads
+
+        inter_op_num_threads = config.get("inter_op_num_threads", -1)
+        if inter_op_num_threads != -1 and 1 <= inter_op_num_threads <= cpu_nums:
+            sess_opt.inter_op_num_threads = inter_op_num_threads
+
+        return sess_opt
+
+    def _get_ep_list(self) -> List[Tuple[str, Dict[str, Any]]]:
+        cpu_provider_opts = {
+            "arena_extend_strategy": "kSameAsRequested",
+        }
+        EP_list = [(EP.CPU_EP.value, cpu_provider_opts)]
+
+        return EP_list
+
+    def __call__(self, input_content: List[np.ndarray]) -> np.ndarray:
+        input_dict = dict(zip(self.get_input_names(), input_content))
+        try:
+            return self.session.run(None, input_dict)
+        except Exception as e:
+            error_info = traceback.format_exc()
+            raise ONNXRuntimeError(error_info) from e
+
+    def get_input_names(self) -> List[str]:
+        return [v.name for v in self.session.get_inputs()]
+
+
+class ONNXRuntimeError(Exception):
+    pass
+
+
+class LoadImage:
+    def __init__(
+        self,
+    ):
+        pass
+
+    def __call__(self, img: InputType) -> np.ndarray:
+        if not isinstance(img, InputType.__args__):
+            raise LoadImageError(
+                f"The img type {type(img)} does not in {InputType.__args__}"
+            )
+
+        img = self.load_img(img)
+        img = self.convert_img(img)
+        return img
+
+    def load_img(self, img: InputType) -> np.ndarray:
+        if isinstance(img, (str, Path)):
+            self.verify_exist(img)
+            try:
+                img = np.array(Image.open(img))
+            except UnidentifiedImageError as e:
+                raise LoadImageError(f"cannot identify image file {img}") from e
+            return img
+
+        if isinstance(img, bytes):
+            img = np.array(Image.open(BytesIO(img)))
+            return img
+
+        if isinstance(img, np.ndarray):
+            return img
+
+        raise LoadImageError(f"{type(img)} is not supported!")
+
+    def convert_img(self, img: np.ndarray):
+        if img.ndim == 2:
+            return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+
+        if img.ndim == 3:
+            channel = img.shape[2]
+            if channel == 1:
+                return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+
+            if channel == 2:
+                return self.cvt_two_to_three(img)
+
+            if channel == 4:
+                return self.cvt_four_to_three(img)
+
+            if channel == 3:
+                return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+
+            raise LoadImageError(
+                f"The channel({channel}) of the img is not in [1, 2, 3, 4]"
+            )
+
+        raise LoadImageError(f"The ndim({img.ndim}) of the img is not in [2, 3]")
+
+    @staticmethod
+    def cvt_four_to_three(img: np.ndarray) -> np.ndarray:
+        """RGBA → BGR"""
+        r, g, b, a = cv2.split(img)
+        new_img = cv2.merge((b, g, r))
+
+        not_a = cv2.bitwise_not(a)
+        not_a = cv2.cvtColor(not_a, cv2.COLOR_GRAY2BGR)
+
+        new_img = cv2.bitwise_and(new_img, new_img, mask=a)
+        new_img = cv2.add(new_img, not_a)
+        return new_img
+
+    @staticmethod
+    def cvt_two_to_three(img: np.ndarray) -> np.ndarray:
+        """gray + alpha → BGR"""
+        img_gray = img[..., 0]
+        img_bgr = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2BGR)
+
+        img_alpha = img[..., 1]
+        not_a = cv2.bitwise_not(img_alpha)
+        not_a = cv2.cvtColor(not_a, cv2.COLOR_GRAY2BGR)
+
+        new_img = cv2.bitwise_and(img_bgr, img_bgr, mask=img_alpha)
+        new_img = cv2.add(new_img, not_a)
+        return new_img
+
+    @staticmethod
+    def verify_exist(file_path: Union[str, Path]):
+        if not Path(file_path).exists():
+            raise LoadImageError(f"{file_path} does not exist.")
+
+
+class LoadImageError(Exception):
+    pass
+
+
+# Pillow >=v9.1.0 use a slightly different naming scheme for filters.
+# Set pillow_interp_codes according to the naming scheme used.
+if Image is not None:
+    if hasattr(Image, "Resampling"):
+        pillow_interp_codes = {
+            "nearest": Image.Resampling.NEAREST,
+            "bilinear": Image.Resampling.BILINEAR,
+            "bicubic": Image.Resampling.BICUBIC,
+            "box": Image.Resampling.BOX,
+            "lanczos": Image.Resampling.LANCZOS,
+            "hamming": Image.Resampling.HAMMING,
+        }
+    else:
+        pillow_interp_codes = {
+            "nearest": Image.NEAREST,
+            "bilinear": Image.BILINEAR,
+            "bicubic": Image.BICUBIC,
+            "box": Image.BOX,
+            "lanczos": Image.LANCZOS,
+            "hamming": Image.HAMMING,
+        }
+
+cv2_interp_codes = {
+    "nearest": cv2.INTER_NEAREST,
+    "bilinear": cv2.INTER_LINEAR,
+    "bicubic": cv2.INTER_CUBIC,
+    "area": cv2.INTER_AREA,
+    "lanczos": cv2.INTER_LANCZOS4,
+}
+
+
+def resize_img(img, scale, keep_ratio=True):
+    if keep_ratio:
+        # Area interpolation is more faithful when downscaling
+        if min(img.shape[:2]) > min(scale):
+            interpolation = "area"
+        else:
+            interpolation = "bicubic"  # bilinear
+        img_new, scale_factor = imrescale(
+            img, scale, return_scale=True, interpolation=interpolation
+        )
+        # the w_scale and h_scale has minor difference
+        # a real fix should be done in the mmcv.imrescale in the future
+        new_h, new_w = img_new.shape[:2]
+        h, w = img.shape[:2]
+        w_scale = new_w / w
+        h_scale = new_h / h
+    else:
+        img_new, w_scale, h_scale = imresize(img, scale, return_scale=True)
+    return img_new, w_scale, h_scale
+
+
+def imrescale(img, scale, return_scale=False, interpolation="bilinear", backend=None):
+    """Resize image while keeping the aspect ratio.
+
+    Args:
+        img (ndarray): The input image.
+        scale (float | tuple[int]): The scaling factor or maximum size.
+            If it is a float number, then the image will be rescaled by this
+            factor, else if it is a tuple of 2 integers, then the image will
+            be rescaled as large as possible within the scale.
+        return_scale (bool): Whether to return the scaling factor besides the
+            rescaled image.
+        interpolation (str): Same as :func:`resize`.
+        backend (str | None): Same as :func:`resize`.
+
+    Returns:
+        ndarray: The rescaled image.
+    """
+    h, w = img.shape[:2]
+    new_size, scale_factor = rescale_size((w, h), scale, return_scale=True)
+    rescaled_img = imresize(img, new_size, interpolation=interpolation, backend=backend)
+    if return_scale:
+        return rescaled_img, scale_factor
+    else:
+        return rescaled_img
+
+
+def imresize(
+    img, size, return_scale=False, interpolation="bilinear", out=None, backend=None
+):
+    """Resize image to a given size.
+
+    Args:
+        img (ndarray): The input image.
+        size (tuple[int]): Target size (w, h).
+        return_scale (bool): Whether to return `w_scale` and `h_scale`.
+        interpolation (str): Interpolation method, accepted values are
+            "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
+            backend, "nearest", "bilinear" for 'pillow' backend.
+        out (ndarray): The output destination.
+        backend (str | None): The image resize backend type. Options are `cv2`,
+            `pillow`, `None`. If backend is None, the global imread_backend
+            specified by ``mmcv.use_backend()`` will be used. Default: None.
+
+    Returns:
+        tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
+        `resized_img`.
+    """
+    h, w = img.shape[:2]
+    if backend is None:
+        backend = "cv2"
+    if backend not in ["cv2", "pillow"]:
+        raise ValueError(
+            f"backend: {backend} is not supported for resize."
+            f"Supported backends are 'cv2', 'pillow'"
+        )
+
+    if backend == "pillow":
+        assert img.dtype == np.uint8, "Pillow backend only support uint8 type"
+        pil_image = Image.fromarray(img)
+        pil_image = pil_image.resize(size, pillow_interp_codes[interpolation])
+        resized_img = np.array(pil_image)
+    else:
+        resized_img = cv2.resize(
+            img, size, dst=out, interpolation=cv2_interp_codes[interpolation]
+        )
+    if not return_scale:
+        return resized_img
+    else:
+        w_scale = size[0] / w
+        h_scale = size[1] / h
+        return resized_img, w_scale, h_scale
+
+
+def rescale_size(old_size, scale, return_scale=False):
+    """Calculate the new size to be rescaled to.
+
+    Args:
+        old_size (tuple[int]): The old size (w, h) of image.
+        scale (float | tuple[int]): The scaling factor or maximum size.
+            If it is a float number, then the image will be rescaled by this
+            factor, else if it is a tuple of 2 integers, then the image will
+            be rescaled as large as possible within the scale.
+        return_scale (bool): Whether to return the scaling factor besides the
+            rescaled image size.
+
+    Returns:
+        tuple[int]: The new rescaled image size.
+    """
+    w, h = old_size
+    if isinstance(scale, (float, int)):
+        if scale <= 0:
+            raise ValueError(f"Invalid scale {scale}, must be positive.")
+        scale_factor = scale
+    elif isinstance(scale, tuple):
+        max_long_edge = max(scale)
+        max_short_edge = min(scale)
+        scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w))
+    else:
+        raise TypeError(
+            f"Scale must be a number or tuple of int, but got {type(scale)}"
+        )
+
+    new_size = _scale_size((w, h), scale_factor)
+
+    if return_scale:
+        return new_size, scale_factor
+    else:
+        return new_size
+
+
+def _scale_size(size, scale):
+    """Rescale a size by a ratio.
+
+    Args:
+        size (tuple[int]): (w, h).
+        scale (float | tuple(float)): Scaling factor.
+
+    Returns:
+        tuple[int]: scaled size.
+    """
+    if isinstance(scale, (float, int)):
+        scale = (scale, scale)
+    w, h = size
+    return int(w * float(scale[0]) + 0.5), int(h * float(scale[1]) + 0.5)
+
+
+class VisTable:
+    def __init__(self):
+        self.load_img = LoadImage()
+
+    def __call__(
+        self,
+        img_path: Union[str, Path],
+        table_results,
+        save_html_path: Optional[Union[str, Path]] = None,
+        save_drawed_path: Optional[Union[str, Path]] = None,
+        save_logic_path: Optional[Union[str, Path]] = None,
+    ):
+        if save_html_path:
+            html_with_border = self.insert_border_style(table_results.pred_html)
+            self.save_html(save_html_path, html_with_border)
+
+        table_cell_bboxes = table_results.cell_bboxes
+        table_logic_points = table_results.logic_points
+        if table_cell_bboxes is None:
+            return None
+
+        img = self.load_img(img_path)
+
+        dims_bboxes = table_cell_bboxes.shape[1]
+        if dims_bboxes == 4:
+            drawed_img = self.draw_rectangle(img, table_cell_bboxes)
+        elif dims_bboxes == 8:
+            drawed_img = self.draw_polylines(img, table_cell_bboxes)
+        else:
+            raise ValueError("Shape of table bounding boxes is not between in 4 or 8.")
+
+        if save_drawed_path:
+            self.save_img(save_drawed_path, drawed_img)
+
+        if save_logic_path:
+            polygons = [[box[0], box[1], box[4], box[5]] for box in table_cell_bboxes]
+            self.plot_rec_box_with_logic_info(
+                img, save_logic_path, table_logic_points, polygons
+            )
+        return drawed_img
+
+    def insert_border_style(self, table_html_str: str):
+        style_res = """<meta charset="UTF-8"><style>
+        table {
+            border-collapse: collapse;
+            width: 100%;
+        }
+        th, td {
+            border: 1px solid black;
+            padding: 8px;
+            text-align: center;
+        }
+        th {
+            background-color: #f2f2f2;
+        }
+                    </style>"""
+
+        prefix_table, suffix_table = table_html_str.split("<body>")
+        html_with_border = f"{prefix_table}{style_res}<body>{suffix_table}"
+        return html_with_border
+
+    def plot_rec_box_with_logic_info(
+        self, img, output_path, logic_points, sorted_polygons
+    ):
+        """
+        :param img_path
+        :param output_path
+        :param logic_points: [row_start,row_end,col_start,col_end]
+        :param sorted_polygons: [xmin,ymin,xmax,ymax]
+        :return:
+        """
+        # Pad the image on the right to leave room for annotations
+        img = cv2.copyMakeBorder(
+            img, 0, 0, 0, 100, cv2.BORDER_CONSTANT, value=[255, 255, 255]
+        )
+        # Draw a rectangle for each polygon
+        for idx, polygon in enumerate(sorted_polygons):
+            x0, y0, x1, y1 = polygon[0], polygon[1], polygon[2], polygon[3]
+            x0 = round(x0)
+            y0 = round(y0)
+            x1 = round(x1)
+            y1 = round(y1)
+            cv2.rectangle(img, (x0, y0), (x1, y1), (0, 0, 255), 1)
+            # Larger font size and line width for readability
+            font_scale = 0.9  # was 0.5
+            thickness = 1  # was 1
+            logic_point = logic_points[idx]
+            cv2.putText(
+                img,
+                f"row: {logic_point[0]}-{logic_point[1]}",
+                (x0 + 3, y0 + 8),
+                cv2.FONT_HERSHEY_PLAIN,
+                font_scale,
+                (0, 0, 255),
+                thickness,
+            )
+            cv2.putText(
+                img,
+                f"col: {logic_point[2]}-{logic_point[3]}",
+                (x0 + 3, y0 + 18),
+                cv2.FONT_HERSHEY_PLAIN,
+                font_scale,
+                (0, 0, 255),
+                thickness,
+            )
+        os.makedirs(os.path.dirname(output_path), exist_ok=True)
+        # Save the annotated image once, after all boxes are drawn
+        self.save_img(output_path, img)
+
+    @staticmethod
+    def draw_rectangle(img: np.ndarray, boxes: np.ndarray) -> np.ndarray:
+        img_copy = img.copy()
+        for box in boxes.astype(int):
+            x1, y1, x2, y2 = box
+            cv2.rectangle(img_copy, (x1, y1), (x2, y2), (255, 0, 0), 2)
+        return img_copy
+
+    @staticmethod
+    def draw_polylines(img: np.ndarray, points) -> np.ndarray:
+        img_copy = img.copy()
+        for point in points.astype(int):
+            point = point.reshape(4, 2)
+            cv2.polylines(img_copy, [point.astype(int)], True, (255, 0, 0), 2)
+        return img_copy
+
+    @staticmethod
+    def save_img(save_path: Union[str, Path], img: np.ndarray):
+        cv2.imwrite(str(save_path), img)
+
+    @staticmethod
+    def save_html(save_path: Union[str, Path], html: str):
+        with open(save_path, "w", encoding="utf-8") as f:
+            f.write(html)
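
`rescale_size` chooses the largest factor that keeps both edges inside the target, which is how `TSRUnet`'s 1024×1024 input preserves aspect ratio. A worked example of that arithmetic (standalone, mirroring `rescale_size` and `_scale_size` above):

```python
# Image is 1280 wide, 960 tall; target scale is (1024, 1024).
w, h = 1280, 960
max_long_edge = max(1024, 1024)   # 1024
max_short_edge = min(1024, 1024)  # 1024

# Largest factor that keeps both edges within the target.
scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w))
# min(1024 / 1280, 1024 / 960) = min(0.8, 1.0667) = 0.8

new_size = (int(w * scale_factor + 0.5), int(h * scale_factor + 0.5))
print(scale_factor, new_size)  # 0.8 (1024, 768)
```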

+ 347 - 0
mineru/model/table/rec/unet_table/utils_table_line_rec.py

@@ -0,0 +1,347 @@
+import math
+
+import cv2
+import numpy as np
+from scipy.spatial import distance as dist
+from skimage import measure
+
+
+def transform_preds(coords, center, scale, output_size, rot=0):
+    target_coords = np.zeros(coords.shape)
+    trans = get_affine_transform(center, scale, rot, output_size, inv=1)
+    for p in range(coords.shape[0]):
+        target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
+    return target_coords
+
+
+def get_affine_transform(
+    center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0
+):
+    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
+        scale = np.array([scale, scale], dtype=np.float32)
+
+    scale_tmp = scale
+    src_w = scale_tmp[0]
+    dst_w = output_size[0]
+    dst_h = output_size[1]
+
+    rot_rad = np.pi * rot / 180
+    src_dir = get_dir([0, src_w * -0.5], rot_rad)
+    dst_dir = np.array([0, dst_w * -0.5], np.float32)
+
+    src = np.zeros((3, 2), dtype=np.float32)
+    dst = np.zeros((3, 2), dtype=np.float32)
+    src[0, :] = center + scale_tmp * shift
+    src[1, :] = center + src_dir + scale_tmp * shift
+    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
+    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
+
+    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
+    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
+
+    if inv:
+        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
+    else:
+        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
+
+    return trans
+
+
+def affine_transform(pt, t):
+    new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32).T
+    new_pt = np.dot(t, new_pt)
+    return new_pt[:2]
+
+
+def get_dir(src_point, rot_rad):
+    sn, cs = np.sin(rot_rad), np.cos(rot_rad)
+
+    src_result = [0, 0]
+    src_result[0] = src_point[0] * cs - src_point[1] * sn
+    src_result[1] = src_point[0] * sn + src_point[1] * cs
+
+    return src_result
+
+
+def get_3rd_point(a, b):
+    direct = a - b
+    return b + np.array([-direct[1], direct[0]], dtype=np.float32)
+
+
+def get_table_line(binimg, axis=0, lineW=10):
+    # Extract table lines
+    # axis=0: horizontal lines
+    # axis=1: vertical lines
+    labels = measure.label(binimg > 0, connectivity=2)  # label 8-connected regions
+    regions = measure.regionprops(labels)
+    if axis == 1:
+        lineboxes = [
+            min_area_rect(line.coords)
+            for line in regions
+            if line.bbox[2] - line.bbox[0] > lineW
+        ]
+    else:
+        lineboxes = [
+            min_area_rect(line.coords)
+            for line in regions
+            if line.bbox[3] - line.bbox[1] > lineW
+        ]
+    return lineboxes
+
+
+def min_area_rect(coords):
+    """
+    Minimum-area bounding rectangle of a polygon
+    """
+    rect = cv2.minAreaRect(coords[:, ::-1])
+    box = cv2.boxPoints(rect)
+    box = box.reshape((8,)).tolist()
+
+    box = image_location_sort_box(box)
+
+    x1, y1, x2, y2, x3, y3, x4, y4 = box
+    degree, w, h, cx, cy = calculate_center_rotate_angle(box)
+    if w < h:
+        xmin = (x1 + x2) / 2
+        xmax = (x3 + x4) / 2
+        ymin = (y1 + y2) / 2
+        ymax = (y3 + y4) / 2
+
+    else:
+        xmin = (x1 + x4) / 2
+        xmax = (x2 + x3) / 2
+        ymin = (y1 + y4) / 2
+        ymax = (y2 + y3) / 2
+    # degree,w,h,cx,cy = solve(box)
+    # x1,y1,x2,y2,x3,y3,x4,y4 = box
+    # return {'degree':degree,'w':w,'h':h,'cx':cx,'cy':cy}
+    return [xmin, ymin, xmax, ymax]
+
+
+def image_location_sort_box(box):
+    x1, y1, x2, y2, x3, y3, x4, y4 = box[:8]
+    pts = (x1, y1), (x2, y2), (x3, y3), (x4, y4)
+    pts = np.array(pts, dtype="float32")
+    (x1, y1), (x2, y2), (x3, y3), (x4, y4) = _order_points(pts)
+    return [x1, y1, x2, y2, x3, y3, x4, y4]
+
+
+def calculate_center_rotate_angle(box):
+    """
+    绕 cx,cy点 w,h 旋转 angle 的坐标,能一定程度缓解图片的内部倾斜,但是还是依赖模型稳妥
+    x = cx-w/2
+    y = cy-h/2
+    x1-cx = -w/2*cos(angle) +h/2*sin(angle)
+    y1 -cy= -w/2*sin(angle) -h/2*cos(angle)
+
+    h(x1-cx) = -wh/2*cos(angle) +hh/2*sin(angle)
+    w(y1 -cy)= -ww/2*sin(angle) -hw/2*cos(angle)
+    (hh+ww)/2sin(angle) = h(x1-cx)-w(y1 -cy)
+
+    """
+    x1, y1, x2, y2, x3, y3, x4, y4 = box[:8]
+    cx = (x1 + x3 + x2 + x4) / 4.0
+    cy = (y1 + y3 + y4 + y2) / 4.0
+    w = (
+        np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
+        + np.sqrt((x3 - x4) ** 2 + (y3 - y4) ** 2)
+    ) / 2
+    h = (
+        np.sqrt((x2 - x3) ** 2 + (y2 - y3) ** 2)
+        + np.sqrt((x1 - x4) ** 2 + (y1 - y4) ** 2)
+    ) / 2
+    # x = cx-w/2
+    # y = cy-h/2
+    sinA = (h * (x1 - cx) - w * (y1 - cy)) * 1.0 / (h * h + w * w + 1e-10) * 2
+    angle = np.arcsin(sinA)
+    return angle, w, h, cx, cy
+
+
+def _order_points(pts):
+    # Sort the points by x coordinate
+    """
+    ---------------------
+    Used here to obtain [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)] after sorting
+    Author: Tong_T
+    Source: CSDN
+    Original: https://blog.csdn.net/Tong_T/article/details/81907132
+    Copyright notice: original article; please include the source link when reposting
+    """
+    x_sorted = pts[np.argsort(pts[:, 0]), :]
+
+    left_most = x_sorted[:2, :]
+    right_most = x_sorted[2:, :]
+    left_most = left_most[np.argsort(left_most[:, 1]), :]
+    (tl, bl) = left_most
+
+    distance = dist.cdist(tl[np.newaxis], right_most, "euclidean")[0]
+    (br, tr) = right_most[np.argsort(distance)[::-1], :]
+
+    return np.array([tl, tr, br, bl], dtype="float32")
+
+
+def sqrt(p1, p2):
+    """Euclidean distance between two points (despite the name)"""
+    return np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
+
+
+def adjust_lines(lines, alph=50, angle=50):
+    lines_n = len(lines)
+    new_lines = []
+    for i in range(lines_n):
+        x1, y1, x2, y2 = lines[i]
+        cx1, cy1 = (x1 + x2) / 2, (y1 + y2) / 2
+        for j in range(lines_n):
+            if i != j:
+                x3, y3, x4, y4 = lines[j]
+                cx2, cy2 = (x3 + x4) / 2, (y3 + y4) / 2
+                if (x3 < cx1 < x4 or y3 < cy1 < y4) or (
+                    x1 < cx2 < x2 or y1 < cy2 < y2
+                ):  # skip pairs whose projections already overlap on either axis
+                    continue
+                else:
+                    r = sqrt((x1, y1), (x3, y3))
+                    k = abs((y3 - y1) / (x3 - x1 + 1e-10))
+                    a = math.atan(k) * 180 / math.pi
+                    if r < alph and a < angle:
+                        new_lines.append((x1, y1, x3, y3))
+
+                    r = sqrt((x1, y1), (x4, y4))
+                    k = abs((y4 - y1) / (x4 - x1 + 1e-10))
+                    a = math.atan(k) * 180 / math.pi
+                    if r < alph and a < angle:
+                        new_lines.append((x1, y1, x4, y4))
+
+                    r = sqrt((x2, y2), (x3, y3))
+                    k = abs((y3 - y2) / (x3 - x2 + 1e-10))
+                    a = math.atan(k) * 180 / math.pi
+                    if r < alph and a < angle:
+                        new_lines.append((x2, y2, x3, y3))
+                    r = sqrt((x2, y2), (x4, y4))
+                    k = abs((y4 - y2) / (x4 - x2 + 1e-10))
+                    a = math.atan(k) * 180 / math.pi
+                    if r < alph and a < angle:
+                        new_lines.append((x2, y2, x4, y4))
+    return new_lines
+
+
+def final_adjust_lines(rowboxes, colboxes):
+    nrow = len(rowboxes)
+    ncol = len(colboxes)
+    for i in range(nrow):
+        for j in range(ncol):
+            rowboxes[i] = line_to_line(rowboxes[i], colboxes[j], alpha=20, angle=30)
+            colboxes[j] = line_to_line(colboxes[j], rowboxes[i], alpha=20, angle=30)
+    return rowboxes, colboxes
+
+
+def draw_lines(im, bboxes, color=(0, 0, 0), lineW=3):
+    """
+    boxes: bounding boxes
+    """
+    tmp = np.copy(im)
+    c = color
+    h, w = im.shape[:2]
+
+    for box in bboxes:
+        x1, y1, x2, y2 = box[:4]
+        cv2.line(
+            tmp, (int(x1), int(y1)), (int(x2), int(y2)), c, lineW, lineType=cv2.LINE_AA
+        )
+
+    return tmp
+
+
+def line_to_line(points1, points2, alpha=10, angle=30):
+    """
+    线段之间的距离
+    """
+    x1, y1, x2, y2 = points1
+    ox1, oy1, ox2, oy2 = points2
+    xy = np.array([(x1, y1), (x2, y2)], dtype="float32")
+    A1, B1, C1 = fit_line(xy)
+    oxy = np.array([(ox1, oy1), (ox2, oy2)], dtype="float32")
+    A2, B2, C2 = fit_line(oxy)
+    flag1 = point_line_cor(np.array([x1, y1], dtype="float32"), A2, B2, C2)
+    flag2 = point_line_cor(np.array([x2, y2], dtype="float32"), A2, B2, C2)
+
+    if (flag1 > 0 and flag2 > 0) or (flag1 < 0 and flag2 < 0):  # 横线或者竖线在竖线或者横线的同一侧
+        if (A1 * B2 - A2 * B1) != 0:
+            x = (B1 * C2 - B2 * C1) / (A1 * B2 - A2 * B1)
+            y = (A2 * C1 - A1 * C2) / (A1 * B2 - A2 * B1)
+            # x, y = round(x, 2), round(y, 2)
+            p = (x, y)  # intersection of the two lines
+            r0 = sqrt(p, (x1, y1))
+            r1 = sqrt(p, (x2, y2))
+
+            if min(r0, r1) < alpha:  # 若交点与线起点或者终点的距离小于alpha,则延长线到交点
+                if r0 < r1:
+                    k = abs((y2 - p[1]) / (x2 - p[0] + 1e-10))
+                    a = math.atan(k) * 180 / math.pi
+                    if a < angle or abs(90 - a) < angle:
+                        points1 = np.array([p[0], p[1], x2, y2], dtype="float32")
+                else:
+                    k = abs((y1 - p[1]) / (x1 - p[0] + 1e-10))
+                    a = math.atan(k) * 180 / math.pi
+                    if a < angle or abs(90 - a) < angle:
+                        points1 = np.array([x1, y1, p[0], p[1]], dtype="float32")
+    return points1
+
+
+def min_area_rect_box(
+    regions, flag=True, W=0, H=0, filtersmall=False, adjust_box=False
+):
+    """
+    Minimum-area bounding rectangles for labeled regions
+    """
+    boxes = []
+    for region in regions:
+        if region.bbox_area > H * W * 3 / 4:  # filter out overly large cells
+            continue
+        rect = cv2.minAreaRect(region.coords[:, ::-1])
+
+        box = cv2.boxPoints(rect)
+        box = box.reshape((8,)).tolist()
+        box = image_location_sort_box(box)
+        x1, y1, x2, y2, x3, y3, x4, y4 = box
+        angle, w, h, cx, cy = calculate_center_rotate_angle(box)
+        # if adjustBox:
+        #     x1, y1, x2, y2, x3, y3, x4, y4 = xy_rotate_box(cx, cy, w + 5, h + 5, angle=0, degree=None)
+        #     x1, x4 = max(x1, 0), max(x4, 0)
+        #     y1, y2 = max(y1, 0), max(y2, 0)
+
+        # if w > 32 and h > 32 and flag:
+        #     if abs(angle / np.pi * 180) < 20:
+        #         if filtersmall and (w < 10 or h < 10):
+        #             continue
+        #         boxes.append([x1, y1, x2, y2, x3, y3, x4, y4])
+        # else:
+        if w * h < 0.5 * W * H:
+            if filtersmall and (
+                w < 15 or h < 15
+            ):  # or w / h > 30 or h / w > 30): # 过滤小的单元格
+                continue
+            boxes.append([x1, y1, x2, y2, x3, y3, x4, y4])
+    return boxes
+
+
+def point_line_cor(p, A, B, C):
+    # Determine a point's position relative to a line
+    # given in general form: Ax + By + C = 0
+    x, y = p
+    r = A * x + B * y + C
+    return r
+
+
+def fit_line(p):
+    """A = Y2 - Y1
+       B = X1 - X2
+       C = X2*Y1 - X1*Y2
+       AX + BY + C = 0
+    General-form equation of the line through two points
+    """
+    x1, y1 = p[0]
+    x2, y2 = p[1]
+    A = y2 - y1
+    B = x1 - x2
+    C = x2 * y1 - x1 * y2
+    return A, B, C
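
`fit_line` returns the general-form coefficients of the line through two points, and `point_line_cor` evaluates Ax + By + C so that the sign tells which side of the line a point lies on; `line_to_line` relies on matching signs to decide both endpoints sit on the same side. A quick numeric check (reimplementing the two helpers; the sample points are invented):

```python
import numpy as np

def fit_line(p):
    (x1, y1), (x2, y2) = p
    return y2 - y1, x1 - x2, x2 * y1 - x1 * y2  # A, B, C for Ax + By + C = 0

def point_line_cor(p, A, B, C):
    x, y = p
    return A * x + B * y + C  # sign indicates the side of the line

# Horizontal line y = 5 through (0, 5) and (10, 5): A=0, B=-10, C=50.
A, B, C = fit_line(np.array([(0, 5), (10, 5)], dtype="float32"))
print(point_line_cor((3, 8), A, B, C))  # -30.0: one side of the line
print(point_line_cor((3, 2), A, B, C))  #  30.0: the other side
```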

+ 311 - 0
mineru/model/table/rec/unet_table/utils_table_recover.py

@@ -0,0 +1,311 @@
+from typing import Any, Dict, List, Union, Tuple
+
+import numpy as np
+
+
+def calculate_iou(
+    box1: Union[np.ndarray, List], box2: Union[np.ndarray, List]
+) -> float:
+    """
+    :param box1: Iterable [xmin,ymin,xmax,ymax]
+    :param box2: Iterable [xmin,ymin,xmax,ymax]
+    :return: iou: float 0-1
+    """
+    b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
+    b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
+    # early exit if the boxes do not intersect
+    if b1_x2 < b2_x1 or b1_x1 > b2_x2 or b1_y2 < b2_y1 or b1_y1 > b2_y2:
+        return 0.0
+    # intersection area
+    inter_x1 = max(b1_x1, b2_x1)
+    inter_y1 = max(b1_y1, b2_y1)
+    inter_x2 = min(b1_x2, b2_x2)
+    inter_y2 = min(b1_y2, b2_y2)
+    i_area = max(0, inter_x2 - inter_x1) * max(0, inter_y2 - inter_y1)
+
+    # union area
+    b1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1)
+    b2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)
+    u_area = b1_area + b2_area - i_area
+
+    # guard against division by zero: a zero-area union means a degenerate
+    # detection, so report full overlap and let the caller drop it
+    if u_area == 0:
+        return 1.0
+    iou = i_area / u_area
+    return iou
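A quick sanity check of the helper with hypothetical boxes:

    # two 10x10 boxes overlapping in a 5x5 corner: 25 / (100 + 100 - 25)
    print(calculate_iou([0, 0, 10, 10], [5, 5, 15, 15]))  # ~0.143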
+
+
+
+def is_box_contained(
+    box1: Union[np.ndarray, List], box2: Union[np.ndarray, List], threshold=0.2
+) -> Union[int, None]:
+    """
+    :param box1: Iterable [xmin,ymin,xmax,ymax]
+    :param box2: Iterable [xmin,ymin,xmax,ymax]
+    :return: 1 if box1 is contained, 2 if box2 is contained, None if neither is contained
+    """
+    b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
+    b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
+    # early exit if the boxes do not intersect
+    if b1_x2 < b2_x1 or b1_x1 > b2_x2 or b1_y2 < b2_y1 or b1_y1 > b2_y2:
+        return None
+    # total areas of box2 and box1
+    b2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)
+    b1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1)
+
+    # intersection of box1 and box2
+    intersect_x1 = max(b1_x1, b2_x1)
+    intersect_y1 = max(b1_y1, b2_y1)
+    intersect_x2 = min(b1_x2, b2_x2)
+    intersect_y2 = min(b1_y2, b2_y2)
+
+    # area of the intersection
+    intersect_area = max(0, intersect_x2 - intersect_x1) * max(
+        0, intersect_y2 - intersect_y1
+    )
+
+    # area of each box lying outside the intersection
+    b1_outside_area = b1_area - intersect_area
+    b2_outside_area = b2_area - intersect_area
+
+    # fraction of each box lying outside the intersection
+    ratio_b1 = b1_outside_area / b1_area if b1_area > 0 else 0
+    ratio_b2 = b2_outside_area / b2_area if b2_area > 0 else 0
+
+    if ratio_b1 < threshold:
+        return 1
+    if ratio_b2 < threshold:
+        return 2
+    # neither ratio is below the threshold, so neither box is contained
+    return None
+
+
+def is_single_axis_contained(
+    box1: Union[np.ndarray, List],
+    box2: Union[np.ndarray, List],
+    axis="x",
+    threhold: float = 0.2,
+) -> Union[int, None]:
+    """
+    :param box1: Iterable [xmin,ymin,xmax,ymax]
+    :param box2: Iterable [xmin,ymin,xmax,ymax]
+    :return: 1 if box1 is contained, 2 if box2 is contained, None if neither is contained
+    """
+    b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
+    b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
+
+    # overlap length along the chosen axis
+    if axis == "x":
+        b1_area = b1_x2 - b1_x1
+        b2_area = b2_x2 - b2_x1
+        i_area = min(b1_x2, b2_x2) - max(b1_x1, b2_x1)
+    else:
+        b1_area = b1_y2 - b1_y1
+        b2_area = b2_y2 - b2_y1
+        i_area = min(b1_y2, b2_y2) - max(b1_y1, b2_y1)
+    # length of each interval lying outside the overlap
+    b1_outside_area = b1_area - i_area
+    b2_outside_area = b2_area - i_area
+
+    ratio_b1 = b1_outside_area / b1_area if b1_area > 0 else 0
+    ratio_b2 = b2_outside_area / b2_area if b2_area > 0 else 0
+    if ratio_b1 < threhold:
+        return 1
+    if ratio_b2 < threhold:
+        return 2
+    return None
+
+
+def sorted_ocr_boxes(
+    dt_boxes: Union[np.ndarray, list], threhold: float = 0.2
+) -> Tuple[Union[np.ndarray, list], List[int]]:
+    """
+    Sort text boxes in order from top to bottom, left to right
+    args:
+        dt_boxes(array):detected text boxes with (xmin, ymin, xmax, ymax)
+    return:
+        sorted boxes(array) with (xmin, ymin, xmax, ymax)
+    """
+    num_boxes = len(dt_boxes)
+    if num_boxes <= 0:
+        return dt_boxes, []
+    indexed_boxes = [(box, idx) for idx, box in enumerate(dt_boxes)]
+    sorted_boxes_with_idx = sorted(indexed_boxes, key=lambda x: (x[0][1], x[0][0]))
+    _boxes, indices = zip(*sorted_boxes_with_idx)
+    indices = list(indices)
+    _boxes = [dt_boxes[i] for i in indices]
+    y_tolerance = 20  # max y-offset (px) for boxes treated as being on the same row
+    # keep the output container type consistent with the input type
+    if isinstance(dt_boxes, np.ndarray):
+        _boxes = np.array(_boxes)
+    for i in range(num_boxes - 1):
+        for j in range(i, -1, -1):
+            c_idx = is_single_axis_contained(
+                _boxes[j], _boxes[j + 1], axis="y", threhold=threhold
+            )
+            if (
+                c_idx is not None
+                and _boxes[j + 1][0] < _boxes[j][0]
+                and abs(_boxes[j][1] - _boxes[j + 1][1]) < y_tolerance
+            ):
+                _boxes[j], _boxes[j + 1] = _boxes[j + 1].copy(), _boxes[j].copy()
+                indices[j], indices[j + 1] = indices[j + 1], indices[j]
+            else:
+                break
+    return _boxes, indices
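Behaviour sketch with hypothetical boxes: two boxes share a visual row (1px y-offset) and get swapped into left-to-right order, while the lower box stays last:

    boxes = [
        [100, 0, 150, 10],  # row 1, right
        [0, 1, 50, 11],     # row 1, left, starts 1px lower
        [0, 30, 50, 40],    # row 2
    ]
    _, order = sorted_ocr_boxes(boxes)
    print(order)  # [1, 0, 2]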
+
+
+def box_4_1_poly_to_box_4_2(poly_box: Union[list, np.ndarray]) -> List[List[float]]:
+    xmin, ymin, xmax, ymax = tuple(poly_box)
+    return [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]
+
+
+def box_4_2_poly_to_box_4_1(poly_box: Union[list, np.ndarray]) -> List[Any]:
+    """
+    Convert a 4x2 polygon box to the [xmin, ymin, xmax, ymax] form.
+    """
+    return [poly_box[0][0], poly_box[0][1], poly_box[2][0], poly_box[2][1]]
+
+
+def match_ocr_cell(dt_rec_boxes: List[List[Union[Any, str]]], pred_bboxes: np.ndarray):
+    """
+    :param dt_rec_boxes: [[box(4,2), text, score]]
+    :param pred_bboxes: shape (4,2)
+    :return: (matched cells dict, unmatched OCR boxes)
+    """
+    matched = {}
+    not_match_ocr_boxes = []
+    for i, gt_box in enumerate(dt_rec_boxes):
+        for j, pred_box in enumerate(pred_bboxes):
+            pred_box = [pred_box[0][0], pred_box[0][1], pred_box[2][0], pred_box[2][1]]
+            ocr_boxes = gt_box[0]
+            # xmin,ymin,xmax,ymax
+            ocr_box = (
+                ocr_boxes[0][0],
+                ocr_boxes[0][1],
+                ocr_boxes[2][0],
+                ocr_boxes[2][1],
+            )
+            contained = is_box_contained(ocr_box, pred_box, 0.6)
+            if contained == 1 or calculate_iou(ocr_box, pred_box) > 0.8:
+                if j not in matched:
+                    matched[j] = [gt_box]
+                else:
+                    matched[j].append(gt_box)
+            else:
+                not_match_ocr_boxes.append(gt_box)
+
+    return matched, not_match_ocr_boxes
+
+
+def gather_ocr_list_by_row(ocr_list: List[Any], threhold: float = 0.2) -> List[Any]:
+    """
+    :param ocr_list: [[[xmin,ymin,xmax,ymax], text]]
+    :return:
+    """
+    space_px = 10  # horizontal gap corresponding to one inserted space
+    for i in range(len(ocr_list)):
+        if not ocr_list[i]:
+            continue
+
+        for j in range(i + 1, len(ocr_list)):
+            if not ocr_list[j]:
+                continue
+            cur = ocr_list[i]
+            nxt = ocr_list[j]  # renamed to avoid shadowing the built-in next()
+            cur_box = cur[0]
+            next_box = nxt[0]
+            c_idx = is_single_axis_contained(
+                cur_box, next_box, axis="y", threhold=threhold
+            )
+            if c_idx:
+                dis = max(next_box[0] - cur_box[2], 0)
+                blank_str = int(dis / space_px) * " "
+                cur[1] = cur[1] + blank_str + nxt[1]
+                xmin = min(cur_box[0], next_box[0])
+                xmax = max(cur_box[2], next_box[2])
+                ymin = min(cur_box[1], next_box[1])
+                ymax = max(cur_box[3], next_box[3])
+                cur_box[0] = xmin
+                cur_box[1] = ymin
+                cur_box[2] = xmax
+                cur_box[3] = ymax
+                ocr_list[j] = None
+    ocr_list = [x for x in ocr_list if x]
+    return ocr_list
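Usage sketch (hypothetical fragments): two pieces on the same row, 10px apart, merge into one entry with a proportional number of spaces:

    frags = [[[0, 0, 50, 10], "Hello"], [[60, 0, 110, 10], "world"]]
    print(gather_ocr_list_by_row(frags))  # [[[0, 0, 110, 10], 'Hello world']]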
+
+
+def plot_html_table(
+    logi_points: Union[Union[np.ndarray, List]], cell_box_map: Dict[int, List[str]]
+) -> str:
+    # determine the table's total row and column counts
+    max_row = 0
+    max_col = 0
+    for point in logi_points:
+        max_row = max(max_row, point[1] + 1)  # +1 because the end index is inclusive
+        max_col = max(max_col, point[3] + 1)  # +1 because the end index is inclusive
+
+    # 2-D grid recording which logical cell owns each (row, col) position
+    grid = [[None] * max_col for _ in range(max_row)]
+
+    valid_start_row = (1 << 16) - 1
+    valid_start_col = (1 << 16) - 1
+    valid_end_col = 0
+    # fill the grid with the logical cells
+    for i, logic_point in enumerate(logi_points):
+        row_start, row_end, col_start, col_end = (
+            logic_point[0],
+            logic_point[1],
+            logic_point[2],
+            logic_point[3],
+        )
+        ocr_rec_text_list = cell_box_map.get(i)
+        if ocr_rec_text_list and "".join(ocr_rec_text_list):
+            valid_start_row = min(row_start, valid_start_row)
+            valid_start_col = min(col_start, valid_start_col)
+            valid_end_col = max(col_end, valid_end_col)
+        for row in range(row_start, row_end + 1):
+            for col in range(col_start, col_end + 1):
+                grid[row][col] = (i, row_start, row_end, col_start, col_end)
+
+    # build the HTML table
+    table_html = "<html><body><table>"
+
+    # iterate over rows
+    for row in range(max_row):
+        if row < valid_start_row:
+            continue
+        temp = "<tr>"
+        # iterate over columns
+        for col in range(max_col):
+            if col < valid_start_col or col > valid_end_col:
+                continue
+            if not grid[row][col]:
+                temp += "<td></td>"
+            else:
+                i, row_start, row_end, col_start, col_end = grid[row][col]
+                if not cell_box_map.get(i):
+                    continue
+                if row == row_start and col == col_start:
+                    ocr_rec_text = cell_box_map.get(i)
+                    # text = "<br>".join(ocr_rec_text)
+                    text = "".join(ocr_rec_text)
+                    # this is the cell's anchor position, so emit it exactly once
+                    row_span = row_end - row_start + 1
+                    col_span = col_end - col_start + 1
+                    cell_content = (
+                        f"<td rowspan={row_span} colspan={col_span}>{text}</td>"
+                    )
+                    temp += cell_content
+
+        table_html = table_html + temp + "</tr>"
+
+    table_html += "</table></body></html>"
+    return table_html
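Worked example with hypothetical logical points ([row_start, row_end, col_start, col_end] per cell); the output is shown wrapped for readability:

    logi = [[0, 0, 0, 1], [0, 0, 2, 2], [1, 1, 0, 0], [1, 1, 1, 2]]
    cells = {0: ["A"], 1: ["B"], 2: ["C"], 3: ["D"]}
    print(plot_html_table(logi, cells))
    # <html><body><table><tr><td rowspan=1 colspan=2>A</td><td rowspan=1 colspan=1>B</td></tr>
    # <tr><td rowspan=1 colspan=1>C</td><td rowspan=1 colspan=2>D</td></tr></table></body></html>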
+
+
+

+ 111 - 35
mineru/utils/draw_bbox.py

@@ -5,7 +5,7 @@ from loguru import logger
 from pypdf import PdfReader, PdfWriter, PageObject
 from reportlab.pdfgen import canvas
 
-from .enum_class import BlockType, ContentType
+from .enum_class import BlockType, ContentType, SplitFlag
 
 
 def cal_canvas_rect(page, bbox):
@@ -24,8 +24,12 @@ def cal_canvas_rect(page, bbox):
     actual_width = page_width    # The width of the final PDF display
     actual_height = page_height  # The height of the final PDF display
     
-    rotation = page.get("/Rotate", 0)
-    rotation = rotation % 360
+    rotation_obj = page.get("/Rotate", 0)
+    try:
+        rotation = int(rotation_obj) % 360  # cast rotation to int to handle IndirectObject
+    except (ValueError, TypeError) as e:
+        logger.warning(f"Invalid /Rotate value {rotation_obj!r} on page; defaulting to 0. Error: {e}")
+        rotation = 0
     
     if rotation in [90, 270]:
         # PDF is rotated 90 degrees or 270 degrees, and the width and height need to be swapped
@@ -35,19 +39,18 @@ def cal_canvas_rect(page, bbox):
     rect_w = abs(x1 - x0)
     rect_h = abs(y1 - y0)
     
-    if 270 == rotation:
+    if rotation == 270:
         rect_w, rect_h = rect_h, rect_w
         x0 = actual_height - y1
         y0 = actual_width - x1
-    elif 180 == rotation:
+    elif rotation == 180:
         x0 = page_width - x1
-        y0 = y0
-    elif 90 == rotation:
+        # y0 stays the same
+    elif rotation == 90:
         rect_w, rect_h = rect_h, rect_w
         x0, y0 = y0, x0 
     else:
-        # 0 == rotation:
-        x0 = x0
+        # rotation == 0
         y0 = page_height - y1
     
     rect = [x0, y0, rect_w, rect_h]        
@@ -91,16 +94,20 @@ def draw_bbox_with_number(i, bbox_list, page, c, rgb_config, fill_config, draw_b
         c.setFontSize(size=10)
         
         c.saveState()
-        rotation = page.get("/Rotate", 0)
-        rotation = rotation % 360
-    
-        if 0 == rotation:
+        rotation_obj = page.get("/Rotate", 0)
+        try:
+            rotation = int(rotation_obj) % 360  # cast rotation to int to handle IndirectObject
+        except (ValueError, TypeError):
+            logger.warning(f"Invalid /Rotate value: {rotation_obj!r}, defaulting to 0")
+            rotation = 0
+
+        if rotation == 0:
             c.translate(rect[0] + rect[2] + 2, rect[1] + rect[3] - 10)
-        elif 90 == rotation:
+        elif rotation == 90:
             c.translate(rect[0] + 10, rect[1] + rect[3] + 2)
-        elif 180 == rotation:
+        elif rotation == 180:
             c.translate(rect[0] - 2, rect[1] + 10)
-        elif 270 == rotation:
+        elif rotation == 270:
             c.translate(rect[0] + rect[2] - 10, rect[1] - 2)
             
         c.rotate(rotation)
@@ -114,8 +121,7 @@ def draw_layout_bbox(pdf_info, pdf_bytes, out_path, filename):
     dropped_bbox_list = []
     tables_list, tables_body_list = [], []
     tables_caption_list, tables_footnote_list = [], []
-    imgs_list, imgs_body_list, imgs_caption_list = [], [], []
-    imgs_footnote_list = []
+    imgs_list, imgs_body_list, imgs_caption_list, imgs_footnote_list = [], [], [], []
     titles_list = []
     texts_list = []
     interequations_list = []
@@ -145,6 +151,8 @@ def draw_layout_bbox(pdf_info, pdf_bytes, out_path, filename):
                     elif nested_block["type"] == BlockType.TABLE_CAPTION:
                         tables_caption.append(bbox)
                     elif nested_block["type"] == BlockType.TABLE_FOOTNOTE:
+                        if nested_block.get(SplitFlag.CROSS_PAGE, False):
+                            continue
                         tables_footnote.append(bbox)
             elif block["type"] == BlockType.IMAGE:
                 imgs.append(bbox)
@@ -203,6 +211,8 @@ def draw_layout_bbox(pdf_info, pdf_bytes, out_path, filename):
             elif block["type"] in [BlockType.TABLE]:
                 sorted_blocks = sorted(block["blocks"], key=lambda x: table_type_order[x["type"]])
                 for sub_block in sorted_blocks:
+                    if sub_block.get(SplitFlag.CROSS_PAGE, False):
+                        continue
                     bbox = sub_block["bbox"]
                     page_block_list.append(bbox)
 
@@ -264,20 +274,12 @@ def draw_span_bbox(pdf_info, pdf_bytes, out_path, filename):
     image_list = []
     table_list = []
     dropped_list = []
-    next_page_text_list = []
-    next_page_inline_equation_list = []
 
     def get_span_info(span):
         if span['type'] == ContentType.TEXT:
-            if span.get('cross_page', False):
-                next_page_text_list.append(span['bbox'])
-            else:
-                page_text_list.append(span['bbox'])
+            page_text_list.append(span['bbox'])
         elif span['type'] == ContentType.INLINE_EQUATION:
-            if span.get('cross_page', False):
-                next_page_inline_equation_list.append(span['bbox'])
-            else:
-                page_inline_equation_list.append(span['bbox'])
+            page_inline_equation_list.append(span['bbox'])
         elif span['type'] == ContentType.INTERLINE_EQUATION:
             page_interline_equation_list.append(span['bbox'])
         elif span['type'] == ContentType.IMAGE:
@@ -293,13 +295,6 @@ def draw_span_bbox(pdf_info, pdf_bytes, out_path, filename):
         page_table_list = []
         page_dropped_list = []
 
-        # 将跨页的span放到移动到下一页的列表中
-        if len(next_page_text_list) > 0:
-            page_text_list.extend(next_page_text_list)
-            next_page_text_list.clear()
-        if len(next_page_inline_equation_list) > 0:
-            page_inline_equation_list.extend(next_page_inline_equation_list)
-            next_page_inline_equation_list.clear()
 
         # build dropped_list
         for block in page['discarded_blocks']:
@@ -375,6 +370,87 @@ def draw_span_bbox(pdf_info, pdf_bytes, out_path, filename):
         output_pdf.write(f)
 
 
+def draw_line_sort_bbox(pdf_info, pdf_bytes, out_path, filename):
+    layout_bbox_list = []
+
+    for page in pdf_info:
+        page_line_list = []
+        for block in page['preproc_blocks']:
+            if block['type'] in [BlockType.TEXT]:
+                for line in block['lines']:
+                    bbox = line['bbox']
+                    index = line['index']
+                    page_line_list.append({'index': index, 'bbox': bbox})
+            elif block['type'] in [BlockType.TITLE, BlockType.INTERLINE_EQUATION]:
+                if 'virtual_lines' in block:
+                    if len(block['virtual_lines']) > 0 and block['virtual_lines'][0].get('index', None) is not None:
+                        for line in block['virtual_lines']:
+                            bbox = line['bbox']
+                            index = line['index']
+                            page_line_list.append({'index': index, 'bbox': bbox})
+                else:
+                    for line in block['lines']:
+                        bbox = line['bbox']
+                        index = line['index']
+                        page_line_list.append({'index': index, 'bbox': bbox})
+            elif block['type'] in [BlockType.IMAGE, BlockType.TABLE]:
+                for sub_block in block['blocks']:
+                    if sub_block['type'] in [BlockType.IMAGE_BODY, BlockType.TABLE_BODY]:
+                        if len(sub_block['virtual_lines']) > 0 and sub_block['virtual_lines'][0].get('index', None) is not None:
+                            for line in sub_block['virtual_lines']:
+                                bbox = line['bbox']
+                                index = line['index']
+                                page_line_list.append({'index': index, 'bbox': bbox})
+                        else:
+                            for line in sub_block['lines']:
+                                bbox = line['bbox']
+                                index = line['index']
+                                page_line_list.append({'index': index, 'bbox': bbox})
+                    elif sub_block['type'] in [BlockType.IMAGE_CAPTION, BlockType.TABLE_CAPTION, BlockType.IMAGE_FOOTNOTE, BlockType.TABLE_FOOTNOTE]:
+                        for line in sub_block['lines']:
+                            bbox = line['bbox']
+                            index = line['index']
+                            page_line_list.append({'index': index, 'bbox': bbox})
+        sorted_bboxes = sorted(page_line_list, key=lambda x: x['index'])
+        layout_bbox_list.append([sorted_bbox['bbox'] for sorted_bbox in sorted_bboxes])  # a list, not a one-shot generator
+    pdf_bytes_io = BytesIO(pdf_bytes)
+    pdf_docs = PdfReader(pdf_bytes_io)
+    output_pdf = PdfWriter()
+
+    for i, page in enumerate(pdf_docs.pages):
+        # get the original page size
+        page_width, page_height = float(page.cropbox[2]), float(page.cropbox[3])
+        custom_page_size = (page_width, page_height)
+
+        packet = BytesIO()
+        # create the canvas with the original PDF's page size
+        c = canvas.Canvas(packet, pagesize=custom_page_size)
+
+        # draw this page's line boxes in reading order
+        draw_bbox_with_number(i, layout_bbox_list, page, c, [255, 0, 0], False)
+
+        c.save()
+        packet.seek(0)
+        overlay_pdf = PdfReader(packet)
+
+        # make sure overlay_pdf.pages is not empty
+        if len(overlay_pdf.pages) > 0:
+            new_page = PageObject(pdf=None)
+            new_page.update(page)
+            page = new_page
+            page.merge_page(overlay_pdf.pages[0])
+        else:
+            # log a warning and continue with the next page
+            # logger.warning(f"span.pdf: page {i + 1} did not produce a valid overlay PDF")
+            pass
+
+        output_pdf.add_page(page)
+
+    # Save the PDF
+    with open(f"{out_path}/{filename}", "wb") as f:
+        output_pdf.write(f)
+
+
 if __name__ == "__main__":
     # read the PDF file
     pdf_path = "examples/demo1.pdf"

+ 9 - 1
mineru/utils/enum_class.py

@@ -59,8 +59,16 @@ class ModelPath:
     pytorch_paddle = "models/OCR/paddleocr_torch"
     layout_reader = "models/ReadingOrder/layout_reader"
     slanet_plus = "models/TabRec/SlanetPlus/slanet-plus.onnx"
+    unet_structure = "models/TabRec/UnetStructure/unet.onnx"
+    paddle_table_cls = "models/TabCls/paddle_table_cls/PP-LCNet_x1_0_table_cls.onnx"
+    paddle_orientation_classification = "models/OriCls/paddle_orientation_classification/PP-LCNet_x1_0_doc_ori.onnx"
 
 
 class SplitFlag:
     CROSS_PAGE = 'cross_page'
-    LINES_DELETED = 'lines_deleted'
+    LINES_DELETED = 'lines_deleted'
+
+
+class ImageType:
+    PIL = 'pil_img'
+    BASE64 = 'base64_img'

+ 51 - 35
mineru/utils/model_utils.py

@@ -1,3 +1,4 @@
+import os
 import time
 import gc
 from PIL import Image
@@ -201,6 +202,10 @@ def filter_nested_tables(table_res_list, overlap_threshold=0.8, area_threshold=0
 
 
 def remove_overlaps_min_blocks(res_list):
+
+    for res in res_list:
+        res['bbox'] = [int(res['poly'][0]), int(res['poly'][1]), int(res['poly'][4]), int(res['poly'][5])]
+
     # For overlapping blocks, the smaller one cannot simply be deleted;
     # it must be merged with the larger one. Remove the smaller duplicates.
     need_remove = []
@@ -219,31 +224,43 @@ def remove_overlaps_min_blocks(res_list):
             )
 
             if overlap_box is not None:
-                res_to_remove = None
-                large_res = None
 
-                # 确定哪个是小块(要移除的)
+                # use the overlap box to decide which block is the small one
                 if overlap_box == res_list[i]['bbox']:
-                    res_to_remove = res_list[i]
-                    large_res = res_list[j]
+                    small_res, large_res = res_list[i], res_list[j]
                 elif overlap_box == res_list[j]['bbox']:
-                    res_to_remove = res_list[j]
-                    large_res = res_list[i]
-
-                if res_to_remove is not None and res_to_remove not in need_remove:
-                    # 更新大块的边界为两者的并集
-                    x1, y1, x2, y2 = large_res['bbox']
-                    sx1, sy1, sx2, sy2 = res_to_remove['bbox']
-                    x1 = min(x1, sx1)
-                    y1 = min(y1, sy1)
-                    x2 = max(x2, sx2)
-                    y2 = max(y2, sy2)
-                    large_res['bbox'] = [x1, y1, x2, y2]
-                    need_remove.append(res_to_remove)
+                    small_res, large_res = res_list[j], res_list[i]
+                else:
+                    continue  # the overlap box matches neither block; skip
+
+                if small_res['score'] <= large_res['score']:
+                    # the small block scores lower, so it is the one to remove
+                    if small_res is not None and small_res not in need_remove:
+                        # grow the large block's bbox to the union of the two
+                        x1, y1, x2, y2 = large_res['bbox']
+                        sx1, sy1, sx2, sy2 = small_res['bbox']
+                        x1 = min(x1, sx1)
+                        y1 = min(y1, sy1)
+                        x2 = max(x2, sx2)
+                        y2 = max(y2, sy2)
+                        large_res['bbox'] = [x1, y1, x2, y2]
+                        need_remove.append(small_res)
+                else:
+                    # the large block scores lower, so remove it; the small block's bbox stays unchanged
+                    if large_res is not None and large_res not in need_remove:
+                        need_remove.append(large_res)
 
     # remove the marked elements from the list
     for res in need_remove:
         res_list.remove(res)
+        del res['bbox']  # drop the temporary bbox field
+
+    for res in res_list:
+        # rebuild res['poly'] from the (possibly merged) bbox
+        res['poly'] = [res['bbox'][0], res['bbox'][1], res['bbox'][2], res['bbox'][1],
+                       res['bbox'][2], res['bbox'][3], res['bbox'][0], res['bbox'][3]]
+        # drop the temporary bbox field
+        del res['bbox']
 
     return res_list, need_remove
 
@@ -290,7 +307,7 @@ def remove_overlaps_low_confidence_blocks(combined_res_list, overlap_threshold=0
                                                                              overlap_threshold)]
 
         # if the big block contains two or more small blocks
-        if len(blocks_inside) >= 3:
+        if len(blocks_inside) >= 2:
             # average score of the small blocks
             avg_score = sum(s for _, s, _ in blocks_inside) / len(blocks_inside)
 
@@ -348,7 +365,6 @@ def get_res_list_from_layout_res(layout_res, iou_threshold=0.7, overlap_threshol
             table_res_list.append(res)
             table_indices.append(i)
         elif category_id in [1]:  # Text regions
-            res['bbox'] = [int(res['poly'][0]), int(res['poly'][1]), int(res['poly'][4]), int(res['poly'][5])]
             text_res_list.append(res)
 
     # Process tables: merge high IoU tables first, then filter nested tables
@@ -358,29 +374,27 @@ def get_res_list_from_layout_res(layout_res, iou_threshold=0.7, overlap_threshol
     filtered_table_res_list = filter_nested_tables(
         table_res_list, overlap_threshold, area_threshold)
 
+    filtered_table_res_list, table_need_remove = remove_overlaps_min_blocks(filtered_table_res_list)
+
+    for res in table_need_remove:
+        if res in layout_res:
+            layout_res.remove(res)
+
     # Remove filtered out tables from layout_res
     if len(filtered_table_res_list) < len(table_res_list):
         kept_tables = set(id(table) for table in filtered_table_res_list)
-        to_remove = [table_indices[i] for i, table in enumerate(table_res_list)
-                     if id(table) not in kept_tables]
-
-        for idx in sorted(to_remove, reverse=True):
-            del layout_res[idx]
+        tables_to_remove = [table for table in table_res_list if id(table) not in kept_tables]
+        for table in tables_to_remove:
+            if table in layout_res:
+                layout_res.remove(table)
 
     # Remove overlaps in OCR and text regions
     text_res_list, need_remove = remove_overlaps_min_blocks(text_res_list)
-    for res in text_res_list:
-        # 将res的poly使用bbox重构
-        res['poly'] = [res['bbox'][0], res['bbox'][1], res['bbox'][2], res['bbox'][1],
-                       res['bbox'][2], res['bbox'][3], res['bbox'][0], res['bbox'][3]]
-        # 删除res的bbox
-        del res['bbox']
 
     ocr_res_list.extend(text_res_list)
 
-    if len(need_remove) > 0:
-        for res in need_remove:
-            del res['bbox']
+    for res in need_remove:
+        if res in layout_res:
             layout_res.remove(res)
 
     # check whether any big block contains multiple small blocks, merging the ocr and table lists for the check
@@ -414,11 +428,13 @@ def clean_memory(device='cuda'):
 
 def clean_vram(device, vram_threshold=8):
     total_memory = get_vram(device)
+    if total_memory is not None:
+        total_memory = int(os.getenv('MINERU_VIRTUAL_VRAM_SIZE', round(total_memory)))
     if total_memory and total_memory <= vram_threshold:
         gc_start = time.time()
         clean_memory(device)
         gc_time = round(time.time() - gc_start, 2)
-        logger.info(f"gc time: {gc_time}")
+        # logger.info(f"gc time: {gc_time}")
 
 
 def get_vram(device):
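Sketch of the new override (hypothetical values): MINERU_VIRTUAL_VRAM_SIZE, when set, replaces the detected VRAM size, e.g. to force low-VRAM GC behaviour during testing:

    import os

    os.environ["MINERU_VIRTUAL_VRAM_SIZE"] = "6"
    detected = 24.0  # pretend get_vram() reported 24 GB
    effective = int(os.getenv("MINERU_VIRTUAL_VRAM_SIZE", round(detected)))
    print(effective)  # 6 -> at or below the threshold, clean_vram() runs GC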

+ 4 - 3
mineru/utils/ocr_utils.py

@@ -330,10 +330,10 @@ def get_adjusted_mfdetrec_res(single_page_mfdetrec_res, useful_list):
     return adjusted_mfdetrec_res
 
 
-def get_ocr_result_list(ocr_res, useful_list, ocr_enable, new_image, lang):
+def get_ocr_result_list(ocr_res, useful_list, ocr_enable, bgr_image, lang):
     paste_x, paste_y, xmin, ymin, xmax, ymax, new_width, new_height = useful_list
     ocr_result_list = []
-    ori_im = new_image.copy()
+    ori_im = bgr_image.copy()
     for box_ocr_res in ocr_res:
 
         if len(box_ocr_res) == 2:
@@ -437,6 +437,7 @@ def get_rotate_crop_image(img, points):
         borderMode=cv2.BORDER_REPLICATE,
         flags=cv2.INTER_CUBIC)
     dst_img_height, dst_img_width = dst_img.shape[0:2]
-    if dst_img_height * 1.0 / dst_img_width >= 1.5:
+    rotate_ratio = 2
+    if dst_img_height * 1.0 / dst_img_width >= rotate_ratio:
         dst_img = np.rot90(dst_img)
     return dst_img

+ 19 - 13
mineru/utils/pdf_classify.py

@@ -24,11 +24,11 @@ def classify(pdf_bytes):
     Returns:
         str: 'txt' means text can be extracted directly, 'ocr' means OCR is required
     """
-    try:
-        # 从字节数据加载PDF
-        sample_pdf_bytes = extract_pages(pdf_bytes)
-        pdf = pdfium.PdfDocument(sample_pdf_bytes)
 
+    # load the PDF from the byte data
+    sample_pdf_bytes = extract_pages(pdf_bytes)
+    pdf = pdfium.PdfDocument(sample_pdf_bytes)
+    try:
         # get the page count
         page_count = len(pdf)
 
@@ -42,19 +42,25 @@ def classify(pdf_bytes):
         # threshold: fewer than 50 valid characters per page on average means OCR is needed
         chars_threshold = 50
 
+        # check the average character count and invalid characters
         if (get_avg_cleaned_chars_per_page(pdf, pages_to_check) < chars_threshold) or detect_invalid_chars(sample_pdf_bytes):
             return 'ocr'
-        else:
 
-            if get_high_image_coverage_ratio(sample_pdf_bytes, pages_to_check) >= 0.8:
-                return 'ocr'
+        # check the image coverage ratio
+        if get_high_image_coverage_ratio(sample_pdf_bytes, pages_to_check) >= 0.8:
+            return 'ocr'
+
+        return 'txt'
 
-            return 'txt'
     except Exception as e:
         logger.error(f"判断PDF类型时出错: {e}")
         # default to OCR on error
         return 'ocr'
 
+    finally:
+        # make sure the PDF is closed on every code path
+        pdf.close()
+
 
 def get_avg_cleaned_chars_per_page(pdf_doc, pages_to_check):
     # 总字符数
@@ -78,8 +84,6 @@ def get_avg_cleaned_chars_per_page(pdf_doc, pages_to_check):
 
     # logger.debug(f"PDF分析: 平均每页清理后{avg_cleaned_chars_per_page:.1f}字符")
 
-    pdf_doc.close()  # 关闭PDF文档
-
     return avg_cleaned_chars_per_page
 
 
@@ -158,6 +162,9 @@ def get_high_image_coverage_ratio(sample_pdf_bytes, pages_to_check):
 
         page_count += 1
 
+    # release the document resources
+    pdf_stream.close()
+
     # return 0 if no page was processed
     if page_count == 0:
         return 0.0
@@ -166,9 +173,6 @@ def get_high_image_coverage_ratio(sample_pdf_bytes, pages_to_check):
     high_coverage_ratio = high_image_coverage_pages / page_count
     # logger.debug(f"PDF分析: 高图像覆盖页面比例: {high_coverage_ratio:.2f}")
 
-    # 关闭资源
-    pdf_stream.close()
-
     return high_coverage_ratio
 
 
@@ -205,6 +209,7 @@ def extract_pages(src_pdf_bytes: bytes) -> bytes:
     try:
         # import the selected pages into the new document
         sample_docs.import_pages(pdf, page_indices)
+        pdf.close()
 
         # save the new PDF into an in-memory buffer
         output_buffer = BytesIO()
@@ -213,6 +218,7 @@ def extract_pages(src_pdf_bytes: bytes) -> bytes:
         # get the byte data
         return output_buffer.getvalue()
     except Exception as e:
+        pdf.close()
         logger.exception(e)
         return b''  # return empty bytes on error
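The open/close discipline adopted above, in miniature (sketch; pypdfium2 documents must be closed explicitly):

    import pypdfium2 as pdfium

    def page_count(pdf_bytes: bytes) -> int:
        pdf = pdfium.PdfDocument(pdf_bytes)
        try:
            return len(pdf)
        finally:
            pdf.close()  # runs on success and on error alike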
 

+ 29 - 6
mineru/utils/pdf_image_tools.py

@@ -1,33 +1,37 @@
 # Copyright (c) Opendatalab. All rights reserved.
 from io import BytesIO
 
+import numpy as np
 import pypdfium2 as pdfium
 from loguru import logger
 from PIL import Image
 
 from mineru.data.data_reader_writer import FileBasedDataWriter
 from mineru.utils.pdf_reader import image_to_b64str, image_to_bytes, page_to_image
+from .enum_class import ImageType
 from .hash_utils import str_sha256
 
 
-def pdf_page_to_image(page: pdfium.PdfPage, dpi=200) -> dict:
+def pdf_page_to_image(page: pdfium.PdfPage, dpi=200, image_type=ImageType.PIL) -> dict:
     """Convert pdfium.PdfDocument to image, Then convert the image to base64.
 
     Args:
         page (_type_): pdfium.PdfPage
         dpi (int, optional): reset the dpi of dpi. Defaults to 200.
+        image_type (ImageType, optional): The type of image to return. Defaults to ImageType.PIL.
 
     Returns:
         dict: {'scale': float} plus 'img_base64' (str) or 'img_pil' (PIL.Image), depending on image_type
     """
     pil_img, scale = page_to_image(page, dpi=dpi)
-    img_base64 = image_to_b64str(pil_img)
-
     image_dict = {
-        "img_base64": img_base64,
-        "img_pil": pil_img,
         "scale": scale,
     }
+    if image_type == ImageType.BASE64:
+        image_dict["img_base64"] = image_to_b64str(pil_img)
+    else:
+        image_dict["img_pil"] = pil_img
+
     return image_dict
 
 
@@ -36,6 +40,7 @@ def load_images_from_pdf(
     dpi=200,
     start_page_id=0,
     end_page_id=None,
+    image_type=ImageType.PIL,  # PIL or BASE64
 ):
     images_list = []
     pdf_doc = pdfium.PdfDocument(pdf_bytes)
@@ -48,7 +53,7 @@ def load_images_from_pdf(
     for index in range(0, pdf_page_num):
         if start_page_id <= index <= end_page_id:
             page = pdf_doc[index]
-            image_dict = pdf_page_to_image(page, dpi=dpi)
+            image_dict = pdf_page_to_image(page, dpi=dpi, image_type=image_type)
             images_list.append(image_dict)
 
     return images_list, pdf_doc
@@ -87,6 +92,24 @@ def get_crop_img(bbox: tuple, pil_img, scale=2):
     return pil_img.crop(scale_bbox)
 
 
+def get_crop_np_img(bbox: tuple, input_img, scale=2):
+
+    if isinstance(input_img, Image.Image):
+        np_img = np.asarray(input_img)
+    elif isinstance(input_img, np.ndarray):
+        np_img = input_img
+    else:
+        raise ValueError("Input must be a pillow object or a numpy array.")
+
+    scale_bbox = (
+        int(bbox[0] * scale),
+        int(bbox[1] * scale),
+        int(bbox[2] * scale),
+        int(bbox[3] * scale),
+    )
+
+    return np_img[scale_bbox[1]:scale_bbox[3], scale_bbox[0]:scale_bbox[2]]
+
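Usage sketch (hypothetical array): crop a bbox given in page coordinates out of an image rendered at 2x scale; the helper scales the bbox before slicing:

    import numpy as np

    page_img = np.zeros((400, 400, 3), dtype=np.uint8)  # stand-in rendered page
    crop = get_crop_np_img((20, 10, 70, 60), page_img, scale=2)
    print(crop.shape)  # (100, 100, 3)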
 def images_bytes_to_pdf_bytes(image_bytes):
     # in-memory buffer
     pdf_buffer = BytesIO()

+ 32 - 18
mineru/utils/pdf_reader.py

@@ -9,8 +9,8 @@ from pypdfium2 import PdfBitmap, PdfDocument, PdfPage
 
 def page_to_image(
     page: PdfPage,
-    dpi: int = 144,  # changed from 200 to 144
-    max_width_or_height: int = 2560,  # changed from 4500 to 2560
+    dpi: int = 200,
+    max_width_or_height: int = 3500,  # changed from 4500 to 3500
 ) -> (Image.Image, float):
     scale = dpi / 72
 
@@ -19,19 +19,21 @@ def page_to_image(
         scale = max_width_or_height / long_side_length
 
     bitmap: PdfBitmap = page.render(scale=scale)  # type: ignore
+
+    image = bitmap.to_pil()
     try:
-        image = bitmap.to_pil()
-    finally:
-        try:
-            bitmap.close()
-        except Exception:
-            pass
+        bitmap.close()
+    except Exception as e:
+        logger.error(f"Failed to close bitmap: {e}")
     return image, scale
 
 
+
+
 def image_to_bytes(
     image: Image.Image,
-    image_format: str = "PNG",  # 也可以用 "JPEG"
+    # image_format: str = "PNG",  # 也可以用 "JPEG"
+    image_format: str = "JPEG",
 ) -> bytes:
     with BytesIO() as image_buffer:
         image.save(image_buffer, format=image_format)
@@ -40,16 +42,26 @@ def image_to_bytes(
 
 def image_to_b64str(
     image: Image.Image,
-    image_format: str = "PNG",  # 也可以用 "JPEG"
+    # image_format: str = "PNG",  # 也可以用 "JPEG"
+    image_format: str = "JPEG",
 ) -> str:
     image_bytes = image_to_bytes(image, image_format)
     return base64.b64encode(image_bytes).decode("utf-8")
 
 
+def base64_to_pil_image(
+    base64_str: str,
+) -> Image.Image:
+    """Convert base64 string to PIL Image."""
+    image_bytes = base64.b64decode(base64_str)
+    with BytesIO(image_bytes) as image_buffer:
+        return Image.open(image_buffer).convert("RGB")
+
+
 def pdf_to_images(
     pdf: str | bytes | PdfDocument,
-    dpi: int = 144,
-    max_width_or_height: int = 2560,
+    dpi: int = 200,
+    max_width_or_height: int = 3500,
     start_page_id: int = 0,
     end_page_id: int | None = None,
 ) -> list[Image.Image]:
@@ -76,11 +88,12 @@ def pdf_to_images(
 
 def pdf_to_images_bytes(
     pdf: str | bytes | PdfDocument,
-    dpi: int = 144,
-    max_width_or_height: int = 2560,
+    dpi: int = 200,
+    max_width_or_height: int = 3500,
     start_page_id: int = 0,
     end_page_id: int | None = None,
-    image_format: str = "PNG",
+    # image_format: str = "PNG",  # 也可以用 "JPEG"
+    image_format: str = "JPEG",
 ) -> list[bytes]:
     images = pdf_to_images(pdf, dpi, max_width_or_height, start_page_id, end_page_id)
     return [image_to_bytes(image, image_format) for image in images]
@@ -88,11 +101,12 @@ def pdf_to_images_bytes(
 
 def pdf_to_images_b64strs(
     pdf: str | bytes | PdfDocument,
-    dpi: int = 144,
-    max_width_or_height: int = 2560,
+    dpi: int = 200,
+    max_width_or_height: int = 3500,
     start_page_id: int = 0,
     end_page_id: int | None = None,
-    image_format: str = "PNG",
+    # image_format: str = "PNG",  # 也可以用 "JPEG"
+    image_format: str = "JPEG",
 ) -> list[str]:
     images = pdf_to_images(pdf, dpi, max_width_or_height, start_page_id, end_page_id)
     return [image_to_b64str(image, image_format) for image in images]
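Round-trip sketch for the new JPEG default and the new base64 decoder (both helpers are defined in this module):

    from PIL import Image

    img = Image.new("RGB", (32, 32), color=(255, 0, 0))
    b64 = image_to_b64str(img)           # JPEG-encoded by default now
    restored = base64_to_pil_image(b64)  # back to an RGB PIL image
    print(restored.size)  # (32, 32)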

+ 338 - 0
mineru/utils/table_merge.py

@@ -0,0 +1,338 @@
+# Copyright (c) Opendatalab. All rights reserved.
+
+from loguru import logger
+from bs4 import BeautifulSoup
+
+from mineru.utils.enum_class import BlockType, SplitFlag
+
+
+def full_to_half(text: str) -> str:
+    """Convert full-width characters to half-width characters using code point manipulation.
+
+    Args:
+        text: String containing full-width characters
+
+    Returns:
+        String with full-width characters converted to half-width
+    """
+    result = []
+    for char in text:
+        code = ord(char)
+        # Full-width letters, numbers and punctuation (FF01-FF5E)
+        if 0xFF01 <= code <= 0xFF5E:
+            result.append(chr(code - 0xFEE0))  # Shift to ASCII range
+        else:
+            result.append(char)
+    return ''.join(result)
+
+
+def calculate_table_total_columns(soup):
+    """计算表格的总列数,通过分析整个表格结构来处理rowspan和colspan
+
+    Args:
+        soup: BeautifulSoup解析的表格
+
+    Returns:
+        int: 表格的总列数
+    """
+    rows = soup.find_all("tr")
+    if not rows:
+        return 0
+
+    # track which (row, col) positions are already occupied by spanning cells
+    max_cols = 0
+    occupied = {}  # {row_idx: {col_idx: True}}
+
+    for row_idx, row in enumerate(rows):
+        col_idx = 0
+        cells = row.find_all(["td", "th"])
+
+        if row_idx not in occupied:
+            occupied[row_idx] = {}
+
+        for cell in cells:
+            # advance to the next unoccupied column position
+            while col_idx in occupied[row_idx]:
+                col_idx += 1
+
+            colspan = int(cell.get("colspan", 1))
+            rowspan = int(cell.get("rowspan", 1))
+
+            # mark every position covered by this cell
+            for r in range(row_idx, row_idx + rowspan):
+                if r not in occupied:
+                    occupied[r] = {}
+                for c in range(col_idx, col_idx + colspan):
+                    occupied[r][c] = True
+
+            col_idx += colspan
+            max_cols = max(max_cols, col_idx)
+
+    return max_cols
+
+
+def calculate_row_columns(row):
+    """
+    Count a table row's effective number of columns, honouring colspan.
+
+    Args:
+        row: BeautifulSoup tr element
+
+    Returns:
+        int: effective column count of the row
+    """
+    cells = row.find_all(["td", "th"])
+    column_count = 0
+
+    for cell in cells:
+        colspan = int(cell.get("colspan", 1))
+        column_count += colspan
+
+    return column_count
+
+
+def calculate_visual_columns(row):
+    """
+    Count a table row's visual number of columns (the raw td/th cell count,
+    ignoring colspan).
+
+    Args:
+        row: BeautifulSoup tr element
+
+    Returns:
+        int: visual column count (number of actual cells)
+    """
+    cells = row.find_all(["td", "th"])
+    return len(cells)
+
+
+def detect_table_headers(soup1, soup2, max_header_rows=5):
+    """
+    Detect and compare the header rows of two tables.
+
+    Args:
+        soup1: BeautifulSoup object of the first table
+        soup2: BeautifulSoup object of the second table
+        max_header_rows: maximum number of rows that may form the header
+
+    Returns:
+        tuple: (header row count, whether the headers match, header texts)
+    """
+    rows1 = soup1.find_all("tr")
+    rows2 = soup2.find_all("tr")
+
+    min_rows = min(len(rows1), len(rows2), max_header_rows)
+    header_rows = 0
+    headers_match = True
+    header_texts = []
+
+    for i in range(min_rows):
+        # 提取当前行的所有单元格
+        cells1 = rows1[i].find_all(["td", "th"])
+        cells2 = rows2[i].find_all(["td", "th"])
+
+        # check whether the two rows match in structure and content
+        structure_match = True
+
+        # first compare the cell counts
+        if len(cells1) != len(cells2):
+            structure_match = False
+        else:
+            # then compare cell attributes and text
+            for cell1, cell2 in zip(cells1, cells2):
+                colspan1 = int(cell1.get("colspan", 1))
+                rowspan1 = int(cell1.get("rowspan", 1))
+                colspan2 = int(cell2.get("colspan", 1))
+                rowspan2 = int(cell2.get("rowspan", 1))
+
+                text1 = full_to_half(cell1.get_text().strip())
+                text2 = full_to_half(cell2.get_text().strip())
+
+                if colspan1 != colspan2 or rowspan1 != rowspan2 or text1 != text2:
+                    structure_match = False
+                    break
+
+        if structure_match:
+            header_rows += 1
+            row_texts = [full_to_half(cell.get_text().strip()) for cell in cells1]
+            header_texts.append(row_texts)  # record the header texts
+        else:
+            headers_match = header_rows > 0  # headers match only if at least one row matched
+            break
+
+    # no matching header row found: report failure
+    if header_rows == 0:
+        headers_match = False
+
+    return header_rows, headers_match, header_texts
+
+
+def can_merge_tables(current_table_block, previous_table_block):
+    """判断两个表格是否可以合并"""
+    # 检查表格是否有caption和footnote
+    if any(block["type"] == BlockType.TABLE_CAPTION for block in current_table_block["blocks"]):
+        return False, None, None, None, None
+
+    if any(block["type"] == BlockType.TABLE_FOOTNOTE for block in previous_table_block["blocks"]):
+        return False, None, None, None, None
+
+    # fetch both tables' HTML
+    current_html = ""
+    previous_html = ""
+
+    for block in current_table_block["blocks"]:
+        if (block["type"] == BlockType.TABLE_BODY and block["lines"] and block["lines"][0]["spans"]):
+            current_html = block["lines"][0]["spans"][0].get("html", "")
+
+    for block in previous_table_block["blocks"]:
+        if (block["type"] == BlockType.TABLE_BODY and block["lines"] and block["lines"][0]["spans"]):
+            previous_html = block["lines"][0]["spans"][0].get("html", "")
+
+    if not current_html or not previous_html:
+        return False, None, None, None, None
+
+    # reject the merge if the table widths differ by 10% or more
+    x0_t1, y0_t1, x1_t1, y1_t1 = current_table_block["bbox"]
+    x0_t2, y0_t2, x1_t2, y1_t2 = previous_table_block["bbox"]
+    table1_width = x1_t1 - x0_t1
+    table2_width = x1_t2 - x0_t2
+
+    if abs(table1_width - table2_width) / min(table1_width, table2_width) >= 0.1:
+        return False, None, None, None, None
+
+    # parse the HTML and compare the table structures
+    soup1 = BeautifulSoup(previous_html, "html.parser")
+    soup2 = BeautifulSoup(current_html, "html.parser")
+
+    # check that the total column counts match
+    table_cols1 = calculate_table_total_columns(soup1)
+    table_cols2 = calculate_table_total_columns(soup2)
+    # logger.debug(f"Table columns - Previous: {table_cols1}, Current: {table_cols2}")
+    tables_match = table_cols1 == table_cols2
+
+    # check that the boundary rows' column counts match
+    rows_match = check_rows_match(soup1, soup2)
+
+    return (tables_match or rows_match), soup1, soup2, current_html, previous_html
+
+
+def check_rows_match(soup1, soup2):
+    """检查表格行是否匹配"""
+    rows1 = soup1.find_all("tr")
+    rows2 = soup2.find_all("tr")
+
+    if not (rows1 and rows2):
+        return False
+
+    # last data row of the first table
+    last_row = None
+    for row in reversed(rows1):
+        if row.find_all(["td", "th"]):
+            last_row = row
+            break
+
+    # detect the header row count so the second table's first data row can be located
+    header_count, _, _ = detect_table_headers(soup1, soup2)
+
+    # first data row of the second table
+    first_data_row = None
+    if len(rows2) > header_count:
+        first_data_row = rows2[header_count]  # first non-header row
+
+    if not (last_row and first_data_row):
+        return False
+
+    # effective (colspan-aware) and visual column counts
+    last_row_cols = calculate_row_columns(last_row)
+    first_row_cols = calculate_row_columns(first_data_row)
+    last_row_visual_cols = calculate_visual_columns(last_row)
+    first_row_visual_cols = calculate_visual_columns(first_data_row)
+
+    # logger.debug(f"行列数 - 前表最后一行: {last_row_cols}(视觉列数:{last_row_visual_cols}), 当前表首行: {first_row_cols}(视觉列数:{first_row_visual_cols})")
+
+    # accept a match on either the effective or the visual column count
+    return last_row_cols == first_row_cols or last_row_visual_cols == first_row_visual_cols
+
+
+def perform_table_merge(soup1, soup2, previous_table_block, wait_merge_table_footnotes):
+    """执行表格合并操作"""
+    # 检测表头有几行,并确认表头内容是否一致
+    header_count, headers_match, header_texts = detect_table_headers(soup1, soup2)
+    # logger.debug(f"检测到表头行数: {header_count}, 表头匹配: {headers_match}")
+    # logger.debug(f"表头内容: {header_texts}")
+
+    # locate each table's tbody, falling back to the table element itself
+    tbody1 = soup1.find("tbody") or soup1.find("table")
+    tbody2 = soup2.find("tbody") or soup2.find("table")
+
+    # append the second table's rows to the first table
+    if tbody1 and tbody2:
+        rows2 = soup2.find_all("tr")
+        # skip the second table's (duplicate) header rows
+        for row in rows2[header_count:]:
+            # detach the row from its original tree, then append it
+            row.extract()
+            tbody1.append(row)
+
+    # carry the merged table's footnotes over to the previous table
+    for table_footnote in wait_merge_table_footnotes:
+        temp_table_footnote = table_footnote.copy()
+        temp_table_footnote[SplitFlag.CROSS_PAGE] = True
+        previous_table_block["blocks"].append(temp_table_footnote)
+
+    return str(soup1)
+
+
+def merge_table(page_info_list):
+    """合并跨页表格"""
+    # 倒序遍历每一页
+    for page_idx in range(len(page_info_list) - 1, -1, -1):
+        # 跳过第一页,因为它没有前一页
+        if page_idx == 0:
+            continue
+
+        page_info = page_info_list[page_idx]
+        previous_page_info = page_info_list[page_idx - 1]
+
+        # the current page must start with a table block
+        if not (page_info["para_blocks"] and page_info["para_blocks"][0]["type"] == BlockType.TABLE):
+            continue
+
+        current_table_block = page_info["para_blocks"][0]
+
+        # the previous page must end with a table block
+        if not (previous_page_info["para_blocks"] and previous_page_info["para_blocks"][-1]["type"] == BlockType.TABLE):
+            continue
+
+        previous_table_block = previous_page_info["para_blocks"][-1]
+
+        # collect the footnotes of the table about to be merged
+        wait_merge_table_footnotes = [
+            block for block in current_table_block["blocks"]
+            if block["type"] == BlockType.TABLE_FOOTNOTE
+        ]
+
+        # check whether the two tables can be merged
+        can_merge, soup1, soup2, current_html, previous_html = can_merge_tables(
+            current_table_block, previous_table_block
+        )
+
+        if not can_merge:
+            continue
+
+        # perform the merge
+        merged_html = perform_table_merge(
+            soup1, soup2, previous_table_block, wait_merge_table_footnotes
+        )
+
+        # update previous_table_block's html
+        for block in previous_table_block["blocks"]:
+            if (block["type"] == BlockType.TABLE_BODY and block["lines"] and block["lines"][0]["spans"]):
+                block["lines"][0]["spans"][0]["html"] = merged_html
+                break
+
+        # clear out the current page's (now merged) table block
+        for block in current_table_block["blocks"]:
+            block['lines'] = []
+            block[SplitFlag.LINES_DELETED] = True
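Span handling in miniature (hypothetical table; assumes calculate_table_total_columns is importable from this module):

    from bs4 import BeautifulSoup

    html = ("<table><tr><td colspan=2>A</td><td rowspan=2>B</td></tr>"
            "<tr><td>C</td><td>D</td></tr></table>")
    print(calculate_table_total_columns(BeautifulSoup(html, "html.parser")))  # 3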

+ 3 - 0
mkdocs.yml

@@ -50,6 +50,9 @@ theme:
     - toc.integrate
 
 extra:
+  analytics:
+    provider: google
+    property: G-44K480CC48
   social:
     - icon: fontawesome/brands/github
       link: https://github.com/opendatalab/MinerU

+ 1 - 1
projects/multi_gpu_v2/_config_endpoint.py

@@ -44,7 +44,7 @@ def config_endpoint():
                 logging.info(f"Successfully connected to ModelScope. Using 'modelscope' as model source.")
                 return True
         except requests.exceptions.RequestException as e:
-            logging.error(f"Failed to connect to ModelScope at {model_list_url}: {e}")
+            logging.error(f"Failed to connect to ModelScope at {modelscope_url}: {e}")
         
     elif os.environ['MINERU_MODEL_SOURCE'] == 'local':
         logging.info("Using 'local' as model source.")

+ 6 - 21
pyproject.toml

@@ -35,6 +35,9 @@ dependencies = [
     "json-repair>=0.46.2",
     "opencv-python>=4.11.0.86",
     "fast-langdetect>=0.2.3,<0.3.0",
+    "scikit-image>=0.25.0,<1.0.0",
+    "openai>=1.70.0,<2",
+    "beautifulsoup4>=4.13.5,<5",
 ]
 
 [project.optional-dependencies]
@@ -53,23 +56,22 @@ vlm = [
     "pydantic",
 ]
 sglang = [
-    "sglang[all]>=0.4.7,<0.4.10",
+    "sglang[all]>=0.4.7,<0.4.11",
 ]
 pipeline = [
     "matplotlib>=3.10,<4",
     "ultralytics>=8.3.48,<9",
     "doclayout_yolo==0.0.4",
     "dill>=0.3.8,<1",
-    "rapid_table>=1.0.5,<2.0.0",
     "PyYAML>=6.0.2,<7",
     "ftfy>=6.3.1,<7",
-    "openai>=1.70.0,<2",
     "shapely>=2.0.7,<3",
     "pyclipper>=1.3.0,<2",
     "omegaconf>=2.3.0,<3",
-    "torch>=2.2.2,!=2.5.0,!=2.5.1,<3",
+    "torch>=2.6.0,<2.8.0",
     "torchvision",
     "transformers>=4.49.0,!=4.51.0,<5.0.0",
+    "onnxruntime>1.17.0",
 ]
 api = [
     "fastapi",
@@ -90,23 +92,6 @@ all = [
     "mineru[core]",
     "mineru[sglang]",
 ]
-pipeline_old_linux = [
-    "matplotlib>=3.10,<=3.10.1",
-    "ultralytics>=8.3.48,<=8.3.104",
-    "doclayout_yolo==0.0.4",
-    "dill==0.3.8",
-    "PyYAML==6.0.2",
-    "ftfy==6.3.1",
-    "openai==1.71.0",
-    "shapely==2.1.0",
-    "pyclipper==1.3.0.post6",
-    "omegaconf==2.3.0",
-    "albumentations==1.4.20",
-    "rapid_table==1.0.3",
-    "torch>=2.2.2,!=2.5.0,!=2.5.1,<3",
-    "torchvision",
-    "transformers>=4.49.0,!=4.51.0,<5.0.0",
-]
 
 [project.urls]
 homepage = "https://mineru.net/"

Binary
tests/unittest/pdfs/test.pdf


+ 36 - 21
tests/unittest/test_e2e.py

@@ -69,7 +69,10 @@ def test_pipeline_with_two_config():
         output_dir,
         parse_method="txt",
     )
-    assert_content("tests/unittest/output/test/txt/test_content_list.json")
+    res_json_path = (
+        Path(__file__).parent / "output" / "test" / "txt" / "test_content_list.json"
+    ).as_posix()
+    assert_content(res_json_path, parse_method="txt")
     infer_results, all_image_lists, all_pdf_docs, lang_list, ocr_enabled_list = (
         pipeline_doc_analyze(
             pdf_bytes_list,
@@ -87,7 +90,10 @@ def test_pipeline_with_two_config():
         output_dir,
         parse_method="ocr",
     )
-    assert_content("tests/unittest/output/test/ocr/test_content_list.json")
+    res_json_path = (
+        Path(__file__).parent / "output" / "test" / "ocr" / "test_content_list.json"
+    ).as_posix()
+    assert_content(res_json_path, parse_method="ocr")
 
 
 def test_vlm_transformers_with_default_config():
@@ -155,7 +161,10 @@ def test_vlm_transformers_with_default_config():
         )
 
         logger.info(f"local output dir is {local_md_dir}")
-        assert_content("tests/unittest/output/test/vlm/test_content_list.json")
+        res_json_path = (
+            Path(__file__).parent / "output" / "test" / "vlm" / "test_content_list.json"
+        ).as_posix()
+        assert_content(res_json_path, parse_method="vlm")
 
 
 def write_infer_result(
@@ -229,7 +238,7 @@ def validate_html(html_content):
         return False
 
 
-def assert_content(content_path):
+def assert_content(content_path, parse_method="txt"):
     content_list = []
     with open(content_path, "r", encoding="utf-8") as file:
         content_list = json.load(file)
@@ -240,31 +249,33 @@ def assert_content(content_path):
             case "image":
                 type_set.add("image")
                 assert (
-                    content_dict["image_caption"][0].strip().lower()
-                    == "Figure 1: Figure Caption".lower()
+                    fuzz.ratio(
+                        content_dict["image_caption"][0],
+                        "Figure 1: Figure Caption",
+                    )
+                    > 90
                 )
             # table checks: caption, table format and table content
             case "table":
                 type_set.add("table")
                 assert (
-                    content_dict["table_caption"][0].strip().lower()
-                    == "Table 1: Table Caption".lower()
+                    fuzz.ratio(
+                        content_dict["table_caption"][0],
+                        "Table 1: Table Caption",
+                    )
+                    > 90
                 )
                 assert validate_html(content_dict["table_body"])
                 target_str_list = [
-                    "Linear Regression",
+                    "Model",
+                    "Testing",
+                    "Error",
+                    "Linear",
+                    "Regression",
                     "0.98740",
                     "1321.2",
-                    "2-order Polynomial",
-                    "0.99906",
-                    "26.4",
-                    "3-order Polynomial",
-                    "0.99913",
-                    "101.2",
-                    "4-order Polynomial",
-                    "0.99914",
-                    "94.1",
-                    "Gray Prediction",
+                    "Gray",
+                    "Prediction",
                     "0.00617",
                     "687",
                 ]
@@ -272,8 +283,12 @@ def assert_content(content_path):
                 for target_str in target_str_list:
                     if target_str in content_dict["table_body"]:
                         correct_count += 1
-
-                assert correct_count > 0.9 * len(target_str_list)
+                if parse_method == "txt" or parse_method == "ocr":
+                    assert correct_count > 0.9 * len(target_str_list)
+                elif parse_method == "vlm":
+                    assert correct_count > 0.7 * len(target_str_list)
+                else:
+                    assert False
             # equation check: verify that equation elements are present
             case "equation":
                 type_set.add("equation")

Some files were not shown because too many files changed in this diff