
Merge pull request #6 from opendatalab/dev

Dev
Kaiwen Liu · 1 year ago
commit ece7f8d5a4
100 changed files with 2735 additions and 1477 deletions
  1. .github/ISSUE_TEMPLATE/bug_report.yml (+1 / -0)
  2. .github/workflows/cli.yml (+9 / -11)
  3. .github/workflows/daily.yml (+55 / -0)
  4. .github/workflows/huigui.yml (+61 / -0)
  5. .github/workflows/update_base.yml (+0 / -22)
  6. .gitignore (+7 / -1)
  7. .readthedocs.yaml (+16 / -0)
  8. LICENSE.md (+1 / -0)
  9. README.md (+0 / -0)
  10. README.md.bak (+0 / -328)
  11. README_ja-JP.md (+15 / -6)
  12. README_zh-CN.md (+0 / -0)
  13. README_zh-CN.md.bak (+0 / -424)
  14. docs/README_Ubuntu_CUDA_Acceleration_en_US.md (+0 / -117)
  15. docs/README_Windows_CUDA_Acceleration_en_US.md (+0 / -104)
  16. docs/download_models.py (+0 / -4)
  17. docs/en/.readthedocs.yaml (+16 / -0)
  18. docs/en/Makefile (+20 / -0)
  19. docs/en/_static/image/logo.png (+0 / -0)
  20. docs/en/conf.py (+122 / -0)
  21. docs/en/index.rst (+26 / -0)
  22. docs/en/make.bat (+35 / -0)
  23. docs/how_to_download_models_en.md (+0 / -63)
  24. docs/how_to_download_models_zh_cn.md (+0 / -75)
  25. docs/requirements.txt (+6 / -0)
  26. docs/zh_cn/.readthedocs.yaml (+16 / -0)
  27. docs/zh_cn/Makefile (+20 / -0)
  28. docs/zh_cn/_static/image/logo.png (BIN)
  29. docs/zh_cn/conf.py (+122 / -0)
  30. docs/zh_cn/index.rst (+26 / -0)
  31. docs/zh_cn/make.bat (+35 / -0)
  32. magic-pdf.template.json (+1 / -0)
  33. magic_pdf/dict2md/ocr_mkcontent.py (+69 / -44)
  34. magic_pdf/libs/MakeContentConfig.py (+1 / -0)
  35. magic_pdf/libs/__pycache__/__init__.cpython-312.pyc (BIN)
  36. magic_pdf/libs/__pycache__/version.cpython-312.pyc (BIN)
  37. magic_pdf/libs/boxbase.py (+19 / -0)
  38. magic_pdf/libs/clean_memory.py (+10 / -0)
  39. magic_pdf/libs/config_reader.py (+12 / -0)
  40. magic_pdf/libs/draw_bbox.py (+101 / -36)
  41. magic_pdf/libs/ocr_content_type.py (+2 / -0)
  42. magic_pdf/libs/version.py (+1 / -1)
  43. magic_pdf/model/doc_analyze_by_custom_model.py (+48 / -27)
  44. magic_pdf/model/magic_model.py (+115 / -46)
  45. magic_pdf/model/pdf_extract_kit.py (+65 / -36)
  46. magic_pdf/model/pp_structure_v2.py (+5 / -2)
  47. magic_pdf/model/v3/__init__.py (+0 / -0)
  48. magic_pdf/model/v3/helpers.py (+125 / -0)
  49. magic_pdf/para/para_split_v3.py (+251 / -0)
  50. magic_pdf/pdf_parse_by_ocr.py (+1 / -1)
  51. magic_pdf/pdf_parse_by_txt.py (+1 / -1)
  52. magic_pdf/pdf_parse_union_core_v2.py (+453 / -0)
  53. magic_pdf/pipe/AbsPipe.py (+8 / -3)
  54. magic_pdf/pipe/OCRPipe.py (+6 / -4)
  55. magic_pdf/pipe/TXTPipe.py (+6 / -4)
  56. magic_pdf/pipe/UNIPipe.py (+11 / -7)
  57. magic_pdf/pre_proc/ocr_detect_all_bboxes.py (+53 / -0)
  58. magic_pdf/pre_proc/ocr_dict_merge.py (+1 / -2)
  59. magic_pdf/resources/model_config/UniMERNet/demo.yaml (+7 / -7)
  60. magic_pdf/resources/model_config/model_configs.yaml (+1 / -1)
  61. magic_pdf/tools/cli.py (+14 / -1)
  62. magic_pdf/tools/common.py (+13 / -8)
  63. magic_pdf/user_api.py (+16 / -5)
  64. old_docs/FAQ_en_us.md (+15 / -2)
  65. old_docs/FAQ_zh_cn.md (+21 / -3)
  66. old_docs/README_Ubuntu_CUDA_Acceleration_en_US.md (+120 / -0)
  67. old_docs/README_Ubuntu_CUDA_Acceleration_zh_CN.md (+45 / -28)
  68. old_docs/README_Windows_CUDA_Acceleration_en_US.md (+102 / -0)
  69. old_docs/README_Windows_CUDA_Acceleration_zh_CN.md (+40 / -35)
  70. old_docs/chemical_knowledge_introduction/introduction.pdf (+0 / -0)
  71. old_docs/chemical_knowledge_introduction/introduction.xmind (+0 / -0)
  72. old_docs/download_models.py (+46 / -0)
  73. old_docs/download_models_hf.py (+46 / -0)
  74. old_docs/how_to_download_models_en.md (+29 / -0)
  75. old_docs/how_to_download_models_zh_cn.md (+48 / -0)
  76. old_docs/images/MinerU-logo-hq.png (+0 / -0)
  77. old_docs/images/MinerU-logo.png (BIN)
  78. old_docs/images/datalab_logo.png (+0 / -0)
  79. old_docs/images/flowchart_en.png (+0 / -0)
  80. old_docs/images/flowchart_zh_cn.png (+0 / -0)
  81. old_docs/images/layout_example.png (+0 / -0)
  82. old_docs/images/poly.png (+0 / -0)
  83. old_docs/images/project_panorama_en.png (+0 / -0)
  84. old_docs/images/project_panorama_zh_cn.png (+0 / -0)
  85. old_docs/images/spans_example.png (+0 / -0)
  86. old_docs/images/web_demo_1.png (BIN)
  87. old_docs/output_file_en_us.md (+0 / -0)
  88. old_docs/output_file_zh_cn.md (+0 / -0)
  89. projects/README.md (+4 / -0)
  90. projects/README_zh-CN.md (+4 / -0)
  91. projects/gradio_app/README.md (+24 / -0)
  92. projects/gradio_app/README_zh-CN.md (+24 / -0)
  93. projects/gradio_app/app.py (+23 / -18)
  94. projects/gradio_app/examples/academic_paper_formula.pdf (BIN)
  95. projects/gradio_app/examples/academic_paper_img_formula.pdf (BIN)
  96. projects/gradio_app/examples/garbled_formula.pdf (BIN)
  97. projects/gradio_app/examples/garbled_formula2.pdf (BIN)
  98. projects/gradio_app/examples/garbled_img_formula.pdf (BIN)
  99. projects/gradio_app/examples/scanned.pdf (BIN)
  100. projects/gradio_app/header.html (+119 / -0)

+ 1 - 0
.github/ISSUE_TEMPLATE/bug_report.yml

@@ -80,6 +80,7 @@ body:
         -
         - "0.6.x"
         - "0.7.x"
+        - "0.8.x"
     validations:
       required: true
 

+ 9 - 11
.github/workflows/cli.yml

@@ -10,7 +10,6 @@ on:
     paths-ignore:
       - "cmds/**"
       - "**.md"
-      - "**.yml"
   pull_request:
     branches:
       - "master"
@@ -18,12 +17,11 @@ on:
     paths-ignore:
       - "cmds/**"
       - "**.md"
-      - "**.yml"
   workflow_dispatch:
 jobs:
   cli-test:
     runs-on: pdf
-    timeout-minutes: 120
+    timeout-minutes: 240
     strategy:
       fail-fast: true
 
@@ -33,16 +31,16 @@ jobs:
       with:
         fetch-depth: 2
 
-    - name: install
+    - name: install&test
       run: |
-        echo $GITHUB_WORKSPACE && sh tests/retry_env.sh
-    - name: unit test
-      run: |        
-        cd $GITHUB_WORKSPACE && export PYTHONPATH=. && coverage run -m  pytest  tests/test_unit.py --cov=magic_pdf/ --cov-report term-missing --cov-report html
+        source activate mineru
+        conda env list
+        pip show coverage
+        # cd $GITHUB_WORKSPACE && sh tests/retry_env.sh
+        cd $GITHUB_WORKSPACE && python tests/clean_coverage.py      
+        cd $GITHUB_WORKSPACE && coverage run -m pytest tests/unittest/ --cov=magic_pdf/  --cov-report html --cov-report term-missing
         cd $GITHUB_WORKSPACE && python tests/get_coverage.py
-    - name: cli test
-      run: |
-        cd $GITHUB_WORKSPACE &&  pytest -s -v tests/test_cli/test_cli_sdk.py
+        cd $GITHUB_WORKSPACE && pytest -m P0 -s -v tests/test_cli/test_cli_sdk.py
 
   notify_to_feishu:
     if: ${{ always() && !cancelled() && contains(needs.*.result, 'failure') && (github.ref_name == 'master') }}
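
The rewritten step above folds install, unit tests, and CLI tests into one block and leans on two helper scripts that this diff does not include: `tests/clean_coverage.py` and `tests/get_coverage.py`. As a minimal sketch of what a coverage gate in the spirit of `get_coverage.py` could look like (the real script may differ; the 20% threshold is an assumption):

```python
# Hypothetical coverage gate, similar in spirit to tests/get_coverage.py (the
# real script is not shown in this PR). Assumes coverage.py >= 5.0 and a
# .coverage data file left behind by the "coverage run" step above.
import json
import subprocess
import sys

THRESHOLD = 20.0  # assumed minimum total line coverage, in percent


def main() -> None:
    # "coverage json" dumps the collected data, including the grand total.
    subprocess.check_call(["coverage", "json", "-o", "coverage.json"])
    with open("coverage.json") as f:
        percent = json.load(f)["totals"]["percent_covered"]
    print(f"total line coverage: {percent:.2f}%")
    if percent < THRESHOLD:
        sys.exit(f"coverage {percent:.2f}% is below the {THRESHOLD}% gate")


if __name__ == "__main__":
    main()
```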

+ 55 - 0
.github/workflows/daily.yml

@@ -0,0 +1,55 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+
+name: mineru
+on:
+  schedule:
+    - cron: '0 22 * * *'  # run every day at 10 PM
+jobs:
+  cli-test:
+    runs-on: pdf
+    timeout-minutes: 240
+    strategy:
+      fail-fast: true
+
+    steps:
+    - name: PDF cli
+      uses: actions/checkout@v3
+      with:
+        fetch-depth: 2
+
+    - name: install&test
+      run: |
+        source activate mineru
+        conda env list
+        pip show coverage
+        # cd $GITHUB_WORKSPACE && sh tests/retry_env.sh
+        cd $GITHUB_WORKSPACE && python tests/clean_coverage.py      
+        cd $GITHUB_WORKSPACE && coverage run -m pytest tests/unittest/ --cov=magic_pdf/  --cov-report html --cov-report term-missing
+        cd $GITHUB_WORKSPACE && python tests/get_coverage.py
+        cd $GITHUB_WORKSPACE && pytest -s -v tests/test_cli/test_cli_sdk.py
+
+  notify_to_feishu:
+    if: ${{ always() && !cancelled() && contains(needs.*.result, 'failure') && (github.ref_name == 'master') }}
+    needs: cli-test
+    runs-on: pdf
+    steps:
+    - name: get_actor
+      run: |
+          metion_list="dt-yy"
+          echo $GITHUB_ACTOR
+          if [[ $GITHUB_ACTOR == "drunkpig" ]]; then
+            metion_list="xuchao"
+          elif [[ $GITHUB_ACTOR == "myhloli" ]]; then
+            metion_list="zhaoxiaomeng"
+          elif [[ $GITHUB_ACTOR == "icecraft" ]]; then
+            metion_list="xurui1"
+          fi
+          echo $metion_list
+          echo "METIONS=$metion_list" >> "$GITHUB_ENV"
+          echo ${{ env.METIONS }}
+
+    - name: notify
+      run: |
+        echo ${{ secrets.USER_ID }}
+        curl -X POST -H "Content-Type: application/json" -d '{"msg_type":"post","content":{"post":{"zh_cn":{"title":"'${{ github.repository }}' GitHubAction Failed","content":[[{"tag":"text","text":""},{"tag":"a","text":"Please click here for details ","href":"https://github.com/'${{ github.repository }}'/actions/runs/'${GITHUB_RUN_ID}'"},{"tag":"at","user_id":"'${{ secrets.USER_ID }}'"}]]}}}}'  ${{ secrets.WEBHOOK_URL }}
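
The notify step splices `${{ }}` expressions into one single-quoted JSON string, which is fragile under shell quoting. As a hedged alternative, the same Feishu "post" payload could be assembled with `json.dumps`; `notify_failure` and its parameters are illustrative names, and the webhook URL and user id would still come from the workflow's secrets:

```python
# Sketch: building the Feishu failure notification from the curl command above
# in Python instead of inline shell JSON. The payload structure is copied from
# the workflow; the function and parameter names are illustrative.
import json
import urllib.request


def notify_failure(webhook_url: str, repo: str, run_id: str, user_id: str) -> None:
    run_url = f"https://github.com/{repo}/actions/runs/{run_id}"
    payload = {
        "msg_type": "post",
        "content": {"post": {"zh_cn": {
            "title": f"{repo} GitHubAction Failed",
            "content": [[
                {"tag": "text", "text": ""},
                {"tag": "a", "text": "Please click here for details ", "href": run_url},
                {"tag": "at", "user_id": user_id},
            ]],
        }}},
    }
    req = urllib.request.Request(
        webhook_url,
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    urllib.request.urlopen(req)
```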

+ 61 - 0
.github/workflows/huigui.yml

@@ -0,0 +1,61 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+
+name: mineru
+on:
+  push:
+    branches:
+      - "master"
+      - "dev"
+    paths-ignore:
+      - "cmds/**"
+      - "**.md"
+  workflow_dispatch:
+jobs:
+  cli-test:
+    runs-on: pdf
+    timeout-minutes: 240
+    strategy:
+      fail-fast: true
+
+    steps:
+    - name: PDF cli
+      uses: actions/checkout@v3
+      with:
+        fetch-depth: 2
+
+    - name: install&test
+      run: |
+        source activate mineru
+        conda env list
+        pip show coverage
+        # cd $GITHUB_WORKSPACE && sh tests/retry_env.sh
+        cd $GITHUB_WORKSPACE && python tests/clean_coverage.py      
+        cd $GITHUB_WORKSPACE && coverage run -m pytest tests/unittest/ --cov=magic_pdf/  --cov-report html --cov-report term-missing
+        cd $GITHUB_WORKSPACE && python tests/get_coverage.py
+        cd $GITHUB_WORKSPACE && pytest -s -v tests/test_cli/test_cli_sdk.py
+
+  notify_to_feishu:
+    if: ${{ always() && !cancelled() && contains(needs.*.result, 'failure') && (github.ref_name == 'master') }}
+    needs: cli-test
+    runs-on: pdf
+    steps:
+    - name: get_actor
+      run: |
+          metion_list="dt-yy"
+          echo $GITHUB_ACTOR
+          if [[ $GITHUB_ACTOR == "drunkpig" ]]; then
+            metion_list="xuchao"
+          elif [[ $GITHUB_ACTOR == "myhloli" ]]; then
+            metion_list="zhaoxiaomeng"
+          elif [[ $GITHUB_ACTOR == "icecraft" ]]; then
+            metion_list="xurui1"
+          fi
+          echo $metion_list
+          echo "METIONS=$metion_list" >> "$GITHUB_ENV"
+          echo ${{ env.METIONS }}
+
+    - name: notify
+      run: |
+        echo ${{ secrets.USER_ID }}
+        curl -X POST -H "Content-Type: application/json" -d '{"msg_type":"post","content":{"post":{"zh_cn":{"title":"'${{ github.repository }}' GitHubAction Failed","content":[[{"tag":"text","text":""},{"tag":"a","text":"Please click here for details ","href":"https://github.com/'${{ github.repository }}'/actions/runs/'${GITHUB_RUN_ID}'"},{"tag":"at","user_id":"'${{ secrets.USER_ID }}'"}]]}}}}'  ${{ secrets.WEBHOOK_URL }}

+ 0 - 22
.github/workflows/update_base.yml

@@ -1,22 +0,0 @@
-# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
-# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
-
-name: update-base
-on:
-  push:
-    tags:
-      - '*released'
-  workflow_dispatch:
-jobs:
-  pdf-test:
-    runs-on: pdf
-    timeout-minutes: 40
-
-
-    steps:
-    - name: update-base
-      uses: actions/checkout@v3
-    - name: start-update
-      run: |
-        echo "start test"
-  

+ 7 - 1
.gitignore

@@ -1,5 +1,6 @@
 *.tar
 *.tar.gz
+*.zip
 venv*/
 envs/
 slurm_logs/
@@ -31,9 +32,14 @@ tmp
 .vscode
 .vscode/
 ocr_demo
-
+.coveragerc
 /app/common/__init__.py
 /magic_pdf/config/__init__.py
 source.dev.env
 
 tmp
+
+projects/web/node_modules
+projects/web/dist
+
+projects/web_demo/web_demo/static/

+ 16 - 0
.readthedocs.yaml

@@ -0,0 +1,16 @@
+version: 2
+
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.10"
+
+formats:
+  - epub
+
+python:
+  install:
+    - requirements: docs/zh_cn/requirements.txt
+
+sphinx:
+  configuration: docs/zh_cn/conf.py

+ 1 - 0
LICENSE.md

@@ -659,3 +659,4 @@ specific requirements.
 if any, to sign a "copyright disclaimer" for the program, if necessary.
 For more information on this, and how to apply and follow the GNU AGPL, see
 <https://www.gnu.org/licenses/>.
+

File diff suppressed because it is too large
+ 0 - 0
README.md


+ 0 - 328
README.md.bak

@@ -1,328 +0,0 @@
-<div id="top">
-
-<p align="center">
-  <img src="docs/images/MinerU-logo.png" width="300px" style="vertical-align:middle;">
-</p>
-
-</div>
-<div align="center">
-
-[![stars](https://img.shields.io/github/stars/opendatalab/MinerU.svg)](https://github.com/opendatalab/MinerU)
-[![forks](https://img.shields.io/github/forks/opendatalab/MinerU.svg)](https://github.com/opendatalab/MinerU)
-[![open issues](https://img.shields.io/github/issues-raw/opendatalab/MinerU)](https://github.com/opendatalab/MinerU/issues)
-[![issue resolution](https://img.shields.io/github/issues-closed-raw/opendatalab/MinerU)](https://github.com/opendatalab/MinerU/issues)
-[![PyPI version](https://badge.fury.io/py/magic-pdf.svg)](https://badge.fury.io/py/magic-pdf)
-[![Downloads](https://static.pepy.tech/badge/magic-pdf)](https://pepy.tech/project/magic-pdf)
-[![Downloads](https://static.pepy.tech/badge/magic-pdf/month)](https://pepy.tech/project/magic-pdf)
-
-<a href="https://trendshift.io/repositories/11174" target="_blank"><img src="https://trendshift.io/api/badge/repositories/11174" alt="opendatalab%2FMinerU | Trendshift" style="width: 200px; height: 55px;"/></a>
-
-
-
-
-[English](README.md) | [简体中文](README_zh-CN.md) | [日本語](README_ja-JP.md)
-
-</div>
-
-<div align="center">
-<p align="center">
-<a href="https://github.com/opendatalab/MinerU">MinerU: An end-to-end PDF parsing tool based on PDF-Extract-Kit, supporting conversion from PDF to Markdown.</a>🚀🚀🚀<br>
-<a href="https://github.com/opendatalab/PDF-Extract-Kit">PDF-Extract-Kit: A Comprehensive Toolkit for High-Quality PDF Content Extraction</a>🔥🔥🔥
-</p>
-
-<p align="center">
-    👋 join us on <a href="https://discord.gg/gPxmVeGC" target="_blank">Discord</a> and <a href="https://cdn.vansin.top/internlm/mineru.jpg" target="_blank">WeChat</a>
-</p>
-</div>
-
-# MinerU 
-
-
-## Introduction
-
-MinerU is a one-stop, open-source, high-quality data extraction tool that includes the following primary features:
-
-- [Magic-PDF](#Magic-PDF)  PDF Document Extraction  
-- [Magic-Doc](#Magic-Doc)  Webpage & E-book Extraction
-
-
-# Magic-PDF
-
-
-## Introduction
-
-Magic-PDF is a tool designed to convert PDF documents into Markdown format, capable of processing files stored locally or on object storage supporting S3 protocol.
-
-Key features include:
-
-- Support for multiple front-end model inputs
-- Removal of headers, footers, footnotes, and page numbers
-- Human-readable layout formatting
-- Retains the original document's structure and formatting, including headings, paragraphs, lists, and more
-- Extraction and display of images and tables within markdown
-- Conversion of equations into LaTeX format
-- Automatic detection and conversion of garbled PDFs
-- Compatibility with CPU and GPU environments
-- Available for Windows, Linux and macOS platforms
-
-
-https://github.com/user-attachments/assets/4bea02c9-6d54-4cd6-97ed-dff14340982c
-
-
-
-## Project Panorama
-
-![Project Panorama](docs/images/project_panorama_en.png)
-
-
-## Flowchart
-
-![Flowchart](docs/images/flowchart_en.png)
-
-### Dependency repositories
-
-- [PDF-Extract-Kit : A Comprehensive Toolkit for High-Quality PDF Content Extraction](https://github.com/opendatalab/PDF-Extract-Kit) 🚀🚀🚀
-
-## Getting Started
-
-### Requirements
-
-- Python >= 3.9
-
-Using a virtual environment is recommended to avoid potential dependency conflicts; both venv and conda are suitable. 
-For example:
-```bash
-conda create -n MinerU python=3.10
-conda activate MinerU
-```
-
-### Installation and Configuration
-
-#### 1. Install Magic-PDF
-
-**1. Install dependencies**
-
-The full-feature package depends on detectron2, which requires a compilation installation.   
-If you need to compile it yourself, please refer to https://github.com/facebookresearch/detectron2/issues/5114  
-Alternatively, you can directly use our precompiled whl package (limited to Python 3.10):
-
-```bash
-pip install detectron2 --extra-index-url https://wheels.myhloli.com
-```
-
-**2. Install the full-feature package with pip**
->Note: The pip-installed package supports CPU-only and is ideal for quick tests.
->
->For CUDA/MPS acceleration in production, see [Acceleration Using CUDA or MPS](#4-Acceleration-Using-CUDA-or-MPS).
-
-```bash
-pip install magic-pdf[full]==0.6.2b1
-```
-> ❗️❗️❗️
-> We have pre-released the 0.6.2 beta version, addressing numerous issues mentioned in our logs. However, this build has not undergone full QA testing and does not represent the final release quality. Should you encounter any problems, please promptly report them to us via issues or revert to using version 0.6.1.
-> ```bash
-> pip install magic-pdf[full-cpu]==0.6.1
-> ```
-
-
-
-#### 2. Downloading model weights files
-
-For detailed references, please see below [how_to_download_models](docs/how_to_download_models_en.md)
-
-After downloading the model weights, move the 'models' directory to a directory on a larger disk space, preferably an SSD.
-
-
-#### 3. Copy the Configuration File and Make Configurations
-You can get the [magic-pdf.template.json](magic-pdf.template.json) file in the repository root directory.
-```bash
-cp magic-pdf.template.json ~/magic-pdf.json
-```
-In magic-pdf.json, configure "models-dir" to point to the directory where the model weights files are located.
-
-```json
-{
-  "models-dir": "/tmp/models"
-}
-```
-
-
-#### 4. Acceleration Using CUDA or MPS
-If you have an available Nvidia GPU or are using a Mac with Apple Silicon, you can leverage acceleration with CUDA or MPS respectively.
-##### CUDA
-
-You need to install the corresponding PyTorch version according to your CUDA version.  
-This example installs the CUDA 11.8 build. More information: https://pytorch.org/get-started/locally/
-```bash
-pip install --force-reinstall torch==2.3.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu118
-```
-> ❗ Make sure to specify the versions
-> ```bash
-> torch==2.3.1 torchvision==0.18.1
-> ```
->  in the command, as these are the highest versions we support. Omitting the versions may result in a newer build being installed automatically, which can cause the program to fail.
-
-Also, you need to modify the value of "device-mode" in the configuration file magic-pdf.json.  
-```json
-{
-  "device-mode":"cuda"
-}
-```
-
-##### MPS
-
-For macOS users with M-series chip devices, you can use MPS for inference acceleration.  
-You also need to modify the value of "device-mode" in the configuration file magic-pdf.json.  
-```json
-{
-  "device-mode":"mps"
-}
-```
-
-
-### Usage
-
-#### 1. Usage via Command Line
-
-###### simple
-
-```bash
-magic-pdf pdf-command --pdf "pdf_path" --inside_model true
-```
-After the program has finished, you can find the generated markdown files under the directory "/tmp/magic-pdf".  
-You can find the corresponding xxx_model.json file in the markdown directory.   
-If you intend to do secondary development on the post-processing pipeline, you can use the command:  
-```bash
-magic-pdf pdf-command --pdf "pdf_path" --model "model_json_path"
-```
-In this way, you won't need to re-run the model data, making debugging more convenient.
-
-
-###### more 
-
-```bash
-magic-pdf --help
-```
-
-
-#### 2. Usage via API
-
-###### Local
-```python
-image_writer = DiskReaderWriter(local_image_dir)
-image_dir = str(os.path.basename(local_image_dir))
-jso_useful_key = {"_pdf_type": "", "model_list": []}
-pipe = UNIPipe(pdf_bytes, jso_useful_key, image_writer)
-pipe.pipe_classify()
-pipe.pipe_parse()
-md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
-```
-
-###### Object Storage
-```python
-s3pdf_cli = S3ReaderWriter(pdf_ak, pdf_sk, pdf_endpoint)
-image_dir = "s3://img_bucket/"
-s3image_cli = S3ReaderWriter(img_ak, img_sk, img_endpoint, parent_path=image_dir)
-pdf_bytes = s3pdf_cli.read(s3_pdf_path, mode=s3pdf_cli.MODE_BIN)
-jso_useful_key = {"_pdf_type": "", "model_list": []}
-pipe = UNIPipe(pdf_bytes, jso_useful_key, s3image_cli)
-pipe.pipe_classify()
-pipe.pipe_parse()
-md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
-```
-
-Demo can be referred to [demo.py](demo/demo.py)
-
-
-# Magic-Doc
-
-
-## Introduction
-
-Magic-Doc is a tool designed to convert web pages or multi-format e-books into markdown format.
-
-Key Features Include:
-
-- Web Page Extraction
-  - Cross-modal precise parsing of text, images, tables, and formula information.
-
-- E-Book Document Extraction
-  - Supports various document formats including epub, mobi, with full adaptation for text and images.
-
-- Language Type Identification
-  - Accurate recognition of 176 languages.
-
-https://github.com/opendatalab/MinerU/assets/11393164/a5a650e9-f4c0-463e-acc3-960967f1a1ca
-
-
-
-https://github.com/opendatalab/MinerU/assets/11393164/0f4a6fe9-6cca-4113-9fdc-a537749d764d
-
-
-
-https://github.com/opendatalab/MinerU/assets/11393164/20438a02-ce6c-4af8-9dde-d722a4e825b2
-
-
-
-
-## Project Repository
-
-- [Magic-Doc](https://github.com/InternLM/magic-doc)
-  Outstanding Webpage and E-book Extraction Tool
-
-
-# All Thanks To Our Contributors
-
-<a href="https://github.com/opendatalab/MinerU/graphs/contributors">
-  <img src="https://contrib.rocks/image?repo=opendatalab/MinerU" />
-</a>
-
-
-# License Information
-
-[LICENSE.md](LICENSE.md)
-
-The project currently leverages PyMuPDF to deliver advanced functionalities; however, its adherence to the AGPL license may impose limitations on certain use cases. In upcoming iterations, we intend to explore and transition to a more permissively licensed PDF processing library to enhance user-friendliness and flexibility.
-
-
-# Acknowledgments
-
-- [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)
-- [PyMuPDF](https://github.com/pymupdf/PyMuPDF)
-- [fast-langdetect](https://github.com/LlmKira/fast-langdetect)
-- [pdfminer.six](https://github.com/pdfminer/pdfminer.six)
-
-
-# Citation
-
-```bibtex
-@article{he2024opendatalab,
-  title={Opendatalab: Empowering general artificial intelligence with open datasets},
-  author={He, Conghui and Li, Wei and Jin, Zhenjiang and Xu, Chao and Wang, Bin and Lin, Dahua},
-  journal={arXiv preprint arXiv:2407.13773},
-  year={2024}
-}
-
-@misc{2024mineru,
-    title={MinerU: A One-stop, Open-source, High-quality Data Extraction Tool},
-    author={MinerU Contributors},
-    howpublished = {\url{https://github.com/opendatalab/MinerU}},
-    year={2024}
-}
-```
-
-
-# Star History
-
-<a>
- <picture>
-   <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date&theme=dark" />
-   <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date" />
-   <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date" />
- </picture>
-</a>
-
-# Links
-- [LabelU (A Lightweight Multi-modal Data Annotation Tool)](https://github.com/opendatalab/labelU)
-- [LabelLLM (An Open-source LLM Dialogue Annotation Platform)](https://github.com/opendatalab/LabelLLM)
-- [PDF-Extract-Kit (A Comprehensive Toolkit for High-Quality PDF Content Extraction)](https://github.com/opendatalab/PDF-Extract-Kit)
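
The deleted README's "Local" API snippet above is not self-contained: it omits the imports and the origin of `pdf_bytes`. A runnable sketch follows; `UNIPipe` matches `magic_pdf/pipe/UNIPipe.py` touched by this PR, while the `DiskReaderWriter` import path is an assumption, and the `pipe_analyze()` step is taken from the zh-CN version of the same snippet:

```python
# Self-contained version of the deleted "Local" snippet. The DiskReaderWriter
# module path is assumed, not shown anywhere in this diff.
import os

from magic_pdf.pipe.UNIPipe import UNIPipe
from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter  # assumed path

local_image_dir = "/tmp/magic-pdf/images"  # any writable directory
image_writer = DiskReaderWriter(local_image_dir)
image_dir = str(os.path.basename(local_image_dir))

with open("example.pdf", "rb") as f:  # any local PDF
    pdf_bytes = f.read()

jso_useful_key = {"_pdf_type": "", "model_list": []}
pipe = UNIPipe(pdf_bytes, jso_useful_key, image_writer)
pipe.pipe_classify()
pipe.pipe_analyze()  # runs model inference; present in the zh-CN snippet
pipe.pipe_parse()
md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
print(md_content[:500])
```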

+ 15 - 6
README_ja-JP.md

@@ -290,14 +290,23 @@ https://github.com/opendatalab/MinerU/assets/11393164/20438a02-ce6c-4af8-9dde-d7
 # 引用
 
 ```bibtex
-@misc{2024mineru,
-    title={MinerU: A One-stop, Open-source, High-quality Data Extraction Tool},
-    author={MinerU Contributors},
-    howpublished = {\url{https://github.com/opendatalab/MinerU}},
-    year={2024}
+@misc{wang2024mineruopensourcesolutionprecise,
+      title={MinerU: An Open-Source Solution for Precise Document Content Extraction}, 
+      author={Bin Wang and Chao Xu and Xiaomeng Zhao and Linke Ouyang and Fan Wu and Zhiyuan Zhao and Rui Xu and Kaiwen Liu and Yuan Qu and Fukai Shang and Bo Zhang and Liqun Wei and Zhihao Sui and Wei Li and Botian Shi and Yu Qiao and Dahua Lin and Conghui He},
+      year={2024},
+      eprint={2409.18839},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV},
+      url={https://arxiv.org/abs/2409.18839}, 
 }
-```
 
+@article{he2024opendatalab,
+  title={Opendatalab: Empowering general artificial intelligence with open datasets},
+  author={He, Conghui and Li, Wei and Jin, Zhenjiang and Xu, Chao and Wang, Bin and Lin, Dahua},
+  journal={arXiv preprint arXiv:2407.13773},
+  year={2024}
+}
+```
 
 # スター履歴
 

File diff suppressed because it is too large
+ 0 - 0
README_zh-CN.md


+ 0 - 424
README_zh-CN.md.bak

@@ -1,424 +0,0 @@
-<div align="center" xmlns="http://www.w3.org/1999/html">
-<!-- logo -->
-<p align="center">
-  <img src="docs/images/MinerU-logo.png" width="300px" style="vertical-align:middle;">
-</p>
-
-<!-- icon -->
-
-[![stars](https://img.shields.io/github/stars/opendatalab/MinerU.svg)](https://github.com/opendatalab/MinerU)
-[![forks](https://img.shields.io/github/forks/opendatalab/MinerU.svg)](https://github.com/opendatalab/MinerU)
-[![open issues](https://img.shields.io/github/issues-raw/opendatalab/MinerU)](https://github.com/opendatalab/MinerU/issues)
-[![issue resolution](https://img.shields.io/github/issues-closed-raw/opendatalab/MinerU)](https://github.com/opendatalab/MinerU/issues)
-[![PyPI version](https://badge.fury.io/py/magic-pdf.svg)](https://badge.fury.io/py/magic-pdf)
-[![Downloads](https://static.pepy.tech/badge/magic-pdf)](https://pepy.tech/project/magic-pdf)
-[![Downloads](https://static.pepy.tech/badge/magic-pdf/month)](https://pepy.tech/project/magic-pdf)
-<a href="https://trendshift.io/repositories/11174" target="_blank"><img src="https://trendshift.io/api/badge/repositories/11174" alt="opendatalab%2FMinerU | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
-
-<!-- language -->
-
-[English](README.md) | [简体中文](README_zh-CN.md)
-
-<!-- hot link -->
-
-<p align="center">
-<a href="https://github.com/opendatalab/PDF-Extract-Kit">PDF-Extract-Kit: 高质量PDF解析工具箱</a>🔥🔥🔥
-</p>
-
-<!-- join us -->
-
-<p align="center">
-    👋 join us on <a href="https://discord.gg/Tdedn9GTXq" target="_blank">Discord</a> and <a href="https://cdn.vansin.top/internlm/mineru.jpg" target="_blank">WeChat</a>
-</p>
-
-</div>
-
-# 更新记录
-
-- 2024/08/09 0.7.0b1发布,简化安装步骤提升易用性,加入表格识别功能
-- 2024/08/01 0.6.2b1发布,优化了依赖冲突问题和安装文档
-- 2024/07/05 首次开源
-
-<!-- TABLE OF CONTENT -->
-
-<details open="open">
-  <summary><h2 style="display: inline-block">文档目录</h2></summary>
-  <ol>
-    <li>
-      <a href="#mineru">MinerU</a>
-      <ul>
-        <li><a href="#项目简介">项目简介</a></li>
-        <li><a href="#主要功能">主要功能</a></li>
-        <li><a href="#快速开始">快速开始</a>
-            <ul>
-            <li><a href="#在线体验">在线体验</a></li>
-            <li><a href="#使用CPU快速体验">使用CPU快速体验</a></li>
-            <li><a href="#使用GPU">使用GPU</a></li>
-            </ul>
-        </li>
-        <li><a href="#使用">使用方式</a>
-            <ul>
-            <li><a href="#命令行">命令行</a></li>
-            <li><a href="#api">API</a></li>
-            <li><a href="#二次开发">二次开发</a></li>
-            </ul>
-        </li>
-      </ul>
-    </li>
-    <li><a href="#todo">TODO</a></li>
-    <li><a href="#known-issues">Known Issues</a></li>
-    <li><a href="#faq">FAQ</a></li>
-    <li><a href="#all-thanks-to-our-contributors">Contributors</a></li>
-    <li><a href="#license-information">License Information</a></li>
-    <li><a href="#acknowledgments">Acknowledgements</a></li>
-    <li><a href="#citation">Citation</a></li>
-    <li><a href="#star-history">Star History</a></li>
-    <li><a href="#magic-doc">magic-doc快速提取PPT/DOC/PDF</a></li>
-    <li><a href="#magic-html">magic-html提取混合网页内容</a></li>
-    <li><a href="#links">Links</a></li>
-  </ol>
-</details>
-
-# MinerU
-
-## 项目简介
-
-MinerU是一款将PDF转化为机器可读格式的工具(如markdown、json),可以很方便地抽取为任意格式。
-MinerU诞生于[书生-浦语](https://github.com/InternLM/InternLM)的预训练过程中,我们将会集中精力解决科技文献中的符号转化问题,希望在大模型时代为科技发展做出贡献。
-相比国内外知名商用产品MinerU还很年轻,如果遇到问题或者结果不及预期请到[issue](https://github.com/opendatalab/MinerU/issues)提交问题,同时**附上相关PDF**。
-
-https://github.com/user-attachments/assets/4bea02c9-6d54-4cd6-97ed-dff14340982c
-
-## 主要功能
-
-- 删除页眉、页脚、脚注、页码等元素,保持语义连贯
-- 对多栏输出符合人类阅读顺序的文本
-- 保留原文档的结构,包括标题、段落、列表等
-- 提取图像、图片标题、表格、表格标题
-- 自动识别文档中的公式并将公式转换成latex
-- 自动识别文档中的表格并将表格转换成latex
-- 乱码PDF自动检测并启用OCR
-- 支持CPU和GPU环境
-- 支持windows/linux/mac平台
-
-## 快速开始
-
-如果遇到任何安装问题,请先查询 <a href="#faq">FAQ</a> </br>
-如果遇到解析效果不及预期,参考 <a href="#known-issues">Known Issues</a></br>
-有3种不同方式可以体验MinerU的效果:
-
-- [在线体验(无需任何安装)](#在线体验)
-- [使用CPU快速体验(Windows,Linux,Mac)](#使用cpu快速体验)
-- [Linux/Windows + CUDA](#使用gpu)
-
-**⚠️安装前必看——软硬件环境支持说明**
-
-为了确保项目的稳定性和可靠性,我们在开发过程中仅对特定的软硬件环境进行优化和测试。这样当用户在推荐的系统配置上部署和运行项目时,能够获得最佳的性能表现和最少的兼容性问题。
-
-通过集中资源和精力于主线环境,我们团队能够更高效地解决潜在的BUG,及时开发新功能。
-
-在非主线环境中,由于硬件、软件配置的多样性,以及第三方依赖项的兼容性问题,我们无法100%保证项目的完全可用性。因此,对于希望在非推荐环境中使用本项目的用户,我们建议先仔细阅读文档以及FAQ,大多数问题已经在FAQ中有对应的解决方案,除此之外我们鼓励社区反馈问题,以便我们能够逐步扩大支持范围。
-
-<table>
-    <tr>
-        <td colspan="3" rowspan="2">操作系统</td>
-    </tr>
-    <tr>
-        <td>Ubuntu 22.04 LTS</td>
-        <td>Windows 10 / 11</td>
-        <td>macOS 11+</td>
-    </tr>
-    <tr>
-        <td colspan="3">CPU</td>
-        <td>x86_64</td>
-        <td>x86_64</td>
-        <td>x86_64 / arm64</td>
-    </tr>
-    <tr>
-        <td colspan="3">内存</td>
-        <td colspan="3">大于等于16GB,推荐32G以上</td>
-    </tr>
-    <tr>
-        <td colspan="3">python版本</td>
-        <td colspan="3">3.10</td>
-    </tr>
-    <tr>
-        <td colspan="3">Nvidia Driver 版本</td>
-        <td>latest(专有驱动)</td>
-        <td>latest</td>
-        <td>None</td>
-    </tr>
-    <tr>
-        <td colspan="3">CUDA环境</td>
-        <td>自动安装[12.1(pytorch)+11.8(paddle)]</td>
-        <td>11.8(手动安装)+cuDNN v8.7.0(手动安装)</td>
-        <td>None</td>
-    </tr>
-    <tr>
-        <td rowspan="2">GPU硬件支持列表</td>
-        <td colspan="2">最低要求 8G+显存</td>
-        <td colspan="2">3060ti/3070/3080/3080ti/4060/4070/4070ti<br>
-        8G显存仅可开启layout和公式识别加速</td>
-        <td rowspan="2">None</td>
-    </tr>
-    <tr>
-        <td colspan="2">推荐配置 16G+显存</td>
-        <td colspan="2">3090/3090ti/4070tisuper/4080/4090<br>
-        16G及以上可以同时开启layout,公式识别和ocr加速</td>
-    </tr>
-</table>
-
-### 在线体验
-
-[在线体验点击这里](https://opendatalab.com/OpenSourceTools/Extractor/PDF)
-
-### 使用CPU快速体验
-
-#### 1. 安装magic-pdf
-
-最新版本国内镜像源同步可能会有延迟,请耐心等待
-
-```bash
-conda create -n MinerU python=3.10
-conda activate MinerU
-pip install magic-pdf[full]==0.7.0b1 --extra-index-url https://wheels.myhloli.com -i https://pypi.tuna.tsinghua.edu.cn/simple
-```
-
-#### 2. 下载模型权重文件
-
-详细参考 [如何下载模型文件](docs/how_to_download_models_zh_cn.md)
-
-> ❗️模型下载后请务必检查模型文件是否下载完整
->
-> 请检查目录下的模型文件大小与网页上描述是否一致,如果可以的话,最好通过sha256校验模型是否下载完整
-
-#### 3. 拷贝配置文件并进行配置
-
-在仓库根目录可以获得 [magic-pdf.template.json](magic-pdf.template.json) 配置模版文件
-
-> ❗️务必执行以下命令将配置文件拷贝到【用户目录】下,否则程序将无法运行
->
-> windows的用户目录为 "C:\\Users\\用户名", linux用户目录为 "/home/用户名", macOS用户目录为 "/Users/用户名"
-
-```bash
-cp magic-pdf.template.json ~/magic-pdf.json
-```
-
-在用户目录中找到magic-pdf.json文件并配置"models-dir"为[2. 下载模型权重文件](#2-下载模型权重文件)中下载的模型权重文件所在目录
-
-> ❗️务必正确配置模型权重文件所在目录的【绝对路径】,否则会因为找不到模型文件而导致程序无法运行
->
-> windows系统中此路径应包含盘符,且需把路径中所有的`"\"`替换为`"/"`,否则会因为转义原因导致json文件语法错误。
-> 
-
-> 例如:模型放在D盘根目录的models目录,则model-dir的值应为"D:/models"
-
-```json
-{
-  // other config
-  "models-dir": "D:/models",
-  "table-config": {
-        "is_table_recog_enable": false, // 表格识别功能默认是关闭的,如果需要修改此处的值
-        "max_time": 400
-    }
-}
-```
-
-### 使用GPU
-
-如果您的设备支持CUDA,且满足主线环境中的显卡要求,则可以使用GPU加速,请根据自己的系统选择适合的教程:
-
-- [Ubuntu22.04LTS + GPU](docs/README_Ubuntu_CUDA_Acceleration_zh_CN.md)
-- [Windows10/11 + GPU](docs/README_Windows_CUDA_Acceleration_zh_CN.md)
-- 使用Docker快速部署
-    > Docker 需设备gpu显存大于等于16GB,默认开启所有加速功能
-  ```bash
-  wget https://github.com/opendatalab/MinerU/raw/master/Dockerfile
-  docker build -t mineru:0.7.0b1 .
-  docker run --rm -it --gpus=all mineru:0.7.0b1 /bin/bash
-  magic-pdf --help
-  ```
-    
-
-## 使用
-
-### 命令行
-
-```bash
-magic-pdf --help
-Usage: magic-pdf [OPTIONS]
-
-Options:
-  -v, --version                display the version and exit
-  -p, --path PATH              local pdf filepath or directory  [required]
-  -o, --output-dir TEXT        output local directory
-  -m, --method [ocr|txt|auto]  the method for parsing pdf.
-                               ocr: using ocr technique to extract information from pdf,
-                               txt: suitable for the text-based pdf only and outperform ocr,
-                               auto: automatically choose the best method for parsing pdf
-                                  from ocr and txt.
-                               without method specified, auto will be used by default.
-  --help                       Show this message and exit.
-
-
-## show version
-magic-pdf -v
-
-## command line example
-magic-pdf -p {some_pdf} -o {some_output_dir} -m auto
-```
-
-其中 `{some_pdf}` 可以是单个pdf文件,也可以是一个包含多个pdf文件的目录。
-运行完命令后输出的结果会保存在`{some_output_dir}`目录下, 输出的文件列表如下
-
-```text
-├── some_pdf.md                          # markdown 文件
-├── images                               # 存放图片目录
-├── some_pdf_layout.pdf                  # layout 绘图
-├── some_pdf_middle.json                 # minerU 中间处理结果
-├── some_pdf_model.json                  # 模型推理结果
-├── some_pdf_origin.pdf                  # 原 pdf 文件
-└── some_pdf_spans.pdf                   # 最小粒度的bbox位置信息绘图
-```
-
-更多有关输出文件的信息,请参考[输出文件说明](docs/output_file_zh_cn.md)
-
-### API
-
-处理本地磁盘上的文件
-
-```python
-image_writer = DiskReaderWriter(local_image_dir)
-image_dir = str(os.path.basename(local_image_dir))
-jso_useful_key = {"_pdf_type": "", "model_list": []}
-pipe = UNIPipe(pdf_bytes, jso_useful_key, image_writer)
-pipe.pipe_classify()
-pipe.pipe_analyze()
-pipe.pipe_parse()
-md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
-```
-
-处理对象存储上的文件
-
-```python
-s3pdf_cli = S3ReaderWriter(pdf_ak, pdf_sk, pdf_endpoint)
-image_dir = "s3://img_bucket/"
-s3image_cli = S3ReaderWriter(img_ak, img_sk, img_endpoint, parent_path=image_dir)
-pdf_bytes = s3pdf_cli.read(s3_pdf_path, mode=s3pdf_cli.MODE_BIN)
-jso_useful_key = {"_pdf_type": "", "model_list": []}
-pipe = UNIPipe(pdf_bytes, jso_useful_key, s3image_cli)
-pipe.pipe_classify()
-pipe.pipe_analyze()
-pipe.pipe_parse()
-md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
-```
-
-详细实现可参考
-
-- [demo.py 最简单的处理方式](demo/demo.py)
-- [magic_pdf_parse_main.py 能够更清晰看到处理流程](demo/magic_pdf_parse_main.py)
-
-### 二次开发
-
-TODO
-
-# TODO
-
-- [ ] 基于语义的阅读顺序
-- [ ] 正文中列表识别
-- [ ] 正文中代码块识别
-- [ ] 目录识别
-- [x] 表格识别
-- [ ] [化学式识别](docs/chemical_knowledge_introduction/introduction.pdf)
-- [ ] 几何图形识别
-
-# Known Issues
-
-- 阅读顺序基于规则的分割,在一些情况下会乱序
-- 不支持竖排文字
-- 列表、代码块、目录在layout模型里还没有支持
-- 漫画书、艺术图册、小学教材、习题尚不能很好解析
-- 在一些公式密集的PDF上强制启用OCR效果会更好
-- 如果您要处理包含大量公式的pdf,强烈建议开启OCR功能。使用pymuPDF提取文字的时候会出现文本行互相重叠的情况导致公式插入位置不准确。
-- **表格识别**目前处于测试阶段,识别速度较慢,识别准确度有待提升。以下是我们在Ubuntu 22.04 LTS + Intel(R) Xeon(R) Platinum 8352V CPU @ 2.10GHz + NVIDIA GeForce RTX 4090环境下的一些性能测试结果,可供参考。
-
-| 表格大小     | 解析耗时 |
-| ------------ | -------- |
-| 6\*5 55kb    | 37s      |
-| 16\*12 284kb | 3m18s    |
-| 44\*7 559kb  | 4m12s    |
-
-# FAQ
-
-
-[常见问题](docs/FAQ_zh_cn.md)
-
-
-[FAQ](docs/FAQ_en_us.md)
-
-# All Thanks To Our Contributors
-
-<a href="https://github.com/opendatalab/MinerU/graphs/contributors">
-  <img src="https://contrib.rocks/image?repo=opendatalab/MinerU" />
-</a>
-
-# License Information
-
-[LICENSE.md](LICENSE.md)
-
-本项目目前采用PyMuPDF以实现高级功能,但因其遵循AGPL协议,可能对某些使用场景构成限制。未来版本迭代中,我们计划探索并替换为许可条款更为宽松的PDF处理库,以提升用户友好度及灵活性。
-
-# Acknowledgments
-
-- [PDF-Extract-Kit](https://github.com/opendatalab/PDF-Extract-Kit)
-- [StructEqTable](https://github.com/UniModal4Reasoning/StructEqTable-Deploy)
-- [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)
-- [PyMuPDF](https://github.com/pymupdf/PyMuPDF)
-- [fast-langdetect](https://github.com/LlmKira/fast-langdetect)
-- [pdfminer.six](https://github.com/pdfminer/pdfminer.six)
-
-# Citation
-
-```bibtex
-@article{he2024opendatalab,
-  title={Opendatalab: Empowering general artificial intelligence with open datasets},
-  author={He, Conghui and Li, Wei and Jin, Zhenjiang and Xu, Chao and Wang, Bin and Lin, Dahua},
-  journal={arXiv preprint arXiv:2407.13773},
-  year={2024}
-}
-
-@misc{2024mineru,
-    title={MinerU: A One-stop, Open-source, High-quality Data Extraction Tool},
-    author={MinerU Contributors},
-    howpublished = {\url{https://github.com/opendatalab/MinerU}},
-    year={2024}
-}
-```
-
-# Star History
-
-<a>
- <picture>
-   <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date&theme=dark" />
-   <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date" />
-   <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date" />
- </picture>
-</a>
-
-# Magic-doc
-
-[Magic-Doc](https://github.com/InternLM/magic-doc) Fast speed ppt/pptx/doc/docx/pdf extraction tool
-
-# Magic-html
-
-[Magic-HTML](https://github.com/opendatalab/magic-html) Mixed web page extraction tool
-
-# Links
-
-- [LabelU (A Lightweight Multi-modal Data Annotation Tool)](https://github.com/opendatalab/labelU)
-- [LabelLLM (An Open-source LLM Dialogue Annotation Platform)](https://github.com/opendatalab/LabelLLM)
-- [PDF-Extract-Kit (A Comprehensive Toolkit for High-Quality PDF Content Extraction)](https://github.com/opendatalab/PDF-Extract-Kit)
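
The zh-CN README deleted above documents `magic-pdf.json`, including the beta `table-config` block, but only as an annotated example (real JSON cannot contain the `//` comments shown there). A small sketch for enabling table recognition programmatically, assuming exactly the key names from that example:

```python
# Sketch: flip on the beta table recognition flag documented in the deleted
# README. Key names ("table-config", "is_table_recog_enable", "max_time") are
# taken from its magic-pdf.json example.
import json
from pathlib import Path

config_path = Path.home() / "magic-pdf.json"
config = json.loads(config_path.read_text(encoding="utf-8"))

config.setdefault("table-config", {})
config["table-config"]["is_table_recog_enable"] = True
config["table-config"]["max_time"] = 400  # per-table time budget from the example

config_path.write_text(json.dumps(config, indent=2, ensure_ascii=False),
                       encoding="utf-8")
print(f"updated {config_path}")
```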

+ 0 - 117
docs/README_Ubuntu_CUDA_Acceleration_en_US.md

@@ -1,117 +0,0 @@
-
-# Ubuntu 22.04 LTS
-
-### 1. Check if NVIDIA Drivers Are Installed
-   ```sh
-   nvidia-smi
-   ```
-   If you see information similar to the following, it means that the NVIDIA drivers are already installed, and you can skip Step 2.
-   ```plaintext
-   +---------------------------------------------------------------------------------------+
-   | NVIDIA-SMI 537.34                 Driver Version: 537.34       CUDA Version: 12.2     |
-   |-----------------------------------------+----------------------+----------------------+
-   | GPU  Name                     TCC/WDDM  | Bus-Id        Disp.A | Volatile Uncorr. ECC |
-   | Fan  Temp   Perf          Pwr:Usage/Cap |         Memory-Usage | GPU-Util  Compute M. |
-   |                                         |                      |               MIG M. |
-   |=========================================+======================+======================|
-   |   0  NVIDIA GeForce RTX 3060 Ti   WDDM  | 00000000:01:00.0  On |                  N/A |
-   |  0%   51C    P8              12W / 200W |   1489MiB /  8192MiB |      5%      Default |
-   |                                         |                      |                  N/A |
-   +-----------------------------------------+----------------------+----------------------+
-   ```
-
-### 2. Install the Driver
-   If no driver is installed, use the following command:
-   ```sh
-   sudo apt-get update
-   sudo apt-get install nvidia-driver-545
-   ```
-   Install the proprietary driver and restart your computer after installation.
-   ```sh
-   reboot
-   ```
-
-### 3. Install Anaconda
-   If Anaconda is already installed, skip this step.
-   ```sh
-   wget https://repo.anaconda.com/archive/Anaconda3-2024.06-1-Linux-x86_64.sh
-   bash Anaconda3-2024.06-1-Linux-x86_64.sh
-   ```
-   In the final step, enter `yes`, close the terminal, and reopen it.
-
-### 4. Create an Environment Using Conda
-   Specify Python version 3.10.
-   ```sh
-   conda create -n MinerU python=3.10
-   conda activate MinerU
-   ```
-
-### 5. Install Applications
-   ```sh
-   pip install -U magic-pdf[full] --extra-index-url https://wheels.myhloli.com
-   ```
-❗ After installation, make sure to check the version of `magic-pdf` using the following command:
-   ```sh
-   magic-pdf --version
-   ```
-   If the version number is less than 0.7.0, please report the issue.
-
-### 6. Download Models
-   Refer to detailed instructions on [how to download model files](how_to_download_models_en.md).  
-   After downloading, move the `models` directory to an SSD with more space.
-   
-❗ After downloading the models, ensure they are complete:
-   - Check that the file sizes match the description on the website.
-   - If possible, verify the integrity using SHA256.
-
-### 7. Configuration Before First Run
-   Obtain the configuration template file `magic-pdf.template.json` from the root directory of the repository.
-   
-❗ Execute the following command to copy the configuration file to your home directory, otherwise the program will not run:
-   ```sh
-   wget https://github.com/opendatalab/MinerU/raw/master/magic-pdf.template.json
-   cp magic-pdf.template.json ~/magic-pdf.json
-   ```
-   Find the `magic-pdf.json` file in your home directory and configure `"models-dir"` to be the directory where the model weights from Step 6 were downloaded.
-   
-❗ Correctly specify the absolute path of the directory containing the model weights; otherwise, the program will fail due to missing model files.
-   ```json
-   {
-     "models-dir": "/tmp/models"
-   }
-   ```
-
-### 8. First Run
-   Download a sample file from the repository and test it.
-   ```sh
-   wget https://github.com/opendatalab/MinerU/raw/master/demo/small_ocr.pdf
-   magic-pdf -p small_ocr.pdf
-   ```
-
-### 9. Test CUDA Acceleration
-
-If your graphics card has at least 8GB of VRAM, follow these steps to test CUDA acceleration:
-
-1. Modify the value of `"device-mode"` in the `magic-pdf.json` configuration file located in your home directory.
-   ```json
-   {
-     "device-mode": "cuda"
-   }
-   ```
-2. Test CUDA acceleration with the following command:
-   ```sh
-   magic-pdf -p small_ocr.pdf
-   ```
-
-### 10. Enable CUDA Acceleration for OCR
-
-❗ The following operations require a graphics card with at least 16GB of VRAM; otherwise, the program may crash or experience reduced performance.
-    
-1. Download `paddlepaddle-gpu`. Installation will automatically enable OCR acceleration.
-   ```sh
-   python -m pip install paddlepaddle-gpu==3.0.0b1 -i https://www.paddlepaddle.org.cn/packages/stable/cu118/
-   ```
-2. Test OCR acceleration with the following command:
-   ```sh
-   magic-pdf -p small_ocr.pdf
-   ```
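
Steps 9 and 10 of this deleted guide (its replacement lives under `old_docs/` in this PR) have the user edit `"device-mode"` in `~/magic-pdf.json` by hand. A sketch that sets the key from the detected hardware instead; `torch.cuda.is_available()` is standard PyTorch, and the `"cpu"` fallback value is assumed from the template default:

```python
# Sketch: set "device-mode" (the key used throughout the guide above) based on
# whether CUDA is actually available. The "cpu" fallback is an assumption.
import json
from pathlib import Path

import torch

config_path = Path.home() / "magic-pdf.json"
config = json.loads(config_path.read_text(encoding="utf-8"))
config["device-mode"] = "cuda" if torch.cuda.is_available() else "cpu"
config_path.write_text(json.dumps(config, indent=2), encoding="utf-8")
print(f'device-mode set to {config["device-mode"]}')
```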

+ 0 - 104
docs/README_Windows_CUDA_Acceleration_en_US.md

@@ -1,104 +0,0 @@
-# Windows 10/11
-
-### 1. Install CUDA and cuDNN
-Required versions: CUDA 11.8 + cuDNN 8.7.0
-   - CUDA 11.8: https://developer.nvidia.com/cuda-11-8-0-download-archive
-   - cuDNN v8.7.0 (November 28th, 2022), for CUDA 11.x: https://developer.nvidia.com/rdp/cudnn-archive
-   
-### 2. Install Anaconda
-   If Anaconda is already installed, you can skip this step.
-   
-Download link: https://repo.anaconda.com/archive/Anaconda3-2024.06-1-Windows-x86_64.exe
-
-### 3. Create an Environment Using Conda
-   Python version must be 3.10.
-   ```
-   conda create -n MinerU python=3.10
-   conda activate MinerU
-   ```
-
-### 4. Install Applications
-   ```
-   pip install -U magic-pdf[full] --extra-index-url https://wheels.myhloli.com
-   ```
-   >❗️After installation, verify the version of `magic-pdf`:
-   >  ```bash
-   >  magic-pdf --version
-   >  ```
-   > If the version number is less than 0.7.0, please report it in the issues section.
-   
-### 5. Download Models
-   Refer to detailed instructions on [how to download model files](how_to_download_models_en.md).  
-   After downloading, move the `models` directory to an SSD with more space.
-   
-   >❗ After downloading the models, ensure they are complete:
-   >- Check that the file sizes match the description on the website.
-   >- If possible, verify the integrity using SHA256.
-
-### 6. Configuration Before the First Run
-   Obtain the configuration template file `magic-pdf.template.json` from the repository root directory.
-    
-   >❗️Execute the following command to copy the configuration file to your user directory, or the program will not run.
-   >   
-   > In Windows, user directory is "C:\Users\username"
-   
-   ```powershell
-     (New-Object System.Net.WebClient).DownloadFile('https://github.com/opendatalab/MinerU/raw/master/magic-pdf.template.json', 'magic-pdf.template.json')
-     cp magic-pdf.template.json ~/magic-pdf.json
-   ```
-
-   Find the `magic-pdf.json` file in your user directory and configure `"models-dir"` to point to the directory where the model weights from step 5 were downloaded.
-   
-   > ❗️Ensure the absolute path of the model weights directory is correctly configured, or the program will fail to run due to not finding the model files.
-   >    
-   > In Windows, this path should include the drive letter and replace all `"\"` to `"/"`.
-   >   
-   > Example: If the models are placed in the root directory of drive D, the value for `model-dir` should be `"D:/models"`.
-   
-   ```json
-   {
-     "models-dir": "/tmp/models"
-   }
-   ```
-
-### 7. First Run
-   Download a sample file from the repository and test it.
-   ```powershell
-     (New-Object System.Net.WebClient).DownloadFile('https://github.com/opendatalab/MinerU/raw/master/demo/small_ocr.pdf', 'small_ocr.pdf')
-     magic-pdf -p small_ocr.pdf
-   ```
-
-### 8. Test CUDA Acceleration
-   If your graphics card has at least 8GB of VRAM, follow these steps to test CUDA-accelerated parsing performance.
-   1. **Overwrite the installation of torch and torchvision** supporting CUDA.
-      ```
-      pip install --force-reinstall torch==2.3.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu118
-      ```
-      >❗️Ensure the following versions are specified in the command:
-      >```
-      > torch==2.3.1 torchvision==0.18.1
-      >```
-      >These are the highest versions we support. Installing higher versions without specifying them will cause the program to fail.
-   2. **Modify the value of `"device-mode"`** in the `magic-pdf.json` configuration file located in your user directory.
-     
-      ```json
-      {
-        "device-mode": "cuda"
-      }
-      ```
-   3. **Run the following command to test CUDA acceleration**:
-
-      ```
-      magic-pdf -p small_ocr.pdf
-      ```
-
-### 9. Enable CUDA Acceleration for OCR
-   >❗️This operation requires at least 16GB of VRAM on your graphics card, otherwise it will cause the program to crash or slow down.
-   1. **Download paddlepaddle-gpu**, which will automatically enable OCR acceleration upon installation.
-      ```
-      pip install paddlepaddle-gpu==2.6.1
-      ```
-   2. **Run the following command to test OCR acceleration**:
-      ```
-      magic-pdf -p small_ocr.pdf
-      ```

+ 0 - 4
docs/download_models.py

@@ -1,4 +0,0 @@
-# use modelscope sdk download models
-from modelscope import snapshot_download
-model_dir = snapshot_download('opendatalab/PDF-Extract-Kit')
-print(f"model dir is: {model_dir}/models")
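
This four-line ModelScope downloader is re-homed as `old_docs/download_models.py` (item 72 in the file list). The download guides in this PR also point to a Hugging Face variant, `download_models_hf.py`; a minimal equivalent using `huggingface_hub` might look like this:

```python
# Sketch of the Hugging Face counterpart referenced by the docs as
# download_models_hf.py (that script itself is not shown in this diff).
from huggingface_hub import snapshot_download

model_dir = snapshot_download("opendatalab/PDF-Extract-Kit")
print(f"model dir is: {model_dir}/models")
```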

+ 16 - 0
docs/en/.readthedocs.yaml

@@ -0,0 +1,16 @@
+version: 2
+
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.10"
+
+formats:
+  - epub
+
+python:
+  install:
+    - requirements: docs/requirements.txt
+
+sphinx:
+  configuration: docs/en/conf.py

+ 20 - 0
docs/en/Makefile

@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?=
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = .
+BUILDDIR      = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

+ 0 - 0
docs/images/MinerU-logo.png → docs/en/_static/image/logo.png


+ 122 - 0
docs/en/conf.py

@@ -0,0 +1,122 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+
+import os
+import subprocess
+import sys
+
+from sphinx.ext import autodoc
+
+
+def install(package):
+    subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
+
+
+requirements_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'requirements.txt'))
+if os.path.exists(requirements_path):
+    with open(requirements_path) as f:
+        packages = f.readlines()
+    for package in packages:
+        install(package.strip())
+
+sys.path.insert(0, os.path.abspath('../..'))
+
+# -- Project information -----------------------------------------------------
+
+project = 'MinerU'
+copyright = '2024, MinerU Contributors'
+author = 'OpenDataLab'
+
+# The full version, including alpha/beta/rc tags
+version_file = '../../magic_pdf/libs/version.py'
+with open(version_file) as f:
+    exec(compile(f.read(), version_file, 'exec'))
+__version__ = locals()['__version__']
+# The short X.Y version
+version = __version__
+# The full version, including alpha/beta/rc tags
+release = __version__
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.napoleon',
+    'sphinx.ext.viewcode',
+    'sphinx.ext.intersphinx',
+    'sphinx_copybutton',
+    'sphinx.ext.autodoc',
+    'sphinx.ext.autosummary',
+    'myst_parser',
+    'sphinxarg.ext',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# Exclude the prompt "$" when copying code
+copybutton_prompt_text = r'\$ '
+copybutton_prompt_is_regexp = True
+
+language = 'en'
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_book_theme'
+html_logo = '_static/image/logo.png'
+html_theme_options = {
+    'path_to_docs': 'docs/en',
+    'repository_url': 'https://github.com/opendatalab/MinerU',
+    'use_repository_button': True,
+}
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+# html_static_path = ['_static']
+
+# Mock out external dependencies here.
+autodoc_mock_imports = [
+    'cpuinfo',
+    'torch',
+    'transformers',
+    'psutil',
+    'prometheus_client',
+    'sentencepiece',
+    'vllm.cuda_utils',
+    'vllm._C',
+    'numpy',
+    'tqdm',
+]
+
+
+class MockedClassDocumenter(autodoc.ClassDocumenter):
+    """Remove note about base class when a class is derived from object."""
+
+    def add_line(self, line: str, source: str, *lineno: int) -> None:
+        if line == '   Bases: :py:class:`object`':
+            return
+        super().add_line(line, source, *lineno)
+
+
+autodoc.ClassDocumenter = MockedClassDocumenter
+
+navigation_with_keys = False

+ 26 - 0
docs/en/index.rst

@@ -0,0 +1,26 @@
+.. xtuner documentation master file, created by
+   sphinx-quickstart on Tue Jan  9 16:33:06 2024.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to the MinerU Documentation
+==============================================
+
+.. figure:: ./_static/image/logo.png
+  :align: center
+  :alt: mineru
+  :class: no-scaled-link
+
+.. raw:: html
+
+   <p style="text-align:center">
+   <strong>A one-stop, open-source, high-quality data extraction tool
+   </strong>
+   </p>
+
+   <p style="text-align:center">
+   <script async defer src="https://buttons.github.io/buttons.js"></script>
+   <a class="github-button" href="https://github.com/opendatalab/MinerU" data-show-count="true" data-size="large" aria-label="Star">Star</a>
+   <a class="github-button" href="https://github.com/opendatalab/MinerU/subscription" data-icon="octicon-eye" data-size="large" aria-label="Watch">Watch</a>
+   <a class="github-button" href="https://github.com/opendatalab/MinerU/fork" data-icon="octicon-repo-forked" data-size="large" aria-label="Fork">Fork</a>
+   </p>

+ 35 - 0
docs/en/make.bat

@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=.
+set BUILDDIR=_build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.https://www.sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd

+ 0 - 63
docs/how_to_download_models_en.md

@@ -1,63 +0,0 @@
-### 1. Download the Model from Hugging Face
-Use a Python Script to Download Model Files from Hugging Face
-```bash
-pip install huggingface_hub
-wget https://github.com/opendatalab/MinerU/raw/master/docs/download_models_hf.py
-python download_models_hf.py
-```
-After the Python script finishes executing, it will output the directory where the models are downloaded.
-### 2. Additional steps
-
-#### 1. Check whether the model directory is downloaded completely.
-
-The structure of the model folder is as follows, including configuration files and weight files of different components:
-```
-../
-├── Layout
-│   ├── config.json
-│   └── model_final.pth
-├── MFD
-│   └── weights.pt
-├── MFR
-│   └── UniMERNet
-│       ├── config.json
-│       ├── preprocessor_config.json
-│       ├── pytorch_model.bin
-│       ├── README.md
-│       ├── tokenizer_config.json
-│       └── tokenizer.json
-│── TabRec
-│   └─StructEqTable
-│       ├── config.json
-│       ├── generation_config.json
-│       ├── model.safetensors
-│       ├── preprocessor_config.json
-│       ├── special_tokens_map.json
-│       ├── spiece.model
-│       ├── tokenizer.json
-│       └── tokenizer_config.json 
-│   └─ TableMaster 
-│       └─ ch_PP-OCRv3_det_infer
-│           ├── inference.pdiparams
-│           ├── inference.pdiparams.info
-│           └── inference.pdmodel
-│       └─ ch_PP-OCRv3_rec_infer
-│           ├── inference.pdiparams
-│           ├── inference.pdiparams.info
-│           └── inference.pdmodel
-│       └─ table_structure_tablemaster_infer
-│           ├── inference.pdiparams
-│           ├── inference.pdiparams.info
-│           └── inference.pdmodel
-│       ├── ppocr_keys_v1.txt
-│       └── table_master_structure_dict.txt
-└── README.md
-```
-#### 2. Check whether the model file is fully downloaded.
-
-Please check whether the size of the model file in the directory is consistent with the description on the web page. If possible, it is best to check whether the model is downloaded completely through sha256.
-
-#### 3. Update the model path in `magic-pdf.json`
-
-Additionally, update the model directory path in `~/magic-pdf.json` to the absolute path of the `models` directory printed by the download script; otherwise you will get an error saying the model cannot be loaded.
-
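The deleted guide recommends verifying each weight file against its published sha256 checksum. A minimal sketch of that check (the path and the printed digest comparison are illustrative, not values from this repo):

```python
# Minimal sha256 verification sketch; the file path is illustrative.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

print(sha256_of('models/Layout/model_final.pth'))
# Compare the printed digest with the checksum shown on the model page.
```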

+ 0 - 75
docs/how_to_download_models_zh_cn.md

@@ -1,75 +0,0 @@
-# How to download the model files
-
-The model files can be downloaded from Hugging Face or ModelScope. Due to network restrictions, users in mainland China may fail to reach Hugging Face, so please use ModelScope instead.
-
-<details>
-  <summary>Method 1: Download the models from Hugging Face</summary>
-  <p>Use a Python script to download the model files from Hugging Face</p>
-  <pre><code>pip install huggingface_hub
-wget https://gitee.com/myhloli/MinerU/raw/master/docs/download_models_hf.py
-python download_models_hf.py</code></pre>
-  <p>After the Python script finishes, it prints the directory the models were downloaded to</p>
-</details>
-
-## Method 2: Download the models from ModelScope
-
-### Use a Python script to download the model files from ModelScope
-
-```bash
-pip install modelscope
-wget https://gitee.com/myhloli/MinerU/raw/master/docs/download_models.py
-python download_models.py
-```
-After the Python script finishes, it prints the directory the models were downloaded to.
-
-## [❗️Required❗️] Additional steps (be sure to complete these once the models are downloaded)
-
-### 1. Check that the model directory is complete
-
-The structure of the model folder is as follows, including the configuration files and weight files of the different components:
-```
-./
-├── Layout  # layout detection model
-│   ├── config.json
-│   └── model_final.pth
-├── MFD  # formula detection model
-│   └── weights.pt
-├── MFR  # formula recognition model
-│   └── UniMERNet
-│       ├── config.json
-│       ├── preprocessor_config.json
-│       ├── pytorch_model.bin
-│       ├── README.md
-│       ├── tokenizer_config.json
-│       └── tokenizer.json
-├── TabRec  # table recognition models
-│   ├── StructEqTable
-│   │   ├── config.json
-│   │   ├── generation_config.json
-│   │   ├── model.safetensors
-│   │   ├── preprocessor_config.json
-│   │   ├── special_tokens_map.json
-│   │   ├── spiece.model
-│   │   ├── tokenizer.json
-│   │   └── tokenizer_config.json
-│   └── TableMaster
-│       ├── ch_PP-OCRv3_det_infer
-│       │   ├── inference.pdiparams
-│       │   ├── inference.pdiparams.info
-│       │   └── inference.pdmodel
-│       ├── ch_PP-OCRv3_rec_infer
-│       │   ├── inference.pdiparams
-│       │   ├── inference.pdiparams.info
-│       │   └── inference.pdmodel
-│       ├── table_structure_tablemaster_infer
-│       │   ├── inference.pdiparams
-│       │   ├── inference.pdiparams.info
-│       │   └── inference.pdmodel
-│       ├── ppocr_keys_v1.txt
-│       └── table_master_structure_dict.txt
-└── README.md
-```
-
-### 2. Check that the model files downloaded completely
-
-Check that the sizes of the model files in the directory match the descriptions on the web page; if possible, verify each download's integrity with its sha256 checksum.
-
-### 3. Update the model path in magic-pdf.json
-
-Additionally, update the model directory path in `~/magic-pdf.json` to the absolute path of the `models` directory printed by the download script; otherwise you will get an error saying the model cannot be loaded.

+ 6 - 0
docs/requirements.txt

@@ -0,0 +1,6 @@
+myst-parser
+sphinx
+sphinx-argparse
+sphinx-book-theme
+sphinx-copybutton
+sphinx_rtd_theme

+ 16 - 0
docs/zh_cn/.readthedocs.yaml

@@ -0,0 +1,16 @@
+version: 2
+
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.10"
+
+formats:
+  - epub
+
+python:
+  install:
+    - requirements: docs/requirements.txt
+
+sphinx:
+  configuration: docs/zh_cn/conf.py

+ 20 - 0
docs/zh_cn/Makefile

@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?=
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = .
+BUILDDIR      = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

BIN
docs/zh_cn/_static/image/logo.png


+ 122 - 0
docs/zh_cn/conf.py

@@ -0,0 +1,122 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+
+import os
+import subprocess
+import sys
+
+from sphinx.ext import autodoc
+
+
+def install(package):
+    subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
+
+
+requirements_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'requirements.txt'))
+if os.path.exists(requirements_path):
+    with open(requirements_path) as f:
+        packages = f.readlines()
+    for package in packages:
+        install(package.strip())
+
+sys.path.insert(0, os.path.abspath('../..'))
+
+# -- Project information -----------------------------------------------------
+
+project = 'MinerU'
+copyright = '2024, OpenDataLab'
+author = 'MinerU Contributors'
+
+# The full version, including alpha/beta/rc tags
+version_file = '../../magic_pdf/libs/version.py'
+with open(version_file) as f:
+    exec(compile(f.read(), version_file, 'exec'))
+__version__ = locals()['__version__']
+# The short X.Y version
+version = __version__
+# The full version, including alpha/beta/rc tags
+release = __version__
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.napoleon',
+    'sphinx.ext.viewcode',
+    'sphinx.ext.intersphinx',
+    'sphinx_copybutton',
+    'sphinx.ext.autodoc',
+    'sphinx.ext.autosummary',
+    'myst_parser',
+    'sphinxarg.ext',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# Exclude the prompt "$" when copying code
+copybutton_prompt_text = r'\$ '
+copybutton_prompt_is_regexp = True
+
+language = 'zh_CN'
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_book_theme'
+html_logo = '_static/image/logo.png'
+html_theme_options = {
+    'path_to_docs': 'docs/zh_cn',
+    'repository_url': 'https://github.com/opendatalab/MinerU',
+    'use_repository_button': True,
+}
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+# html_static_path = ['_static']
+
+# Mock out external dependencies here.
+autodoc_mock_imports = [
+    'cpuinfo',
+    'torch',
+    'transformers',
+    'psutil',
+    'prometheus_client',
+    'sentencepiece',
+    'vllm.cuda_utils',
+    'vllm._C',
+    'numpy',
+    'tqdm',
+]
+
+
+class MockedClassDocumenter(autodoc.ClassDocumenter):
+    """Remove note about base class when a class is derived from object."""
+
+    def add_line(self, line: str, source: str, *lineno: int) -> None:
+        if line == '   Bases: :py:class:`object`':
+            return
+        super().add_line(line, source, *lineno)
+
+
+autodoc.ClassDocumenter = MockedClassDocumenter
+
+navigation_with_keys = False

+ 26 - 0
docs/zh_cn/index.rst

@@ -0,0 +1,26 @@
+.. MinerU documentation master file, created by
+   sphinx-quickstart on Tue Jan  9 16:33:06 2024.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to the MinerU Chinese documentation
+==============================================
+
+.. figure:: ./_static/image/logo.png
+  :align: center
+  :alt: mineru
+  :class: no-scaled-link
+
+.. raw:: html
+
+   <p style="text-align:center">
+   <strong>A one-stop, open-source, high-quality data extraction tool
+   </strong>
+   </p>
+
+   <p style="text-align:center">
+   <script async defer src="https://buttons.github.io/buttons.js"></script>
+   <a class="github-button" href="https://github.com/opendatalab/MinerU" data-show-count="true" data-size="large" aria-label="Star">Star</a>
+   <a class="github-button" href="https://github.com/opendatalab/MinerU/subscription" data-icon="octicon-eye" data-size="large" aria-label="Watch">Watch</a>
+   <a class="github-button" href="https://github.com/opendatalab/MinerU/fork" data-icon="octicon-repo-forked" data-size="large" aria-label="Fork">Fork</a>
+   </p>

+ 35 - 0
docs/zh_cn/make.bat

@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=.
+set BUILDDIR=_build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.https://www.sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd

+ 1 - 0
magic-pdf.template.json

@@ -4,6 +4,7 @@
         "bucket-name-2":["ak", "sk", "endpoint"]
     },
     "models-dir":"/tmp/models",
+    "layoutreader-model-dir":"/tmp/layoutreader",
     "device-mode":"cpu",
     "table-config": {
         "model": "TableMaster",

+ 69 - 44
magic_pdf/dict2md/ocr_mkcontent.py

@@ -8,6 +8,7 @@ from magic_pdf.libs.language import detect_lang
 from magic_pdf.libs.MakeContentConfig import DropMode, MakeMode
 from magic_pdf.libs.markdown_utils import ocr_escape_special_markdown_char
 from magic_pdf.libs.ocr_content_type import BlockType, ContentType
+from magic_pdf.para.para_split_v3 import ListLineTag
 
 
 def __is_hyphen_at_line_end(line):
@@ -116,17 +117,20 @@ def ocr_mk_markdown_with_para_core(paras_of_layout, mode, img_buket_path=''):
 
 def ocr_mk_markdown_with_para_core_v2(paras_of_layout,
                                       mode,
-                                      img_buket_path=''):
+                                      img_buket_path='',
+                                      parse_type="auto",
+                                      lang=None
+                                      ):
     page_markdown = []
     for para_block in paras_of_layout:
         para_text = ''
         para_type = para_block['type']
-        if para_type == BlockType.Text:
-            para_text = merge_para_with_text(para_block)
+        if para_type in [BlockType.Text, BlockType.List, BlockType.Index]:
+            para_text = merge_para_with_text(para_block, parse_type=parse_type, lang=lang)
         elif para_type == BlockType.Title:
-            para_text = f'# {merge_para_with_text(para_block)}'
+            para_text = f'# {merge_para_with_text(para_block, parse_type=parse_type, lang=lang)}'
         elif para_type == BlockType.InterlineEquation:
-            para_text = merge_para_with_text(para_block)
+            para_text = merge_para_with_text(para_block, parse_type=parse_type, lang=lang)
         elif para_type == BlockType.Image:
             if mode == 'nlp':
                 continue
@@ -139,17 +143,17 @@ def ocr_mk_markdown_with_para_core_v2(paras_of_layout,
                                     para_text += f"\n![]({join_path(img_buket_path, span['image_path'])})  \n"
                for block in para_block['blocks']:  # 2nd, concatenate image_caption
                     if block['type'] == BlockType.ImageCaption:
-                        para_text += merge_para_with_text(block)
+                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)
                for block in para_block['blocks']:  # 3rd, concatenate image_footnote
                     if block['type'] == BlockType.ImageFootnote:
-                        para_text += merge_para_with_text(block)
+                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)
         elif para_type == BlockType.Table:
             if mode == 'nlp':
                 continue
             elif mode == 'mm':
                for block in para_block['blocks']:  # 1st, concatenate table_caption
                     if block['type'] == BlockType.TableCaption:
-                        para_text += merge_para_with_text(block)
+                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)
                for block in para_block['blocks']:  # 2nd, concatenate table_body
                     if block['type'] == BlockType.TableBody:
                         for line in block['lines']:
@@ -164,7 +168,7 @@ def ocr_mk_markdown_with_para_core_v2(paras_of_layout,
                                         para_text += f"\n![]({join_path(img_buket_path, span['image_path'])})  \n"
                for block in para_block['blocks']:  # 3rd, concatenate table_footnote
                     if block['type'] == BlockType.TableFootnote:
-                        para_text += merge_para_with_text(block)
+                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)
 
         if para_text.strip() == '':
             continue
@@ -174,22 +178,26 @@ def ocr_mk_markdown_with_para_core_v2(paras_of_layout,
     return page_markdown
 
 
-def merge_para_with_text(para_block):
-
-    def detect_language(text):
-        en_pattern = r'[a-zA-Z]+'
-        en_matches = re.findall(en_pattern, text)
-        en_length = sum(len(match) for match in en_matches)
-        if len(text) > 0:
-            if en_length / len(text) >= 0.5:
-                return 'en'
-            else:
-                return 'unknown'
+def detect_language(text):
+    en_pattern = r'[a-zA-Z]+'
+    en_matches = re.findall(en_pattern, text)
+    en_length = sum(len(match) for match in en_matches)
+    if len(text) > 0:
+        if en_length / len(text) >= 0.5:
+            return 'en'
         else:
-            return 'empty'
+            return 'unknown'
+    else:
+        return 'empty'
 
+
+def merge_para_with_text(para_block, parse_type="auto", lang=None):
     para_text = ''
-    for line in para_block['lines']:
+    for i, line in enumerate(para_block['lines']):
+
+        if i >= 1 and line.get(ListLineTag.IS_LIST_START_LINE, False):
+            para_text += '  \n'
+
         line_text = ''
         line_lang = ''
         for span in line['spans']:
@@ -205,11 +213,15 @@ def merge_para_with_text(para_block):
                 content = span['content']
                 # language = detect_lang(content)
                 language = detect_language(content)
-                if language == 'en':  # only split long English words; splitting Chinese text would lose characters
-                    content = ocr_escape_special_markdown_char(
-                        split_long_words(content))
-                else:
+                # check whether a minority (non-English) OCR language was requested
+                if lang is not None and lang != 'en':
                     content = ocr_escape_special_markdown_char(content)
+                else:  # default logic for other languages
+                    if language == 'en' and parse_type == 'ocr':  # only split long English words; splitting Chinese text would lose characters
+                        content = ocr_escape_special_markdown_char(
+                            split_long_words(content))
+                    else:
+                        content = ocr_escape_special_markdown_char(content)
             elif span_type == ContentType.InlineEquation:
                 content = f" ${span['content']}$ "
             elif span_type == ContentType.InterlineEquation:
@@ -265,41 +277,39 @@ def para_to_standard_format(para, img_buket_path):
     return para_content
 
 
-def para_to_standard_format_v2(para_block, img_buket_path, page_idx):
+def para_to_standard_format_v2(para_block, img_buket_path, page_idx, parse_type="auto", lang=None, drop_reason=None):
     para_type = para_block['type']
+    para_content = {}
     if para_type == BlockType.Text:
         para_content = {
             'type': 'text',
-            'text': merge_para_with_text(para_block),
-            'page_idx': page_idx,
+            'text': merge_para_with_text(para_block, parse_type=parse_type, lang=lang),
         }
     elif para_type == BlockType.Title:
         para_content = {
             'type': 'text',
-            'text': merge_para_with_text(para_block),
+            'text': merge_para_with_text(para_block, parse_type=parse_type, lang=lang),
             'text_level': 1,
-            'page_idx': page_idx,
         }
     elif para_type == BlockType.InterlineEquation:
         para_content = {
             'type': 'equation',
-            'text': merge_para_with_text(para_block),
+            'text': merge_para_with_text(para_block, parse_type=parse_type, lang=lang),
             'text_format': 'latex',
-            'page_idx': page_idx,
         }
     elif para_type == BlockType.Image:
-        para_content = {'type': 'image', 'page_idx': page_idx}
+        para_content = {'type': 'image'}
         for block in para_block['blocks']:
             if block['type'] == BlockType.ImageBody:
                 para_content['img_path'] = join_path(
                     img_buket_path,
                     block['lines'][0]['spans'][0]['image_path'])
             if block['type'] == BlockType.ImageCaption:
-                para_content['img_caption'] = merge_para_with_text(block)
+                para_content['img_caption'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)
             if block['type'] == BlockType.ImageFootnote:
-                para_content['img_footnote'] = merge_para_with_text(block)
+                para_content['img_footnote'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)
     elif para_type == BlockType.Table:
-        para_content = {'type': 'table', 'page_idx': page_idx}
+        para_content = {'type': 'table'}
         for block in para_block['blocks']:
             if block['type'] == BlockType.TableBody:
                 if block["lines"][0]["spans"][0].get('latex', ''):
@@ -308,9 +318,14 @@ def para_to_standard_format_v2(para_block, img_buket_path, page_idx):
                     para_content['table_body'] = f"\n\n{block['lines'][0]['spans'][0]['html']}\n\n"
                 para_content['img_path'] = join_path(img_buket_path, block["lines"][0]["spans"][0]['image_path'])
             if block['type'] == BlockType.TableCaption:
-                para_content['table_caption'] = merge_para_with_text(block)
+                para_content['table_caption'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)
             if block['type'] == BlockType.TableFootnote:
-                para_content['table_footnote'] = merge_para_with_text(block)
+                para_content['table_footnote'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)
+
+    para_content['page_idx'] = page_idx
+
+    if drop_reason is not None:
+        para_content['drop_reason'] = drop_reason
 
     return para_content
 
@@ -394,13 +409,19 @@ def ocr_mk_mm_standard_format(pdf_info_dict: list):
 def union_make(pdf_info_dict: list,
                make_mode: str,
                drop_mode: str,
-               img_buket_path: str = ''):
+               img_buket_path: str = '',
+               parse_type: str = "auto",
+               lang=None):
     output_content = []
     for page_info in pdf_info_dict:
+        drop_reason_flag = False
+        drop_reason = None
         if page_info.get('need_drop', False):
             drop_reason = page_info.get('drop_reason')
             if drop_mode == DropMode.NONE:
                 pass
+            elif drop_mode == DropMode.NONE_WITH_REASON:
+                drop_reason_flag = True
             elif drop_mode == DropMode.WHOLE_PDF:
                 raise Exception((f'drop_mode is {DropMode.WHOLE_PDF} ,'
                                  f'drop_reason is {drop_reason}'))
@@ -417,16 +438,20 @@ def union_make(pdf_info_dict: list,
             continue
         if make_mode == MakeMode.MM_MD:
             page_markdown = ocr_mk_markdown_with_para_core_v2(
-                paras_of_layout, 'mm', img_buket_path)
+                paras_of_layout, 'mm', img_buket_path, parse_type=parse_type, lang=lang)
             output_content.extend(page_markdown)
         elif make_mode == MakeMode.NLP_MD:
             page_markdown = ocr_mk_markdown_with_para_core_v2(
-                paras_of_layout, 'nlp')
+                paras_of_layout, 'nlp', parse_type=parse_type, lang=lang)
             output_content.extend(page_markdown)
         elif make_mode == MakeMode.STANDARD_FORMAT:
             for para_block in paras_of_layout:
-                para_content = para_to_standard_format_v2(
-                    para_block, img_buket_path, page_idx)
+                if drop_reason_flag:
+                    para_content = para_to_standard_format_v2(
+                        para_block, img_buket_path, page_idx, parse_type=parse_type, lang=lang, drop_reason=drop_reason)
+                else:
+                    para_content = para_to_standard_format_v2(
+                        para_block, img_buket_path, page_idx, parse_type=parse_type, lang=lang)
                 output_content.append(para_content)
     if make_mode in [MakeMode.MM_MD, MakeMode.NLP_MD]:
         return '\n\n'.join(output_content)

+ 1 - 0
magic_pdf/libs/MakeContentConfig.py

@@ -8,3 +8,4 @@ class DropMode:
     WHOLE_PDF = "whole_pdf"
     SINGLE_PAGE = "single_page"
     NONE = "none"
+    NONE_WITH_REASON = "none_with_reason"

BIN
magic_pdf/libs/__pycache__/__init__.cpython-312.pyc


BIN
magic_pdf/libs/__pycache__/version.cpython-312.pyc


+ 19 - 0
magic_pdf/libs/boxbase.py

@@ -426,3 +426,22 @@ def bbox_distance(bbox1, bbox2):
     elif top:
         return y2 - y1b
     return 0.0
+
+
+def box_area(bbox):
+    return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
+
+
+def get_overlap_area(bbox1, bbox2):
+    """计算box1和box2的重叠面积占bbox1的比例."""
+    # Determine the coordinates of the intersection rectangle
+    x_left = max(bbox1[0], bbox2[0])
+    y_top = max(bbox1[1], bbox2[1])
+    x_right = min(bbox1[2], bbox2[2])
+    y_bottom = min(bbox1[3], bbox2[3])
+
+    if x_right < x_left or y_bottom < y_top:
+        return 0.0
+
+    # The area of overlap area
+    return (x_right - x_left) * (y_bottom - y_top)
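A quick sanity check of the two new helpers, with bboxes in the `[x0, y0, x1, y1]` convention used throughout boxbase:

```python
from magic_pdf.libs.boxbase import box_area, get_overlap_area

b1 = [0, 0, 10, 10]
b2 = [5, 5, 15, 15]
print(box_area(b1))              # 100
print(get_overlap_area(b1, b2))  # 25, the 5x5 intersection
print(get_overlap_area(b1, [20, 20, 30, 30]))  # 0.0, disjoint boxes
```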

+ 10 - 0
magic_pdf/libs/clean_memory.py

@@ -0,0 +1,10 @@
+# Copyright (c) Opendatalab. All rights reserved.
+import torch
+import gc
+
+
+def clean_memory():
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+        torch.cuda.ipc_collect()
+    gc.collect()
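The new helper is called after heavy inference stages below; it is safe on CPU-only machines because the CUDA calls are guarded. Usage is just:

```python
from magic_pdf.libs.clean_memory import clean_memory

clean_memory()  # frees cached CUDA memory when available, then runs gc.collect()
```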

+ 12 - 0
magic_pdf/libs/config_reader.py

@@ -67,6 +67,18 @@ def get_local_models_dir():
         return models_dir
 
 
+def get_local_layoutreader_model_dir():
+    config = read_config()
+    layoutreader_model_dir = config.get("layoutreader-model-dir")
+    if layoutreader_model_dir is None or not os.path.exists(layoutreader_model_dir):
+        home_dir = os.path.expanduser("~")
+        layoutreader_at_modelscope_dir_path = os.path.join(home_dir, ".cache/modelscope/hub/ppaanngggg/layoutreader")
+        logger.warning(f"'layoutreader-model-dir' not exists, use {layoutreader_at_modelscope_dir_path} as default")
+        return layoutreader_at_modelscope_dir_path
+    else:
+        return layoutreader_model_dir
+
+
 def get_device():
     config = read_config()
     device = config.get("device-mode")

+ 101 - 36
magic_pdf/libs/draw_bbox.py

@@ -33,7 +33,7 @@ def draw_bbox_without_number(i, bbox_list, page, rgb_config, fill_config):
             )  # Draw the rectangle
 
 
-def draw_bbox_with_number(i, bbox_list, page, rgb_config, fill_config):
+def draw_bbox_with_number(i, bbox_list, page, rgb_config, fill_config, draw_bbox=True):
     new_rgb = []
     for item in rgb_config:
         item = float(item) / 255
@@ -42,31 +42,31 @@ def draw_bbox_with_number(i, bbox_list, page, rgb_config, fill_config):
     for j, bbox in enumerate(page_data):
         x0, y0, x1, y1 = bbox
         rect_coords = fitz.Rect(x0, y0, x1, y1)  # Define the rectangle
-        if fill_config:
-            page.draw_rect(
-                rect_coords,
-                color=None,
-                fill=new_rgb,
-                fill_opacity=0.3,
-                width=0.5,
-                overlay=True,
-            )  # Draw the rectangle
-        else:
-            page.draw_rect(
-                rect_coords,
-                color=new_rgb,
-                fill=None,
-                fill_opacity=1,
-                width=0.5,
-                overlay=True,
-            )  # Draw the rectangle
+        if draw_bbox:
+            if fill_config:
+                page.draw_rect(
+                    rect_coords,
+                    color=None,
+                    fill=new_rgb,
+                    fill_opacity=0.3,
+                    width=0.5,
+                    overlay=True,
+                )  # Draw the rectangle
+            else:
+                page.draw_rect(
+                    rect_coords,
+                    color=new_rgb,
+                    fill=None,
+                    fill_opacity=1,
+                    width=0.5,
+                    overlay=True,
+                )  # Draw the rectangle
         page.insert_text(
-            (x0, y0 + 10), str(j + 1), fontsize=10, color=new_rgb
+            (x1 + 2, y0 + 10), str(j + 1), fontsize=10, color=new_rgb
         )  # Insert the index just to the right of the rectangle's top-right corner
 
 
 def draw_layout_bbox(pdf_info, pdf_bytes, out_path, filename):
-    layout_bbox_list = []
     dropped_bbox_list = []
     tables_list, tables_body_list = [], []
     tables_caption_list, tables_footnote_list = [], []
@@ -75,17 +75,19 @@ def draw_layout_bbox(pdf_info, pdf_bytes, out_path, filename):
     titles_list = []
     texts_list = []
     interequations_list = []
+    lists_list = []
+    indexs_list = []
     for page in pdf_info:
-        page_layout_list = []
+
         page_dropped_list = []
         tables, tables_body, tables_caption, tables_footnote = [], [], [], []
         imgs, imgs_body, imgs_caption, imgs_footnote = [], [], [], []
         titles = []
         texts = []
         interequations = []
-        for layout in page['layout_bboxes']:
-            page_layout_list.append(layout['layout_bbox'])
-        layout_bbox_list.append(page_layout_list)
+        lists = []
+        indexs = []
+
         for dropped_bbox in page['discarded_blocks']:
             page_dropped_list.append(dropped_bbox['bbox'])
         dropped_bbox_list.append(page_dropped_list)
@@ -117,6 +119,11 @@ def draw_layout_bbox(pdf_info, pdf_bytes, out_path, filename):
                 texts.append(bbox)
             elif block['type'] == BlockType.InterlineEquation:
                 interequations.append(bbox)
+            elif block['type'] == BlockType.List:
+                lists.append(bbox)
+            elif block['type'] == BlockType.Index:
+                indexs.append(bbox)
+
         tables_list.append(tables)
         tables_body_list.append(tables_body)
         tables_caption_list.append(tables_caption)
@@ -128,10 +135,22 @@ def draw_layout_bbox(pdf_info, pdf_bytes, out_path, filename):
         titles_list.append(titles)
         texts_list.append(texts)
         interequations_list.append(interequations)
+        lists_list.append(lists)
+        indexs_list.append(indexs)
+
+    layout_bbox_list = []
+
+    for page in pdf_info:
+        page_block_list = []
+        for block in page['para_blocks']:
+            bbox = block['bbox']
+            page_block_list.append(bbox)
+        layout_bbox_list.append(page_block_list)
 
     pdf_docs = fitz.open('pdf', pdf_bytes)
+
     for i, page in enumerate(pdf_docs):
-        draw_bbox_with_number(i, layout_bbox_list, page, [255, 0, 0], False)
+
         draw_bbox_without_number(i, dropped_bbox_list, page, [158, 158, 158],
                                  True)
         draw_bbox_without_number(i, tables_list, page, [153, 153, 0],
@@ -146,12 +165,16 @@ def draw_layout_bbox(pdf_info, pdf_bytes, out_path, filename):
         draw_bbox_without_number(i, imgs_body_list, page, [153, 255, 51], True)
         draw_bbox_without_number(i, imgs_caption_list, page, [102, 178, 255],
                                  True)
-        draw_bbox_with_number(i, imgs_footnote_list, page, [255, 178, 102],
+        draw_bbox_without_number(i, imgs_footnote_list, page, [255, 178, 102],
                              True)
         draw_bbox_without_number(i, titles_list, page, [102, 102, 255], True)
         draw_bbox_without_number(i, texts_list, page, [153, 0, 76], True)
         draw_bbox_without_number(i, interequations_list, page, [0, 255, 0],
                                  True)
+        draw_bbox_without_number(i, lists_list, page, [40, 169, 92], True)
+        draw_bbox_without_number(i, indexs_list, page, [40, 169, 92], True)
+
+        draw_bbox_with_number(i, layout_bbox_list, page, [255, 0, 0], False, draw_bbox=False)
 
     # Save the PDF
     pdf_docs.save(f'{out_path}/{filename}_layout.pdf')
@@ -211,9 +234,9 @@ def draw_span_bbox(pdf_info, pdf_bytes, out_path, filename):
         # build the remaining useful_list
         for block in page['para_blocks']:
             if block['type'] in [
-                    BlockType.Text,
-                    BlockType.Title,
-                    BlockType.InterlineEquation,
+                BlockType.Text,
+                BlockType.Title,
+                BlockType.InterlineEquation,
             ]:
                 for line in block['lines']:
                     for span in line['spans']:
@@ -232,10 +255,8 @@ def draw_span_bbox(pdf_info, pdf_bytes, out_path, filename):
     for i, page in enumerate(pdf_docs):
         # get the data for the current page
         draw_bbox_without_number(i, text_list, page, [255, 0, 0], False)
-        draw_bbox_without_number(i, inline_equation_list, page, [0, 255, 0],
-                                 False)
-        draw_bbox_without_number(i, interline_equation_list, page, [0, 0, 255],
-                                 False)
+        draw_bbox_without_number(i, inline_equation_list, page, [0, 255, 0], False)
+        draw_bbox_without_number(i, interline_equation_list, page, [0, 0, 255], False)
         draw_bbox_without_number(i, image_list, page, [255, 204, 0], False)
         draw_bbox_without_number(i, table_list, page, [204, 0, 255], False)
         draw_bbox_without_number(i, dropped_list, page, [158, 158, 158], False)
@@ -244,7 +265,7 @@ def draw_span_bbox(pdf_info, pdf_bytes, out_path, filename):
     pdf_docs.save(f'{out_path}/{filename}_spans.pdf')
 
 
-def drow_model_bbox(model_list: list, pdf_bytes, out_path, filename):
+def draw_model_bbox(model_list: list, pdf_bytes, out_path, filename):
     dropped_bbox_list = []
     tables_body_list, tables_caption_list, tables_footnote_list = [], [], []
     imgs_body_list, imgs_caption_list, imgs_footnote_list = [], [], []
@@ -279,7 +300,7 @@ def drow_model_bbox(model_list: list, pdf_bytes, out_path, filename):
             elif layout_det['category_id'] == CategoryId.ImageCaption:
                 imgs_caption.append(bbox)
             elif layout_det[
-                    'category_id'] == CategoryId.InterlineEquation_YOLO:
+                'category_id'] == CategoryId.InterlineEquation_YOLO:
                 interequations.append(bbox)
             elif layout_det['category_id'] == CategoryId.Abandon:
                 page_dropped_list.append(bbox)
@@ -316,3 +337,47 @@ def drow_model_bbox(model_list: list, pdf_bytes, out_path, filename):
 
     # Save the PDF
     pdf_docs.save(f'{out_path}/{filename}_model.pdf')
+
+
+def draw_line_sort_bbox(pdf_info, pdf_bytes, out_path, filename):
+    layout_bbox_list = []
+
+    for page in pdf_info:
+        page_line_list = []
+        for block in page['preproc_blocks']:
+            if block['type'] in ['text', 'title', 'interline_equation']:
+                for line in block['lines']:
+                    bbox = line['bbox']
+                    index = line['index']
+                    page_line_list.append({'index': index, 'bbox': bbox})
+            if block['type'] in ['table', 'image']:
+                bbox = block['bbox']
+                index = block['index']
+                page_line_list.append({'index': index, 'bbox': bbox})
+            # for line in block['lines']:
+            #     bbox = line['bbox']
+            #     index = line['index']
+            #     page_line_list.append({'index': index, 'bbox': bbox})
+        sorted_bboxes = sorted(page_line_list, key=lambda x: x['index'])
+        layout_bbox_list.append([sorted_bbox['bbox'] for sorted_bbox in sorted_bboxes])
+    pdf_docs = fitz.open('pdf', pdf_bytes)
+    for i, page in enumerate(pdf_docs):
+        draw_bbox_with_number(i, layout_bbox_list, page, [255, 0, 0], False)
+
+    pdf_docs.save(f'{out_path}/{filename}_line_sort.pdf')
+
+
+def draw_layout_sort_bbox(pdf_info, pdf_bytes, out_path, filename):
+    layout_bbox_list = []
+
+    for page in pdf_info:
+        page_block_list = []
+        for block in page['para_blocks']:
+            bbox = block['bbox']
+            page_block_list.append(bbox)
+        layout_bbox_list.append(page_block_list)
+    pdf_docs = fitz.open('pdf', pdf_bytes)
+    for i, page in enumerate(pdf_docs):
+        draw_bbox_with_number(i, layout_bbox_list, page, [255, 0, 0], False)
+
+    pdf_docs.save(f'{out_path}/{filename}_layout_sort.pdf')

+ 2 - 0
magic_pdf/libs/ocr_content_type.py

@@ -20,6 +20,8 @@ class BlockType:
     InterlineEquation = 'interline_equation'
     Footnote = 'footnote'
     Discarded = 'discarded'
+    List = 'list'
+    Index = 'index'
 
 
 class CategoryId:

+ 1 - 1
magic_pdf/libs/version.py

@@ -1 +1 @@
-__version__ = "0.7.1"
+__version__ = "0.8.0"

+ 48 - 27
magic_pdf/model/doc_analyze_by_custom_model.py

@@ -4,6 +4,7 @@ import fitz
 import numpy as np
 from loguru import logger
 
+from magic_pdf.libs.clean_memory import clean_memory
 from magic_pdf.libs.config_reader import get_local_models_dir, get_device, get_table_recog_config
 from magic_pdf.model.model_list import MODEL
 import magic_pdf.model as model_config
@@ -23,7 +24,7 @@ def remove_duplicates_dicts(lst):
     return unique_dicts
 
 
-def load_images_from_pdf(pdf_bytes: bytes, dpi=200) -> list:
+def load_images_from_pdf(pdf_bytes: bytes, dpi=200, start_page_id=0, end_page_id=None) -> list:
     try:
         from PIL import Image
     except ImportError:
@@ -32,18 +33,28 @@ def load_images_from_pdf(pdf_bytes: bytes, dpi=200) -> list:
 
     images = []
     with fitz.open("pdf", pdf_bytes) as doc:
+        pdf_page_num = doc.page_count
+        end_page_id = end_page_id if end_page_id is not None and end_page_id >= 0 else pdf_page_num - 1
+        if end_page_id > pdf_page_num - 1:
+            logger.warning("end_page_id is out of range, use images length")
+            end_page_id = pdf_page_num - 1
+
         for index in range(0, doc.page_count):
-            page = doc[index]
-            mat = fitz.Matrix(dpi / 72, dpi / 72)
-            pm = page.get_pixmap(matrix=mat, alpha=False)
+            if start_page_id <= index <= end_page_id:
+                page = doc[index]
+                mat = fitz.Matrix(dpi / 72, dpi / 72)
+                pm = page.get_pixmap(matrix=mat, alpha=False)
+
+                # If the width or height exceeds 9000 after scaling, do not scale further.
+                if pm.width > 9000 or pm.height > 9000:
+                    pm = page.get_pixmap(matrix=fitz.Matrix(1, 1), alpha=False)
 
-            # If the width or height exceeds 9000 after scaling, do not scale further.
-            if pm.width > 9000 or pm.height > 9000:
-                pm = page.get_pixmap(matrix=fitz.Matrix(1, 1), alpha=False)
+                img = Image.frombytes("RGB", (pm.width, pm.height), pm.samples)
+                img = np.array(img)
+                img_dict = {"img": img, "width": pm.width, "height": pm.height}
+            else:
+                img_dict = {"img": [], "width": 0, "height": 0}
 
-            img = Image.frombytes("RGB", (pm.width, pm.height), pm.samples)
-            img = np.array(img)
-            img_dict = {"img": img, "width": pm.width, "height": pm.height}
             images.append(img_dict)
     return images
 
@@ -57,14 +68,14 @@ class ModelSingleton:
             cls._instance = super().__new__(cls)
         return cls._instance
 
-    def get_model(self, ocr: bool, show_log: bool):
-        key = (ocr, show_log)
+    def get_model(self, ocr: bool, show_log: bool, lang=None):
+        key = (ocr, show_log, lang)
         if key not in self._models:
-            self._models[key] = custom_model_init(ocr=ocr, show_log=show_log)
+            self._models[key] = custom_model_init(ocr=ocr, show_log=show_log, lang=lang)
         return self._models[key]
 
 
-def custom_model_init(ocr: bool = False, show_log: bool = False):
+def custom_model_init(ocr: bool = False, show_log: bool = False, lang=None):
     model = None
 
     if model_config.__model_mode__ == "lite":
@@ -78,7 +89,7 @@ def custom_model_init(ocr: bool = False, show_log: bool = False):
         model_init_start = time.time()
         if model == MODEL.Paddle:
             from magic_pdf.model.pp_structure_v2 import CustomPaddleModel
-            custom_model = CustomPaddleModel(ocr=ocr, show_log=show_log)
+            custom_model = CustomPaddleModel(ocr=ocr, show_log=show_log, lang=lang)
         elif model == MODEL.PEK:
             from magic_pdf.model.pdf_extract_kit import CustomPEKModel
             # read model-dir and device from the config file
@@ -89,7 +100,9 @@ def custom_model_init(ocr: bool = False, show_log: bool = False):
                            "show_log": show_log,
                            "models_dir": local_models_dir,
                            "device": device,
-                           "table_config": table_config}
+                           "table_config": table_config,
+                           "lang": lang,
+                           }
             custom_model = CustomPEKModel(**model_input)
         else:
             logger.error("Not allow model_name!")
@@ -104,19 +117,19 @@ def custom_model_init(ocr: bool = False, show_log: bool = False):
 
 
 def doc_analyze(pdf_bytes: bytes, ocr: bool = False, show_log: bool = False,
-                start_page_id=0, end_page_id=None):
+                start_page_id=0, end_page_id=None, lang=None):
 
     model_manager = ModelSingleton()
-    custom_model = model_manager.get_model(ocr, show_log)
-
-    images = load_images_from_pdf(pdf_bytes)
+    custom_model = model_manager.get_model(ocr, show_log, lang)
 
-    # end_page_id = end_page_id if end_page_id else len(images) - 1
-    end_page_id = end_page_id if end_page_id is not None and end_page_id >= 0 else len(images) - 1
+    with fitz.open("pdf", pdf_bytes) as doc:
+        pdf_page_num = doc.page_count
+        end_page_id = end_page_id if end_page_id is not None and end_page_id >= 0 else pdf_page_num - 1
+        if end_page_id > pdf_page_num - 1:
+            logger.warning("end_page_id is out of range, use images length")
+            end_page_id = pdf_page_num - 1
 
-    if end_page_id > len(images) - 1:
-        logger.warning("end_page_id is out of range, use images length")
-        end_page_id = len(images) - 1
+    images = load_images_from_pdf(pdf_bytes, start_page_id=start_page_id, end_page_id=end_page_id)
 
     model_json = []
     doc_analyze_start = time.time()
@@ -132,7 +145,15 @@ def doc_analyze(pdf_bytes: bytes, ocr: bool = False, show_log: bool = False,
         page_info = {"page_no": index, "height": page_height, "width": page_width}
         page_dict = {"layout_dets": result, "page_info": page_info}
         model_json.append(page_dict)
-    doc_analyze_cost = time.time() - doc_analyze_start
-    logger.info(f"doc analyze cost: {doc_analyze_cost}")
+
+    gc_start = time.time()
+    clean_memory()
+    gc_time = round(time.time() - gc_start, 2)
+    logger.info(f"gc time: {gc_time}")
+
+    doc_analyze_time = round(time.time() - doc_analyze_start, 2)
+    doc_analyze_speed = round((end_page_id + 1 - start_page_id) / doc_analyze_time, 2)
+    logger.info(f"doc analyze time: {doc_analyze_time},"
+                f" speed: {doc_analyze_speed} pages/second")
 
     return model_json
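The reworked entry point now accepts a page range and an OCR language hint that is threaded through to the model. A minimal call sketch (the input file name and the language code are illustrative):

```python
from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze

with open('demo.pdf', 'rb') as f:  # illustrative input file
    pdf_bytes = f.read()

# Analyze only pages 0-4 and hint the OCR model to use Chinese.
model_json = doc_analyze(pdf_bytes, ocr=True, show_log=False,
                         start_page_id=0, end_page_id=4, lang='ch')
print(len(model_json))  # per-page layout detections
```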

+ 115 - 46
magic_pdf/model/magic_model.py

@@ -1,8 +1,9 @@
 import json
 
 from magic_pdf.libs.boxbase import (_is_in, _is_part_overlap, bbox_distance,
-                                    bbox_relative_pos, calculate_iou,
-                                    calculate_overlap_area_in_bbox1_area_ratio)
+                                    bbox_relative_pos, box_area, calculate_iou,
+                                    calculate_overlap_area_in_bbox1_area_ratio,
+                                    get_overlap_area)
 from magic_pdf.libs.commons import fitz, join_path
 from magic_pdf.libs.coordinate_transform import get_scale_ratio
 from magic_pdf.libs.local_math import float_gt
@@ -12,6 +13,7 @@ from magic_pdf.rw.AbsReaderWriter import AbsReaderWriter
 from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter
 
 CAPATION_OVERLAP_AREA_RATIO = 0.6
+MERGE_BOX_OVERLAP_AREA_RATIO = 1.1
 
 
 class MagicModel:
@@ -108,6 +110,24 @@ class MagicModel:
         self.__fix_by_remove_high_iou_and_low_confidence()
         self.__fix_footnote()
 
+    def _bbox_distance(self, bbox1, bbox2):
+        left, right, bottom, top = bbox_relative_pos(bbox1, bbox2)
+        flags = [left, right, bottom, top]
+        count = sum([1 if v else 0 for v in flags])
+        if count > 1:
+            return float('inf')
+        if left or right:
+            l1 = bbox1[3] - bbox1[1]
+            l2 = bbox2[3] - bbox2[1]
+        else:
+            l1 = bbox1[2] - bbox1[0]
+            l2 = bbox2[2] - bbox2[0]
+
+        if l2 > l1 and (l2 - l1) / l1 > 0.5:
+            return float('inf')
+
+        return bbox_distance(bbox1, bbox2)
+
     def __fix_footnote(self):
         # 3: figure, 5: table, 7: footnote
         for model_page_info in self.__model_list:
@@ -124,49 +144,51 @@ class MagicModel:
                     tables.append(obj)
                 if len(footnotes) * len(figures) == 0:
                     continue
-                dis_figure_footnote = {}
-                dis_table_footnote = {}
-
-                for i in range(len(footnotes)):
-                    for j in range(len(figures)):
-                        pos_flag_count = sum(
-                            list(
-                                map(
-                                    lambda x: 1 if x else 0,
-                                    bbox_relative_pos(
-                                        footnotes[i]['bbox'], figures[j]['bbox']
-                                    ),
-                                )
+            dis_figure_footnote = {}
+            dis_table_footnote = {}
+
+            for i in range(len(footnotes)):
+                for j in range(len(figures)):
+                    pos_flag_count = sum(
+                        list(
+                            map(
+                                lambda x: 1 if x else 0,
+                                bbox_relative_pos(
+                                    footnotes[i]['bbox'], figures[j]['bbox']
+                                ),
                             )
                         )
-                        if pos_flag_count > 1:
-                            continue
-                        dis_figure_footnote[i] = min(
-                            bbox_distance(figures[j]['bbox'], footnotes[i]['bbox']),
-                            dis_figure_footnote.get(i, float('inf')),
-                        )
-                for i in range(len(footnotes)):
-                    for j in range(len(tables)):
-                        pos_flag_count = sum(
-                            list(
-                                map(
-                                    lambda x: 1 if x else 0,
-                                    bbox_relative_pos(
-                                        footnotes[i]['bbox'], tables[j]['bbox']
-                                    ),
-                                )
+                    )
+                    if pos_flag_count > 1:
+                        continue
+                    dis_figure_footnote[i] = min(
+                        self._bbox_distance(figures[j]['bbox'], footnotes[i]['bbox']),
+                        dis_figure_footnote.get(i, float('inf')),
+                    )
+            for i in range(len(footnotes)):
+                for j in range(len(tables)):
+                    pos_flag_count = sum(
+                        list(
+                            map(
+                                lambda x: 1 if x else 0,
+                                bbox_relative_pos(
+                                    footnotes[i]['bbox'], tables[j]['bbox']
+                                ),
                             )
                         )
-                        if pos_flag_count > 1:
-                            continue
+                    )
+                    if pos_flag_count > 1:
+                        continue
 
-                        dis_table_footnote[i] = min(
-                            bbox_distance(tables[j]['bbox'], footnotes[i]['bbox']),
-                            dis_table_footnote.get(i, float('inf')),
-                        )
-                for i in range(len(footnotes)):
-                    if dis_table_footnote.get(i, float('inf')) > dis_figure_footnote[i]:
-                        footnotes[i]['category_id'] = CategoryId.ImageFootnote
+                    dis_table_footnote[i] = min(
+                        self._bbox_distance(tables[j]['bbox'], footnotes[i]['bbox']),
+                        dis_table_footnote.get(i, float('inf')),
+                    )
+            for i in range(len(footnotes)):
+                if i not in dis_figure_footnote:
+                    continue
+                if dis_table_footnote.get(i, float('inf')) > dis_figure_footnote[i]:
+                    footnotes[i]['category_id'] = CategoryId.ImageFootnote
 
     def __reduct_overlap(self, bboxes):
         N = len(bboxes)
@@ -191,6 +213,44 @@ class MagicModel:
        Select all subjects that overlap the merged bbox with an overlap area larger than the object's own area,
        then compute the shortest distance between those subjects and the object.
         """
+        def search_overlap_between_boxes(
+            subject_idx, object_idx
+        ):
+            idxes = [subject_idx, object_idx]
+            x0s = [all_bboxes[idx]['bbox'][0] for idx in idxes]
+            y0s = [all_bboxes[idx]['bbox'][1] for idx in idxes]
+            x1s = [all_bboxes[idx]['bbox'][2] for idx in idxes]
+            y1s = [all_bboxes[idx]['bbox'][3] for idx in idxes]
+
+            merged_bbox = [
+                min(x0s),
+                min(y0s),
+                max(x1s),
+                max(y1s),
+            ]
+            ratio = 0
+
+            other_objects = list(
+                map(
+                    lambda x: {'bbox': x['bbox'], 'score': x['score']},
+                    filter(
+                        lambda x: x['category_id']
+                        not in (object_category_id, subject_category_id),
+                        self.__model_list[page_no]['layout_dets'],
+                    ),
+                )
+            )
+            for other_object in other_objects:
+                ratio = max(
+                    ratio,
+                    get_overlap_area(
+                        merged_bbox, other_object['bbox']
+                    ) * 1.0 / box_area(all_bboxes[object_idx]['bbox'])
+                )
+                if ratio >= MERGE_BOX_OVERLAP_AREA_RATIO:
+                    break
+
+            return ratio
 
         def may_find_other_nearest_bbox(subject_idx, object_idx):
             ret = float('inf')
@@ -299,7 +359,16 @@ class MagicModel:
                 ):
                     continue
 
-                dis[i][j] = bbox_distance(all_bboxes[i]['bbox'], all_bboxes[j]['bbox'])
+                subject_idx, object_idx = i, j
+                if all_bboxes[j]['category_id'] == subject_category_id:
+                    subject_idx, object_idx = j, i
+
+                if search_overlap_between_boxes(subject_idx, object_idx) >= MERGE_BOX_OVERLAP_AREA_RATIO:
+                    dis[i][j] = float('inf')
+                    dis[j][i] = dis[i][j]
+                    continue
+
+                dis[i][j] = self._bbox_distance(all_bboxes[subject_idx]['bbox'], all_bboxes[object_idx]['bbox'])
                 dis[j][i] = dis[i][j]
 
         used = set()
@@ -627,13 +696,13 @@ class MagicModel:
                     span['type'] = ContentType.Image
                 elif category_id == 5:
                     # get the table model result
-                    latex = layout_det.get("latex", None)
-                    html = layout_det.get("html", None)
+                    latex = layout_det.get('latex', None)
+                    html = layout_det.get('html', None)
                     if latex:
-                        span["latex"] = latex
+                        span['latex'] = latex
                     elif html:
-                        span["html"] = html
-                    span["type"] = ContentType.Table
+                        span['html'] = html
+                    span['type'] = ContentType.Table
                 elif category_id == 13:
                     span['content'] = layout_det['latex']
                     span['type'] = ContentType.InlineEquation
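The new `_bbox_distance` helper filters candidate caption/footnote pairs before measuring distance: pairs adjacent on more than one side, or whose lengths along the shared axis differ by more than 50%, are rejected with `float('inf')`. A small numeric illustration of the length filter:

```python
# For left/right neighbors, heights (bbox[3] - bbox[1]) are compared;
# a candidate more than 50% taller than the anchor is rejected.
l1 = 10  # anchor height
l2 = 16  # candidate height
rejected = l2 > l1 and (l2 - l1) / l1 > 0.5
print(rejected)  # True -> the pair is treated as infinitely far apart
```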

+ 65 - 36
magic_pdf/model/pdf_extract_kit.py

@@ -3,9 +3,11 @@ import os
 import time
 
 from magic_pdf.libs.Constants import *
+from magic_pdf.libs.clean_memory import clean_memory
 from magic_pdf.model.model_list import AtomicModel
 
 os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # disable albumentations update check
+os.environ['YOLO_VERBOSE'] = 'False'  # disable yolo logger
 try:
     import cv2
     import yaml
@@ -32,7 +34,7 @@ except ImportError as e:
     exit(1)
 
 from magic_pdf.model.pek_sub_modules.layoutlmv3.model_init import Layoutlmv3_Predictor
-from magic_pdf.model.pek_sub_modules.post_process import get_croped_image, latex_rm_whitespace
+from magic_pdf.model.pek_sub_modules.post_process import latex_rm_whitespace
 from magic_pdf.model.pek_sub_modules.self_modify import ModifiedPaddleOCR
 from magic_pdf.model.pek_sub_modules.structeqtable.StructTableModel import StructTableModel
 from magic_pdf.model.ppTableModel import ppTableModel
@@ -58,12 +60,13 @@ def mfd_model_init(weight):
 def mfr_model_init(weight_dir, cfg_path, _device_='cpu'):
     args = argparse.Namespace(cfg_path=cfg_path, options=None)
     cfg = Config(args)
-    cfg.config.model.pretrained = os.path.join(weight_dir, "pytorch_model.bin")
+    cfg.config.model.pretrained = os.path.join(weight_dir, "pytorch_model.pth")
     cfg.config.model.model_config.model_name = weight_dir
     cfg.config.model.tokenizer_config.path = weight_dir
     task = tasks.setup_task(cfg)
     model = task.build_model(cfg)
-    model = model.to(_device_)
+    model.to(_device_)
+    model.eval()
     vis_processor = load_processor('formula_image_eval', cfg.config.datasets.formula_rec_eval.vis_processor.eval)
     mfr_transform = transforms.Compose([vis_processor, ])
     return [model, mfr_transform]
@@ -74,8 +77,11 @@ def layout_model_init(weight, config_file, device):
     return model
 
 
-def ocr_model_init(show_log: bool = False, det_db_box_thresh=0.3):
-    model = ModifiedPaddleOCR(show_log=show_log, det_db_box_thresh=det_db_box_thresh)
+def ocr_model_init(show_log: bool = False, det_db_box_thresh=0.3, lang=None):
+    if lang is not None:
+        model = ModifiedPaddleOCR(show_log=show_log, det_db_box_thresh=det_db_box_thresh, lang=lang)
+    else:
+        model = ModifiedPaddleOCR(show_log=show_log, det_db_box_thresh=det_db_box_thresh)
     return model
 
 
@@ -134,7 +140,8 @@ def atom_model_init(model_name: str, **kwargs):
     elif model_name == AtomicModel.OCR:
         atom_model = ocr_model_init(
             kwargs.get("ocr_show_log"),
-            kwargs.get("det_db_box_thresh")
+            kwargs.get("det_db_box_thresh"),
+            kwargs.get("lang")
         )
     elif model_name == AtomicModel.Table:
         atom_model = table_model_init(
@@ -150,6 +157,23 @@ def atom_model_init(model_name: str, **kwargs):
     return atom_model
 
 
+#  Unified crop img logic
+def crop_img(input_res, input_pil_img, crop_paste_x=0, crop_paste_y=0):
+    crop_xmin, crop_ymin = int(input_res['poly'][0]), int(input_res['poly'][1])
+    crop_xmax, crop_ymax = int(input_res['poly'][4]), int(input_res['poly'][5])
+    # Create a white background expanded by crop_paste_x / crop_paste_y on each side
+    crop_new_width = crop_xmax - crop_xmin + crop_paste_x * 2
+    crop_new_height = crop_ymax - crop_ymin + crop_paste_y * 2
+    return_image = Image.new('RGB', (crop_new_width, crop_new_height), 'white')
+
+    # Crop image
+    crop_box = (crop_xmin, crop_ymin, crop_xmax, crop_ymax)
+    cropped_img = input_pil_img.crop(crop_box)
+    return_image.paste(cropped_img, (crop_paste_x, crop_paste_y))
+    return_list = [crop_paste_x, crop_paste_y, crop_xmin, crop_ymin, crop_xmax, crop_ymax, crop_new_width, crop_new_height]
+    return return_image, return_list
+
+
 class CustomPEKModel:
 
     def __init__(self, ocr: bool = False, show_log: bool = False, **kwargs):
@@ -177,9 +201,10 @@ class CustomPEKModel:
         self.table_max_time = self.table_config.get("max_time", TABLE_MAX_TIME_VALUE)
         self.table_model_type = self.table_config.get("model", TABLE_MASTER)
         self.apply_ocr = ocr
+        self.lang = kwargs.get("lang", None)
         logger.info(
-            "DocAnalysis init, this may take some times. apply_layout: {}, apply_formula: {}, apply_ocr: {}, apply_table: {}".format(
-                self.apply_layout, self.apply_formula, self.apply_ocr, self.apply_table
+            "DocAnalysis init, this may take some times. apply_layout: {}, apply_formula: {}, apply_ocr: {}, apply_table: {}, lang: {}".format(
+                self.apply_layout, self.apply_formula, self.apply_ocr, self.apply_table, self.lang
             )
         )
         assert self.apply_layout, "DocAnalysis must contain layout model."
@@ -225,11 +250,13 @@ class CustomPEKModel:
         )
         # initialize OCR
         if self.apply_ocr:
+
             # self.ocr_model = ModifiedPaddleOCR(show_log=show_log, det_db_box_thresh=0.3)
             self.ocr_model = atom_model_manager.get_atom_model(
                 atom_model_name=AtomicModel.OCR,
                 ocr_show_log=show_log,
-                det_db_box_thresh=0.3
+                det_db_box_thresh=0.3,
+                lang=self.lang
             )
         # init table model
         if self.apply_table:
@@ -243,10 +270,13 @@ class CustomPEKModel:
                 table_max_time=self.table_max_time,
                 device=self.device
             )
+
         logger.info('DocAnalysis init done!')
 
     def __call__(self, image):
 
+        page_start = time.time()
+
         latex_filling_list = []
         mf_image_list = []
 
@@ -254,11 +284,15 @@ class CustomPEKModel:
         layout_start = time.time()
         layout_res = self.layout_model(image, ignore_catids=[])
         layout_cost = round(time.time() - layout_start, 2)
-        logger.info(f"layout detection cost: {layout_cost}")
+        logger.info(f"layout detection time: {layout_cost}")
+
+        pil_img = Image.fromarray(image)
 
         if self.apply_formula:
             # formula detection
+            mfd_start = time.time()
             mfd_res = self.mfd_model.predict(image, imgsz=1888, conf=0.25, iou=0.45, verbose=True)[0]
+            logger.info(f"mfd time: {round(time.time() - mfd_start, 2)}")
             for xyxy, conf, cla in zip(mfd_res.boxes.xyxy.cpu(), mfd_res.boxes.conf.cpu(), mfd_res.boxes.cls.cpu()):
                 xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy]
                 new_item = {
@@ -269,7 +303,8 @@ class CustomPEKModel:
                 }
                 layout_res.append(new_item)
                 latex_filling_list.append(new_item)
-                bbox_img = get_croped_image(Image.fromarray(image), [xmin, ymin, xmax, ymax])
+                # bbox_img = get_croped_image(pil_img, [xmin, ymin, xmax, ymax])
+                bbox_img = pil_img.crop((xmin, ymin, xmax, ymax))
                 mf_image_list.append(bbox_img)
 
             # formula recognition (MFR)
@@ -279,7 +314,8 @@ class CustomPEKModel:
             mfr_res = []
             for mf_img in dataloader:
                 mf_img = mf_img.to(self.device)
-                output = self.mfr_model.generate({'image': mf_img})
+                with torch.no_grad():
+                    output = self.mfr_model.generate({'image': mf_img})
                 mfr_res.extend(output['pred_str'])
             for res, latex in zip(latex_filling_list, mfr_res):
                 res['latex'] = latex_rm_whitespace(latex)
@@ -301,23 +337,14 @@ class CustomPEKModel:
             elif int(res['category_id']) in [5]:
                 table_res_list.append(res)
 
-        #  Unified crop img logic
-        def crop_img(input_res, input_pil_img, crop_paste_x=0, crop_paste_y=0):
-            crop_xmin, crop_ymin = int(input_res['poly'][0]), int(input_res['poly'][1])
-            crop_xmax, crop_ymax = int(input_res['poly'][4]), int(input_res['poly'][5])
-            # Create a white background with an additional width and height of 50
-            crop_new_width = crop_xmax - crop_xmin + crop_paste_x * 2
-            crop_new_height = crop_ymax - crop_ymin + crop_paste_y * 2
-            return_image = Image.new('RGB', (crop_new_width, crop_new_height), 'white')
-
-            # Crop image
-            crop_box = (crop_xmin, crop_ymin, crop_xmax, crop_ymax)
-            cropped_img = input_pil_img.crop(crop_box)
-            return_image.paste(cropped_img, (crop_paste_x, crop_paste_y))
-            return_list = [crop_paste_x, crop_paste_y, crop_xmin, crop_ymin, crop_xmax, crop_ymax, crop_new_width, crop_new_height]
-            return return_image, return_list
-
-        pil_img = Image.fromarray(image)
+        if torch.cuda.is_available():
+            properties = torch.cuda.get_device_properties(self.device)
+            total_memory = properties.total_memory / (1024 ** 3)  # convert bytes to GB
+            if total_memory <= 10:
+                gc_start = time.time()
+                clean_memory()
+                gc_time = round(time.time() - gc_start, 2)
+                logger.info(f"gc time: {gc_time}")
 
         # OCR recognition
         if self.apply_ocr:
@@ -367,7 +394,7 @@ class CustomPEKModel:
                         })
 
             ocr_cost = round(time.time() - ocr_start, 2)
-            logger.info(f"ocr cost: {ocr_cost}")
+            logger.info(f"ocr time: {ocr_cost}")
 
         # table recognition
         if self.apply_table:
@@ -375,7 +402,7 @@ class CustomPEKModel:
             for res in table_res_list:
                 new_image, _ = crop_img(res, pil_img)
                 single_table_start_time = time.time()
-                logger.info("------------------table recognition processing begins-----------------")
+                # logger.info("------------------table recognition processing begins-----------------")
                 latex_code = None
                 html_code = None
                 if self.table_model_type == STRUCT_EQTABLE:
@@ -383,8 +410,9 @@ class CustomPEKModel:
                         latex_code = self.table_model.image2latex(new_image)[0]
                 else:
                     html_code = self.table_model.img2html(new_image)
+
                 run_time = time.time() - single_table_start_time
-                logger.info(f"------------table recognition processing ends within {run_time}s-----")
+                # logger.info(f"------------table recognition processing ends within {run_time}s-----")
                 if run_time > self.table_max_time:
                     logger.warning(f"------------table recognition processing exceeds max time {self.table_max_time}s----------")
                 # check whether the result is valid
@@ -395,12 +423,13 @@ class CustomPEKModel:
                     if expected_ending:
                         res["latex"] = latex_code
                     else:
-                        logger.warning(f"------------table recognition processing fails----------")
+                        logger.warning(f"table recognition processing fails, not found expected LaTeX table end")
                 elif html_code:
                     res["html"] = html_code
                 else:
-                    logger.warning(f"------------table recognition processing fails----------")
-            table_cost = round(time.time() - table_start, 2)
-            logger.info(f"table cost: {table_cost}")
+                    logger.warning(f"table recognition processing fails, not get latex or html return")
+            logger.info(f"table time: {round(time.time() - table_start, 2)}")
+
+        logger.info(f"-----page total time: {round(time.time() - page_start, 2)}-----")
 
         return layout_res
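
For context, a minimal sketch of how the new `lang` keyword travels through this class: it is read from `kwargs` in `CustomPEKModel.__init__` and forwarded to the PaddleOCR atom model. The page image below is a stand-in; in the pipeline it comes from rendering a PDF page.

```python
import numpy as np
from magic_pdf.model.pdf_extract_kit import CustomPEKModel

# hypothetical blank page image; real input is a rendered PDF page as an RGB array
page_image = np.zeros((1123, 794, 3), dtype=np.uint8)

# `lang` is picked up via kwargs.get("lang", None) and passed to the OCR atom model
model = CustomPEKModel(ocr=True, show_log=False, lang="en")
layout_res = model(page_image)  # layout, formula, OCR and table results for the page
```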

+ 5 - 2
magic_pdf/model/pp_structure_v2.py

@@ -18,8 +18,11 @@ def region_to_bbox(region):
 
 
 class CustomPaddleModel:
-    def __init__(self, ocr: bool = False, show_log: bool = False):
-        self.model = PPStructure(table=False, ocr=ocr, show_log=show_log)
+    def __init__(self, ocr: bool = False, show_log: bool = False, lang=None):
+        if lang is not None:
+            self.model = PPStructure(table=False, ocr=ocr, show_log=show_log, lang=lang)
+        else:
+            self.model = PPStructure(table=False, ocr=ocr, show_log=show_log)
 
     def __call__(self, img):
         try:

+ 0 - 0
projects/web_api/tests/__init__.py → magic_pdf/model/v3/__init__.py


+ 125 - 0
magic_pdf/model/v3/helpers.py

@@ -0,0 +1,125 @@
+from collections import defaultdict
+from typing import List, Dict
+
+import torch
+from transformers import LayoutLMv3ForTokenClassification
+
+MAX_LEN = 510
+CLS_TOKEN_ID = 0
+UNK_TOKEN_ID = 3
+EOS_TOKEN_ID = 2
+
+
+class DataCollator:
+    def __call__(self, features: List[dict]) -> Dict[str, torch.Tensor]:
+        bbox = []
+        labels = []
+        input_ids = []
+        attention_mask = []
+
+        # clip bbox and labels to max length, build input_ids and attention_mask
+        for feature in features:
+            _bbox = feature["source_boxes"]
+            if len(_bbox) > MAX_LEN:
+                _bbox = _bbox[:MAX_LEN]
+            _labels = feature["target_index"]
+            if len(_labels) > MAX_LEN:
+                _labels = _labels[:MAX_LEN]
+            _input_ids = [UNK_TOKEN_ID] * len(_bbox)
+            _attention_mask = [1] * len(_bbox)
+            assert len(_bbox) == len(_labels) == len(_input_ids) == len(_attention_mask)
+            bbox.append(_bbox)
+            labels.append(_labels)
+            input_ids.append(_input_ids)
+            attention_mask.append(_attention_mask)
+
+        # add CLS and EOS tokens
+        for i in range(len(bbox)):
+            bbox[i] = [[0, 0, 0, 0]] + bbox[i] + [[0, 0, 0, 0]]
+            labels[i] = [-100] + labels[i] + [-100]
+            input_ids[i] = [CLS_TOKEN_ID] + input_ids[i] + [EOS_TOKEN_ID]
+            attention_mask[i] = [1] + attention_mask[i] + [1]
+
+        # padding to max length
+        max_len = max(len(x) for x in bbox)
+        for i in range(len(bbox)):
+            bbox[i] = bbox[i] + [[0, 0, 0, 0]] * (max_len - len(bbox[i]))
+            labels[i] = labels[i] + [-100] * (max_len - len(labels[i]))
+            input_ids[i] = input_ids[i] + [EOS_TOKEN_ID] * (max_len - len(input_ids[i]))
+            attention_mask[i] = attention_mask[i] + [0] * (
+                max_len - len(attention_mask[i])
+            )
+
+        ret = {
+            "bbox": torch.tensor(bbox),
+            "attention_mask": torch.tensor(attention_mask),
+            "labels": torch.tensor(labels),
+            "input_ids": torch.tensor(input_ids),
+        }
+        # set label > MAX_LEN to -100, because original labels may be > MAX_LEN
+        ret["labels"][ret["labels"] > MAX_LEN] = -100
+        # set label > 0 to label-1, because original labels are 1-indexed
+        ret["labels"][ret["labels"] > 0] -= 1
+        return ret
+
+
+def boxes2inputs(boxes: List[List[int]]) -> Dict[str, torch.Tensor]:
+    bbox = [[0, 0, 0, 0]] + boxes + [[0, 0, 0, 0]]
+    input_ids = [CLS_TOKEN_ID] + [UNK_TOKEN_ID] * len(boxes) + [EOS_TOKEN_ID]
+    attention_mask = [1] + [1] * len(boxes) + [1]
+    return {
+        "bbox": torch.tensor([bbox]),
+        "attention_mask": torch.tensor([attention_mask]),
+        "input_ids": torch.tensor([input_ids]),
+    }
+
+
+def prepare_inputs(
+    inputs: Dict[str, torch.Tensor], model: LayoutLMv3ForTokenClassification
+) -> Dict[str, torch.Tensor]:
+    ret = {}
+    for k, v in inputs.items():
+        v = v.to(model.device)
+        if torch.is_floating_point(v):
+            v = v.to(model.dtype)
+        ret[k] = v
+    return ret
+
+
+def parse_logits(logits: torch.Tensor, length: int) -> List[int]:
+    """
+    parse logits to orders
+
+    :param logits: logits from model
+    :param length: input length
+    :return: orders
+    """
+    logits = logits[1 : length + 1, :length]
+    orders = logits.argsort(descending=False).tolist()
+    ret = [o.pop() for o in orders]
+    while True:
+        order_to_idxes = defaultdict(list)
+        for idx, order in enumerate(ret):
+            order_to_idxes[order].append(idx)
+        # filter idxes len > 1
+        order_to_idxes = {k: v for k, v in order_to_idxes.items() if len(v) > 1}
+        if not order_to_idxes:
+            break
+        # filter
+        for order, idxes in order_to_idxes.items():
+            # find original logits of idxes
+            idxes_to_logit = {}
+            for idx in idxes:
+                idxes_to_logit[idx] = logits[idx, order]
+            idxes_to_logit = sorted(
+                idxes_to_logit.items(), key=lambda x: x[1], reverse=True
+            )
+            # keep the highest logit as order, set others to next candidate
+            for idx, _ in idxes_to_logit[1:]:
+                ret[idx] = orders[idx].pop()
+
+    return ret
+
+
+def check_duplicate(a: List[int]) -> bool:
+    return len(a) != len(set(a))
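
Taken together, these helpers drive a LayoutReader-style LayoutLMv3 model for reading-order prediction. A minimal sketch of the round trip, assuming the `hantian/layoutreader` checkpoint used by `pdf_parse_union_core_v2.model_init` and made-up line boxes already scaled to LayoutReader's 0-1000 coordinate space:

```python
import torch
from transformers import LayoutLMv3ForTokenClassification

from magic_pdf.model.v3.helpers import boxes2inputs, parse_logits, prepare_inputs

model = LayoutLMv3ForTokenClassification.from_pretrained("hantian/layoutreader")
model.eval()

# three illustrative line bboxes in [0, 1000] x [0, 1000]
boxes = [[50, 50, 500, 80], [520, 50, 950, 80], [50, 90, 500, 120]]
inputs = prepare_inputs(boxes2inputs(boxes), model)  # move to model device/dtype
with torch.no_grad():
    logits = model(**inputs).logits.cpu().squeeze(0)
print(parse_logits(logits, len(boxes)))  # deduplicated reading-order indices
```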

File diff suppressed because it is too large
+ 251 - 0
magic_pdf/para/para_split_v3.py


+ 1 - 1
magic_pdf/pdf_parse_by_ocr.py

@@ -1,4 +1,4 @@
-from magic_pdf.pdf_parse_union_core import pdf_parse_union
+from magic_pdf.pdf_parse_union_core_v2 import pdf_parse_union
 
 
 def parse_pdf_by_ocr(pdf_bytes,

+ 1 - 1
magic_pdf/pdf_parse_by_txt.py

@@ -1,4 +1,4 @@
-from magic_pdf.pdf_parse_union_core import pdf_parse_union
+from magic_pdf.pdf_parse_union_core_v2 import pdf_parse_union
 
 
 def parse_pdf_by_txt(

+ 453 - 0
magic_pdf/pdf_parse_union_core_v2.py

@@ -0,0 +1,453 @@
+import os
+import statistics
+import time
+
+from loguru import logger
+
+from typing import List
+
+import torch
+
+from magic_pdf.libs.clean_memory import clean_memory
+from magic_pdf.libs.commons import fitz, get_delta_time
+from magic_pdf.libs.config_reader import get_local_layoutreader_model_dir
+from magic_pdf.libs.convert_utils import dict_to_list
+from magic_pdf.libs.drop_reason import DropReason
+from magic_pdf.libs.hash_utils import compute_md5
+from magic_pdf.libs.local_math import float_equal
+from magic_pdf.libs.ocr_content_type import ContentType
+from magic_pdf.model.magic_model import MagicModel
+from magic_pdf.para.para_split_v3 import para_split
+from magic_pdf.pre_proc.citationmarker_remove import remove_citation_marker
+from magic_pdf.pre_proc.construct_page_dict import ocr_construct_page_component_v2
+from magic_pdf.pre_proc.cut_image import ocr_cut_image_and_table
+from magic_pdf.pre_proc.equations_replace import remove_chars_in_text_blocks, replace_equations_in_textblock, \
+    combine_chars_to_pymudict
+from magic_pdf.pre_proc.ocr_detect_all_bboxes import ocr_prepare_bboxes_for_layout_split_v2
+from magic_pdf.pre_proc.ocr_dict_merge import  fill_spans_in_blocks, fix_block_spans, fix_discarded_block
+from magic_pdf.pre_proc.ocr_span_list_modify import remove_overlaps_min_spans, get_qa_need_list_v2, \
+    remove_overlaps_low_confidence_spans
+from magic_pdf.pre_proc.resolve_bbox_conflict import check_useful_block_horizontal_overlap
+
+
+def remove_horizontal_overlap_block_which_smaller(all_bboxes):
+    useful_blocks = []
+    for bbox in all_bboxes:
+        useful_blocks.append({
+            "bbox": bbox[:4]
+        })
+    is_useful_block_horz_overlap, smaller_bbox, bigger_bbox = check_useful_block_horizontal_overlap(useful_blocks)
+    if is_useful_block_horz_overlap:
+        logger.warning(
+            f"skip this page, reason: {DropReason.USEFUL_BLOCK_HOR_OVERLAP}, smaller bbox is {smaller_bbox}, bigger bbox is {bigger_bbox}")
+        for bbox in all_bboxes.copy():
+            if smaller_bbox == bbox[:4]:
+                all_bboxes.remove(bbox)
+
+    return is_useful_block_horz_overlap, all_bboxes
+
+
+def __replace_STX_ETX(text_str: str):
+    """Replace \u0002 and \u0003, which become garbled when extracted by pymupdf;
+    they were originally quotation marks.
+    Note: so far this issue has only been observed in English text, not in Chinese text.
+
+    Args:
+        text_str (str): raw text
+
+    Returns:
+        str: replaced text
+    """
+    if text_str:
+        s = text_str.replace('\u0002', "'")
+        s = s.replace("\u0003", "'")
+        return s
+    return text_str
+
+
+def txt_spans_extract(pdf_page, inline_equations, interline_equations):
+    text_raw_blocks = pdf_page.get_text("dict", flags=fitz.TEXTFLAGS_TEXT)["blocks"]
+    char_level_text_blocks = pdf_page.get_text("rawdict", flags=fitz.TEXTFLAGS_TEXT)[
+        "blocks"
+    ]
+    text_blocks = combine_chars_to_pymudict(text_raw_blocks, char_level_text_blocks)
+    text_blocks = replace_equations_in_textblock(
+        text_blocks, inline_equations, interline_equations
+    )
+    text_blocks = remove_citation_marker(text_blocks)
+    text_blocks = remove_chars_in_text_blocks(text_blocks)
+    spans = []
+    for v in text_blocks:
+        for line in v["lines"]:
+            for span in line["spans"]:
+                bbox = span["bbox"]
+                if float_equal(bbox[0], bbox[2]) or float_equal(bbox[1], bbox[3]):
+                    continue
+                if span.get('type') not in (ContentType.InlineEquation, ContentType.InterlineEquation):
+                    spans.append(
+                        {
+                            "bbox": list(span["bbox"]),
+                            "content": __replace_STX_ETX(span["text"]),
+                            "type": ContentType.Text,
+                            "score": 1.0,
+                        }
+                    )
+    return spans
+
+
+def replace_text_span(pymu_spans, ocr_spans):
+    return list(filter(lambda x: x["type"] != ContentType.Text, ocr_spans)) + pymu_spans
+
+
+def model_init(model_name: str):
+    from transformers import LayoutLMv3ForTokenClassification
+    if torch.cuda.is_available():
+        device = torch.device("cuda")
+        if torch.cuda.is_bf16_supported():
+            supports_bfloat16 = True
+        else:
+            supports_bfloat16 = False
+    else:
+        device = torch.device("cpu")
+        supports_bfloat16 = False
+
+    if model_name == "layoutreader":
+        # check whether the modelscope cache directory exists
+        layoutreader_model_dir = get_local_layoutreader_model_dir()
+        if os.path.exists(layoutreader_model_dir):
+            model = LayoutLMv3ForTokenClassification.from_pretrained(layoutreader_model_dir)
+        else:
+            logger.warning(
+                f"local layoutreader model not exists, use online model from huggingface")
+            model = LayoutLMv3ForTokenClassification.from_pretrained("hantian/layoutreader")
+        # check whether the device supports bfloat16
+        if supports_bfloat16:
+            model.bfloat16()
+        model.to(device).eval()
+    else:
+        logger.error("model name not allow")
+        exit(1)
+    return model
+
+
+class ModelSingleton:
+    _instance = None
+    _models = {}
+
+    def __new__(cls, *args, **kwargs):
+        if cls._instance is None:
+            cls._instance = super().__new__(cls)
+        return cls._instance
+
+    def get_model(self, model_name: str):
+        if model_name not in self._models:
+            self._models[model_name] = model_init(model_name=model_name)
+        return self._models[model_name]
+
+
+def do_predict(boxes: List[List[int]], model) -> List[int]:
+    from magic_pdf.model.v3.helpers import prepare_inputs, boxes2inputs, parse_logits
+    inputs = boxes2inputs(boxes)
+    inputs = prepare_inputs(inputs, model)
+    logits = model(**inputs).logits.cpu().squeeze(0)
+    return parse_logits(logits, len(boxes))
+
+
+def cal_block_index(fix_blocks, sorted_bboxes):
+    for block in fix_blocks:
+        # if block['type'] in ['text', 'title', 'interline_equation']:
+        #     line_index_list = []
+        #     if len(block['lines']) == 0:
+        #         block['index'] = sorted_bboxes.index(block['bbox'])
+        #     else:
+        #         for line in block['lines']:
+        #             line['index'] = sorted_bboxes.index(line['bbox'])
+        #             line_index_list.append(line['index'])
+        #         median_value = statistics.median(line_index_list)
+        #         block['index'] = median_value
+        #
+        # elif block['type'] in ['table', 'image']:
+        #     block['index'] = sorted_bboxes.index(block['bbox'])
+
+        line_index_list = []
+        if len(block['lines']) == 0:
+            block['index'] = sorted_bboxes.index(block['bbox'])
+        else:
+            for line in block['lines']:
+                line['index'] = sorted_bboxes.index(line['bbox'])
+                line_index_list.append(line['index'])
+            median_value = statistics.median(line_index_list)
+            block['index'] = median_value
+
+        # remove the virtual line info from table/image blocks
+        if block['type'] in ['table', 'image']:
+            del block['lines']
+
+    return fix_blocks
+
+
+def insert_lines_into_block(block_bbox, line_height, page_w, page_h):
+    # block_bbox is a tuple (x0, y0, x1, y1): (x0, y0) is the lower-left corner and (x1, y1) the upper-right corner
+    x0, y0, x1, y1 = block_bbox
+
+    block_height = y1 - y0
+    block_weight = x1 - x0
+
+    # if the block is shorter than three body-text lines, its bbox is returned as-is (else branch below)
+    if line_height*3 < block_height:
+        if block_height > page_h*0.25 and page_w*0.5 > block_weight > page_w*0.25:  # likely a two-column structure; split into finer lines
+            lines = int(block_height/line_height)+1
+        else:
+            # if the block width exceeds 0.4 of the page width, split the block into 3 lines
+            if block_weight > page_w*0.4:
+                line_height = (y1 - y0) / 3
+                lines = 3
+            elif block_weight > page_w*0.25: # otherwise split the block into 2 lines
+                line_height = (y1 - y0) / 2
+                lines = 2
+            else: # check the aspect ratio
+                if block_height/block_weight > 1.2:  # tall and narrow: do not split
+                    return [[x0, y0, x1, y1]]
+                else: # not tall and narrow: still split into 2 lines
+                    line_height = (y1 - y0) / 2
+                    lines = 2
+
+        # determine the y position from which to start drawing lines
+        current_y = y0
+
+        # stores line positions as [(x0, y0, x1, y1), ...]
+        lines_positions = []
+
+        for i in range(lines):
+            lines_positions.append([x0, current_y, x1, current_y + line_height])
+            current_y += line_height
+        return lines_positions
+
+    else:
+        return [[x0, y0, x1, y1]]
+
+
+def sort_lines_by_model(fix_blocks, page_w, page_h, line_height):
+    page_line_list = []
+    for block in fix_blocks:
+        if block['type'] in ['text', 'title', 'interline_equation']:
+            if len(block['lines']) == 0:
+                bbox = block['bbox']
+                lines = insert_lines_into_block(bbox, line_height, page_w, page_h)
+                for line in lines:
+                    block['lines'].append({'bbox': line, 'spans': []})
+                page_line_list.extend(lines)
+            else:
+                for line in block['lines']:
+                    bbox = line['bbox']
+                    page_line_list.append(bbox)
+        elif block['type'] in ['table', 'image']:
+            bbox = block['bbox']
+            lines = insert_lines_into_block(bbox, line_height, page_w, page_h)
+            block['lines'] = []
+            for line in lines:
+                block['lines'].append({'bbox': line, 'spans': []})
+            page_line_list.extend(lines)
+
+    # sort with layoutreader
+    x_scale = 1000.0 / page_w
+    y_scale = 1000.0 / page_h
+    boxes = []
+    # logger.info(f"Scale: {x_scale}, {y_scale}, Boxes len: {len(page_line_list)}")
+    for left, top, right, bottom in page_line_list:
+        if left < 0:
+            logger.warning(
+                f"left < 0, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}")
+            left = 0
+        if right > page_w:
+            logger.warning(
+                f"right > page_w, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}")
+            right = page_w
+        if top < 0:
+            logger.warning(
+                f"top < 0, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}")
+            top = 0
+        if bottom > page_h:
+            logger.warning(
+                f"bottom > page_h, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}")
+            bottom = page_h
+
+        left = round(left * x_scale)
+        top = round(top * y_scale)
+        right = round(right * x_scale)
+        bottom = round(bottom * y_scale)
+        assert (
+                1000 >= right >= left >= 0 and 1000 >= bottom >= top >= 0
+        ), f"Invalid box. right: {right}, left: {left}, bottom: {bottom}, top: {top}"
+        boxes.append([left, top, right, bottom])
+    model_manager = ModelSingleton()
+    model = model_manager.get_model("layoutreader")
+    with torch.no_grad():
+        orders = do_predict(boxes, model)
+    sorted_bboxes = [page_line_list[i] for i in orders]
+
+    return sorted_bboxes
+
+
+def get_line_height(blocks):
+    page_line_height_list = []
+    for block in blocks:
+        if block['type'] in ['text', 'title', 'interline_equation']:
+            for line in block['lines']:
+                bbox = line['bbox']
+                page_line_height_list.append(int(bbox[3]-bbox[1]))
+    if len(page_line_height_list) > 0:
+        return statistics.median(page_line_height_list)
+    else:
+        return 10
+
+
+def parse_page_core(pdf_docs, magic_model, page_id, pdf_bytes_md5, imageWriter, parse_mode):
+    need_drop = False
+    drop_reason = []
+
+    '''get the block info needed later from the magic_model object'''
+    img_blocks = magic_model.get_imgs(page_id)
+    table_blocks = magic_model.get_tables(page_id)
+    discarded_blocks = magic_model.get_discarded(page_id)
+    text_blocks = magic_model.get_text_blocks(page_id)
+    title_blocks = magic_model.get_title_blocks(page_id)
+    inline_equations, interline_equations, interline_equation_blocks = magic_model.get_equations(page_id)
+
+    page_w, page_h = magic_model.get_page_size(page_id)
+
+    spans = magic_model.get_all_spans(page_id)
+
+    '''build spans according to parse_mode'''
+    if parse_mode == "txt":
+        """replace text-type spans from OCR with pymu spans"""
+        pymu_spans = txt_spans_extract(
+            pdf_docs[page_id], inline_equations, interline_equations
+        )
+        spans = replace_text_span(pymu_spans, spans)
+    elif parse_mode == "ocr":
+        pass
+    else:
+        raise Exception("parse_mode must be txt or ocr")
+
+    '''among overlapping spans, drop those with lower confidence'''
+    spans, dropped_spans_by_confidence = remove_overlaps_low_confidence_spans(spans)
+    '''among overlapping spans, drop the smaller ones'''
+    spans, dropped_spans_by_span_overlap = remove_overlaps_min_spans(spans)
+    '''crop screenshots for images and tables'''
+    spans = ocr_cut_image_and_table(spans, pdf_docs[page_id], page_id, pdf_bytes_md5, imageWriter)
+
+    '''gather the bboxes of all blocks together'''
+    # the interline_equation_blocks parameter is not accurate enough; switch to interline_equations later
+    interline_equation_blocks = []
+    if len(interline_equation_blocks) > 0:
+        all_bboxes, all_discarded_blocks = ocr_prepare_bboxes_for_layout_split_v2(
+            img_blocks, table_blocks, discarded_blocks, text_blocks, title_blocks,
+            interline_equation_blocks, page_w, page_h)
+    else:
+        all_bboxes, all_discarded_blocks = ocr_prepare_bboxes_for_layout_split_v2(
+            img_blocks, table_blocks, discarded_blocks, text_blocks, title_blocks,
+            interline_equations, page_w, page_h)
+
+    '''handle the discarded_blocks first, which need no layout'''
+    discarded_block_with_spans, spans = fill_spans_in_blocks(all_discarded_blocks, spans, 0.4)
+    fix_discarded_blocks = fix_discarded_block(discarded_block_with_spans)
+
+    '''skip the page if it has no bboxes'''
+    if len(all_bboxes) == 0:
+        logger.warning(f"skip this page, not found useful bbox, page_id: {page_id}")
+        return ocr_construct_page_component_v2([], [], page_id, page_w, page_h, [],
+                                               [], [], interline_equations, fix_discarded_blocks,
+                                               need_drop, drop_reason)
+
+    '''fill spans into the blocks'''
+    block_with_spans, spans = fill_spans_in_blocks(all_bboxes, spans, 0.3)
+
+    '''fix the blocks'''
+    fix_blocks = fix_block_spans(block_with_spans, img_blocks, table_blocks)
+
+    '''get all lines and compute the body-text line height'''
+    line_height = get_line_height(fix_blocks)
+
+    '''get all lines and sort them'''
+    sorted_bboxes = sort_lines_by_model(fix_blocks, page_w, page_h, line_height)
+
+    '''derive each block's order from the median of its line indices'''
+    fix_blocks = cal_block_index(fix_blocks, sorted_bboxes)
+
+    '''reorder the blocks'''
+    sorted_blocks = sorted(fix_blocks, key=lambda b: b['index'])
+
+    '''get the externalized lists needed for QA'''
+    images, tables, interline_equations = get_qa_need_list_v2(sorted_blocks)
+
+    '''build pdf_info_dict'''
+    page_info = ocr_construct_page_component_v2(sorted_blocks, [], page_id, page_w, page_h, [],
+                                                images, tables, interline_equations, fix_discarded_blocks,
+                                                need_drop, drop_reason)
+    return page_info
+
+
+def pdf_parse_union(pdf_bytes,
+                    model_list,
+                    imageWriter,
+                    parse_mode,
+                    start_page_id=0,
+                    end_page_id=None,
+                    debug_mode=False,
+                    ):
+    pdf_bytes_md5 = compute_md5(pdf_bytes)
+    pdf_docs = fitz.open("pdf", pdf_bytes)
+
+    '''initialize an empty pdf_info_dict'''
+    pdf_info_dict = {}
+
+    '''initialize magic_model with model_list and the docs object'''
+    magic_model = MagicModel(model_list, pdf_docs)
+
+    '''parse the pdf within the given page range'''
+    # end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
+    end_page_id = end_page_id if end_page_id is not None and end_page_id >= 0 else len(pdf_docs) - 1
+
+    if end_page_id > len(pdf_docs) - 1:
+        logger.warning("end_page_id is out of range, use pdf_docs length")
+        end_page_id = len(pdf_docs) - 1
+
+    '''initialize the start time'''
+    start_time = time.time()
+
+    for page_id, page in enumerate(pdf_docs):
+        '''in debug mode, log the time spent parsing each page'''
+        if debug_mode:
+            time_now = time.time()
+            logger.info(
+                f"page_id: {page_id}, last_page_cost_time: {get_delta_time(start_time)}"
+            )
+            start_time = time_now
+
+        '''parse each page of the pdf'''
+        if start_page_id <= page_id <= end_page_id:
+            page_info = parse_page_core(pdf_docs, magic_model, page_id, pdf_bytes_md5, imageWriter, parse_mode)
+        else:
+            page_w = page.rect.width
+            page_h = page.rect.height
+            page_info = ocr_construct_page_component_v2([], [], page_id, page_w, page_h, [],
+                                                [], [], [], [],
+                                                True, "skip page")
+        pdf_info_dict[f"page_{page_id}"] = page_info
+
+    """分段"""
+    para_split(pdf_info_dict, debug_mode=debug_mode)
+
+    """dict转list"""
+    pdf_info_list = dict_to_list(pdf_info_dict)
+    new_pdf_info_dict = {
+        "pdf_info": pdf_info_list,
+    }
+
+    clean_memory()
+
+    return new_pdf_info_dict
+
+
+if __name__ == '__main__':
+    pass
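
A hypothetical end-to-end call into this v2 core; it assumes `doc_analyze` from `magic_pdf.model.doc_analyze_by_custom_model` and the `DiskReaderWriter` from `magic_pdf.rw`, with placeholder paths:

```python
from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
from magic_pdf.pdf_parse_union_core_v2 import pdf_parse_union
from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter

with open("demo.pdf", "rb") as f:
    pdf_bytes = f.read()

model_list = doc_analyze(pdf_bytes, ocr=True)          # per-page model results
image_writer = DiskReaderWriter("/tmp/mineru/images")  # target for cropped images

pdf_info = pdf_parse_union(pdf_bytes, model_list, image_writer,
                           parse_mode="ocr", debug_mode=True)
print(len(pdf_info["pdf_info"]))  # one entry per page
```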

+ 8 - 3
magic_pdf/pipe/AbsPipe.py

@@ -17,7 +17,7 @@ class AbsPipe(ABC):
     PIP_TXT = "txt"
 
     def __init__(self, pdf_bytes: bytes, model_list: list, image_writer: AbsReaderWriter, is_debug: bool = False,
-                 start_page_id=0, end_page_id=None):
+                 start_page_id=0, end_page_id=None, lang=None):
         self.pdf_bytes = pdf_bytes
         self.model_list = model_list
         self.image_writer = image_writer
@@ -25,6 +25,7 @@ class AbsPipe(ABC):
         self.is_debug = is_debug
         self.start_page_id = start_page_id
         self.end_page_id = end_page_id
+        self.lang = lang
     
     def get_compress_pdf_mid_data(self):
         return JsonCompressor.compress_json(self.pdf_mid_data)
@@ -94,7 +95,9 @@ class AbsPipe(ABC):
         """
         pdf_mid_data = JsonCompressor.decompress_json(compressed_pdf_mid_data)
         pdf_info_list = pdf_mid_data["pdf_info"]
-        content_list = union_make(pdf_info_list, MakeMode.STANDARD_FORMAT, drop_mode, img_buket_path)
+        parse_type = pdf_mid_data["_parse_type"]
+        lang = pdf_mid_data.get("_lang", None)
+        content_list = union_make(pdf_info_list, MakeMode.STANDARD_FORMAT, drop_mode, img_buket_path, parse_type, lang)
         return content_list
 
     @staticmethod
@@ -104,7 +107,9 @@ class AbsPipe(ABC):
         """
         pdf_mid_data = JsonCompressor.decompress_json(compressed_pdf_mid_data)
         pdf_info_list = pdf_mid_data["pdf_info"]
-        md_content = union_make(pdf_info_list, md_make_mode, drop_mode, img_buket_path)
+        parse_type = pdf_mid_data["_parse_type"]
+        lang = pdf_mid_data.get("_lang", None)
+        md_content = union_make(pdf_info_list, md_make_mode, drop_mode, img_buket_path, parse_type, lang)
         return md_content
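
Both make-methods now read two extra keys from the decompressed middle data. A sketch of the expected shape, with illustrative values (`_parse_type` and `_lang` are written upstream by `magic_pdf.user_api`; `_lang` is present only when a language was supplied):

```python
pdf_mid_data = {
    "pdf_info": [],        # per-page parse results from pdf_parse_union
    "_parse_type": "ocr",  # "txt" or "ocr"
    "_lang": "en",         # optional; forwarded to union_make when present
}
```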
 
 

+ 6 - 4
magic_pdf/pipe/OCRPipe.py

@@ -10,19 +10,21 @@ from magic_pdf.user_api import parse_ocr_pdf
 class OCRPipe(AbsPipe):
 
     def __init__(self, pdf_bytes: bytes, model_list: list, image_writer: AbsReaderWriter, is_debug: bool = False,
-                 start_page_id=0, end_page_id=None):
-        super().__init__(pdf_bytes, model_list, image_writer, is_debug, start_page_id, end_page_id)
+                 start_page_id=0, end_page_id=None, lang=None):
+        super().__init__(pdf_bytes, model_list, image_writer, is_debug, start_page_id, end_page_id, lang)
 
     def pipe_classify(self):
         pass
 
     def pipe_analyze(self):
         self.model_list = doc_analyze(self.pdf_bytes, ocr=True,
-                                      start_page_id=self.start_page_id, end_page_id=self.end_page_id)
+                                      start_page_id=self.start_page_id, end_page_id=self.end_page_id,
+                                      lang=self.lang)
 
     def pipe_parse(self):
         self.pdf_mid_data = parse_ocr_pdf(self.pdf_bytes, self.model_list, self.image_writer, is_debug=self.is_debug,
-                                          start_page_id=self.start_page_id, end_page_id=self.end_page_id)
+                                          start_page_id=self.start_page_id, end_page_id=self.end_page_id,
+                                          lang=self.lang)
 
     def pipe_mk_uni_format(self, img_parent_path: str, drop_mode=DropMode.WHOLE_PDF):
         result = super().pipe_mk_uni_format(img_parent_path, drop_mode)

+ 6 - 4
magic_pdf/pipe/TXTPipe.py

@@ -11,19 +11,21 @@ from magic_pdf.user_api import parse_txt_pdf
 class TXTPipe(AbsPipe):
 
     def __init__(self, pdf_bytes: bytes, model_list: list, image_writer: AbsReaderWriter, is_debug: bool = False,
-                 start_page_id=0, end_page_id=None):
-        super().__init__(pdf_bytes, model_list, image_writer, is_debug, start_page_id, end_page_id)
+                 start_page_id=0, end_page_id=None, lang=None):
+        super().__init__(pdf_bytes, model_list, image_writer, is_debug, start_page_id, end_page_id, lang)
 
     def pipe_classify(self):
         pass
 
     def pipe_analyze(self):
         self.model_list = doc_analyze(self.pdf_bytes, ocr=False,
-                                      start_page_id=self.start_page_id, end_page_id=self.end_page_id)
+                                      start_page_id=self.start_page_id, end_page_id=self.end_page_id,
+                                      lang=self.lang)
 
     def pipe_parse(self):
         self.pdf_mid_data = parse_txt_pdf(self.pdf_bytes, self.model_list, self.image_writer, is_debug=self.is_debug,
-                                          start_page_id=self.start_page_id, end_page_id=self.end_page_id)
+                                          start_page_id=self.start_page_id, end_page_id=self.end_page_id,
+                                          lang=self.lang)
 
     def pipe_mk_uni_format(self, img_parent_path: str, drop_mode=DropMode.WHOLE_PDF):
         result = super().pipe_mk_uni_format(img_parent_path, drop_mode)

+ 11 - 7
magic_pdf/pipe/UNIPipe.py

@@ -14,9 +14,9 @@ from magic_pdf.user_api import parse_union_pdf, parse_ocr_pdf
 class UNIPipe(AbsPipe):
 
     def __init__(self, pdf_bytes: bytes, jso_useful_key: dict, image_writer: AbsReaderWriter, is_debug: bool = False,
-                 start_page_id=0, end_page_id=None):
+                 start_page_id=0, end_page_id=None, lang=None):
         self.pdf_type = jso_useful_key["_pdf_type"]
-        super().__init__(pdf_bytes, jso_useful_key["model_list"], image_writer, is_debug, start_page_id, end_page_id)
+        super().__init__(pdf_bytes, jso_useful_key["model_list"], image_writer, is_debug, start_page_id, end_page_id, lang)
         if len(self.model_list) == 0:
             self.input_model_is_empty = True
         else:
@@ -28,22 +28,26 @@ class UNIPipe(AbsPipe):
     def pipe_analyze(self):
         if self.pdf_type == self.PIP_TXT:
             self.model_list = doc_analyze(self.pdf_bytes, ocr=False,
-                                          start_page_id=self.start_page_id, end_page_id=self.end_page_id)
+                                          start_page_id=self.start_page_id, end_page_id=self.end_page_id,
+                                          lang=self.lang)
         elif self.pdf_type == self.PIP_OCR:
             self.model_list = doc_analyze(self.pdf_bytes, ocr=True,
-                                          start_page_id=self.start_page_id, end_page_id=self.end_page_id)
+                                          start_page_id=self.start_page_id, end_page_id=self.end_page_id,
+                                          lang=self.lang)
 
     def pipe_parse(self):
         if self.pdf_type == self.PIP_TXT:
             self.pdf_mid_data = parse_union_pdf(self.pdf_bytes, self.model_list, self.image_writer,
                                                 is_debug=self.is_debug, input_model_is_empty=self.input_model_is_empty,
-                                                start_page_id=self.start_page_id, end_page_id=self.end_page_id)
+                                                start_page_id=self.start_page_id, end_page_id=self.end_page_id,
+                                                lang=self.lang)
         elif self.pdf_type == self.PIP_OCR:
             self.pdf_mid_data = parse_ocr_pdf(self.pdf_bytes, self.model_list, self.image_writer,
                                               is_debug=self.is_debug,
-                                              start_page_id=self.start_page_id, end_page_id=self.end_page_id)
+                                              start_page_id=self.start_page_id, end_page_id=self.end_page_id,
+                                              lang=self.lang)
 
-    def pipe_mk_uni_format(self, img_parent_path: str, drop_mode=DropMode.WHOLE_PDF):
+    def pipe_mk_uni_format(self, img_parent_path: str, drop_mode=DropMode.NONE_WITH_REASON):
         result = super().pipe_mk_uni_format(img_parent_path, drop_mode)
         logger.info("uni_pipe mk content list finished")
         return result
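
A hypothetical driver for the updated pipe, showing where `lang` enters (the writer path and file name are placeholders, and `pipe_classify` is assumed to fill in the empty `_pdf_type`):

```python
from magic_pdf.pipe.UNIPipe import UNIPipe
from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter

with open("demo.pdf", "rb") as f:
    pdf_bytes = f.read()

jso_useful_key = {"_pdf_type": "", "model_list": []}
pipe = UNIPipe(pdf_bytes, jso_useful_key, DiskReaderWriter("/tmp/mineru/images"),
               is_debug=True, lang="en")
pipe.pipe_classify()  # picks the txt or ocr path
pipe.pipe_analyze()   # doc_analyze(..., lang=self.lang)
pipe.pipe_parse()
content_list = pipe.pipe_mk_uni_format("images")  # DropMode.NONE_WITH_REASON by default
```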

+ 53 - 0
magic_pdf/pre_proc/ocr_detect_all_bboxes.py

@@ -60,6 +60,59 @@ def ocr_prepare_bboxes_for_layout_split(img_blocks, table_blocks, discarded_bloc
     return all_bboxes, all_discarded_blocks, drop_reasons
 
 
+def ocr_prepare_bboxes_for_layout_split_v2(img_blocks, table_blocks, discarded_blocks, text_blocks,
+                                        title_blocks, interline_equation_blocks, page_w, page_h):
+    all_bboxes = []
+    all_discarded_blocks = []
+    for image in img_blocks:
+        x0, y0, x1, y1 = image['bbox']
+        all_bboxes.append([x0, y0, x1, y1, None, None, None, BlockType.Image, None, None, None, None, image["score"]])
+
+    for table in table_blocks:
+        x0, y0, x1, y1 = table['bbox']
+        all_bboxes.append([x0, y0, x1, y1, None, None, None, BlockType.Table, None, None, None, None, table["score"]])
+
+    for text in text_blocks:
+        x0, y0, x1, y1 = text['bbox']
+        all_bboxes.append([x0, y0, x1, y1, None, None, None, BlockType.Text, None, None, None, None, text["score"]])
+
+    for title in title_blocks:
+        x0, y0, x1, y1 = title['bbox']
+        all_bboxes.append([x0, y0, x1, y1, None, None, None, BlockType.Title, None, None, None, None, title["score"]])
+
+    for interline_equation in interline_equation_blocks:
+        x0, y0, x1, y1 = interline_equation['bbox']
+        all_bboxes.append([x0, y0, x1, y1, None, None, None, BlockType.InterlineEquation, None, None, None, None, interline_equation["score"]])
+
+    '''resolve block nesting issues'''
+    '''when a text box overlaps a title box, trust the text box'''
+    all_bboxes = fix_text_overlap_title_blocks(all_bboxes)
+    '''when any box overlaps a discarded box, trust the discarded box'''
+    all_bboxes = remove_need_drop_blocks(all_bboxes, discarded_blocks)
+
+    # conflicts between interline_equation and title/text boxes are handled in two cases
+    '''when the IoU between an interline_equation box and a text-type box is close to 1, trust the equation box'''
+    all_bboxes = fix_interline_equation_overlap_text_blocks_with_hi_iou(all_bboxes)
+    '''when an interline_equation box is contained in a text-type box and is much smaller, trust the text box and drop the equation box'''
+    # handled later by the big-box-contains-small-box logic
+
+    '''keep only those discarded_blocks wider than 1/3 of the page width, taller than 10, and in the lower 50% of the page (footnotes only)'''
+    for discarded in discarded_blocks:
+        x0, y0, x1, y1 = discarded['bbox']
+        all_discarded_blocks.append([x0, y0, x1, y1, None, None, None, BlockType.Discarded, None, None, None, None, discarded["score"]])
+        # add footnotes to all_bboxes for layout computation
+        # if (x1 - x0) > (page_w / 3) and (y1 - y0) > 10 and y0 > (page_h / 2):
+        #     all_bboxes.append([x0, y0, x1, y1, None, None, None, BlockType.Footnote, None, None, None, None, discarded["score"]])
+
+    '''after the above, if a big box still contains a small box, remove the small one'''
+    all_bboxes = remove_overlaps_min_blocks(all_bboxes)
+    all_discarded_blocks = remove_overlaps_min_blocks(all_discarded_blocks)
+    '''separate the remaining bboxes to avoid errors when splitting the layout later'''
+    all_bboxes, drop_reasons = remove_overlap_between_bbox_for_block(all_bboxes)
+
+    return all_bboxes, all_discarded_blocks
+
+
 def fix_interline_equation_overlap_text_blocks_with_hi_iou(all_bboxes):
     # 先提取所有text和interline block
     text_blocks = []

+ 1 - 2
magic_pdf/pre_proc/ocr_dict_merge.py

@@ -49,8 +49,7 @@ def merge_spans_to_line(spans):
                 continue
 
             # if the current span overlaps the last span of the current line on the y-axis, add it to the current line
-            if __is_overlaps_y_exceeds_threshold(span['bbox'],
-                                                 current_line[-1]['bbox']):
+            if __is_overlaps_y_exceeds_threshold(span['bbox'], current_line[-1]['bbox'], 0.6):
                 current_line.append(span)
             else:
                 # otherwise, start a new line

+ 7 - 7
magic_pdf/resources/model_config/UniMERNet/demo.yaml

@@ -2,13 +2,13 @@ model:
   arch: unimernet
   model_type: unimernet
   model_config:
-    model_name: ./models
-    max_seq_len: 1024
-    length_aware: False
+    model_name: ./models/unimernet_base
+    max_seq_len: 1536
+
   load_pretrained: True
-  pretrained: ./models/pytorch_model.bin
+  pretrained: './models/unimernet_base/pytorch_model.pth'
   tokenizer_config:
-    path: ./models
+    path: ./models/unimernet_base
 
 datasets:
   formula_rec_eval:
@@ -18,7 +18,7 @@ datasets:
         image_size:
           - 192
           - 672
-   
+
 run:
   runner: runner_iter
   task: unimernet_train
@@ -43,4 +43,4 @@ run:
   distributed_type: ddp  # or fsdp when train llm
 
   generate_cfg:
-    temperature: 0.0
+    temperature: 0.0

+ 1 - 1
magic_pdf/resources/model_config/model_configs.yaml

@@ -10,6 +10,6 @@ config:
 weights:
   layout: Layout/model_final.pth
   mfd: MFD/weights.pt
-  mfr: MFR/UniMERNet
+  mfr: MFR/unimernet_small
   struct_eqtable: TabRec/StructEqTable
   TableMaster: TabRec/TableMaster

+ 14 - 1
magic_pdf/tools/cli.py

@@ -45,6 +45,18 @@ without method specified, auto will be used by default.""",
     default='auto',
 )
 @click.option(
+    '-l',
+    '--lang',
+    'lang',
+    type=str,
+    help="""
+    Input the language of the pdf (if known) to improve OCR accuracy. Optional.
+    Use the language abbreviation from the list at:
+    https://paddlepaddle.github.io/PaddleOCR/en/ppocr/blog/multi_languages.html#5-support-languages-and-abbreviations
+    """,
+    default=None,
+)
+@click.option(
     '-d',
     '--debug',
     'debug_able',
@@ -68,7 +80,7 @@ without method specified, auto will be used by default.""",
     help='The ending page for PDF parsing, beginning from 0.',
     default=None,
 )
-def cli(path, output_dir, method, debug_able, start_page_id, end_page_id):
+def cli(path, output_dir, method, lang, debug_able, start_page_id, end_page_id):
     model_config.__use_inside_model__ = True
     model_config.__model_mode__ = 'full'
     os.makedirs(output_dir, exist_ok=True)
@@ -90,6 +102,7 @@ def cli(path, output_dir, method, debug_able, start_page_id, end_page_id):
                 debug_able,
                 start_page_id=start_page_id,
                 end_page_id=end_page_id,
+                lang=lang
             )
 
         except Exception as e:
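
With the option wired in, a typical invocation would be `magic-pdf -p demo.pdf -l en`, where `en` is one of the PaddleOCR language abbreviations linked in the help text.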

+ 13 - 8
magic_pdf/tools/common.py

@@ -7,7 +7,7 @@ from loguru import logger
 
 import magic_pdf.model as model_config
 from magic_pdf.libs.draw_bbox import (draw_layout_bbox, draw_span_bbox,
-                                      drow_model_bbox)
+                                      draw_model_bbox, draw_line_sort_bbox)
 from magic_pdf.libs.MakeContentConfig import DropMode, MakeMode
 from magic_pdf.pipe.OCRPipe import OCRPipe
 from magic_pdf.pipe.TXTPipe import TXTPipe
@@ -39,16 +39,19 @@ def do_parse(
     f_dump_middle_json=True,
     f_dump_model_json=True,
     f_dump_orig_pdf=True,
-    f_dump_content_list=False,
+    f_dump_content_list=True,
     f_make_md_mode=MakeMode.MM_MD,
     f_draw_model_bbox=False,
+    f_draw_line_sort_bbox=False,
     start_page_id=0,
     end_page_id=None,
+    lang=None,
 ):
     if debug_able:
-        logger.warning("debug mode is on")
-        f_dump_content_list = True
+        logger.warning('debug mode is on')
+        # f_dump_content_list = True
         f_draw_model_bbox = True
+        f_draw_line_sort_bbox = True
 
     orig_model_list = copy.deepcopy(model_list)
     local_image_dir, local_md_dir = prepare_env(output_dir, pdf_file_name,
@@ -61,13 +64,13 @@ def do_parse(
     if parse_method == 'auto':
         jso_useful_key = {'_pdf_type': '', 'model_list': model_list}
         pipe = UNIPipe(pdf_bytes, jso_useful_key, image_writer, is_debug=True,
-                       start_page_id=start_page_id, end_page_id=end_page_id)
+                       start_page_id=start_page_id, end_page_id=end_page_id, lang=lang)
     elif parse_method == 'txt':
         pipe = TXTPipe(pdf_bytes, model_list, image_writer, is_debug=True,
-                       start_page_id=start_page_id, end_page_id=end_page_id)
+                       start_page_id=start_page_id, end_page_id=end_page_id, lang=lang)
     elif parse_method == 'ocr':
         pipe = OCRPipe(pdf_bytes, model_list, image_writer, is_debug=True,
-                       start_page_id=start_page_id, end_page_id=end_page_id)
+                       start_page_id=start_page_id, end_page_id=end_page_id, lang=lang)
     else:
         logger.error('unknown parse method')
         exit(1)
@@ -89,7 +92,9 @@ def do_parse(
     if f_draw_span_bbox:
         draw_span_bbox(pdf_info, pdf_bytes, local_md_dir, pdf_file_name)
     if f_draw_model_bbox:
-        drow_model_bbox(copy.deepcopy(orig_model_list), pdf_bytes, local_md_dir, pdf_file_name)
+        draw_model_bbox(copy.deepcopy(orig_model_list), pdf_bytes, local_md_dir, pdf_file_name)
+    if f_draw_line_sort_bbox:
+        draw_line_sort_bbox(pdf_info, pdf_bytes, local_md_dir, pdf_file_name)
 
     md_content = pipe.pipe_mk_markdown(image_dir,
                                        drop_mode=DropMode.NONE,

+ 16 - 5
magic_pdf/user_api.py

@@ -26,7 +26,7 @@ PARSE_TYPE_OCR = "ocr"
 
 
 def parse_txt_pdf(pdf_bytes: bytes, pdf_models: list, imageWriter: AbsReaderWriter, is_debug=False,
-                  start_page_id=0, end_page_id=None,
+                  start_page_id=0, end_page_id=None, lang=None,
                   *args, **kwargs):
     """
     parse a text-based pdf
@@ -44,11 +44,14 @@ def parse_txt_pdf(pdf_bytes: bytes, pdf_models: list, imageWriter: AbsReaderWrit
 
     pdf_info_dict["_version_name"] = __version__
 
+    if lang is not None:
+        pdf_info_dict["_lang"] = lang
+
     return pdf_info_dict
 
 
 def parse_ocr_pdf(pdf_bytes: bytes, pdf_models: list, imageWriter: AbsReaderWriter, is_debug=False,
-                  start_page_id=0, end_page_id=None,
+                  start_page_id=0, end_page_id=None, lang=None,
                   *args, **kwargs):
     """
     parse an ocr-based pdf
@@ -66,12 +69,15 @@ def parse_ocr_pdf(pdf_bytes: bytes, pdf_models: list, imageWriter: AbsReaderWrit
 
     pdf_info_dict["_version_name"] = __version__
 
+    if lang is not None:
+        pdf_info_dict["_lang"] = lang
+
     return pdf_info_dict
 
 
 def parse_union_pdf(pdf_bytes: bytes, pdf_models: list, imageWriter: AbsReaderWriter, is_debug=False,
                     input_model_is_empty: bool = False,
-                    start_page_id=0, end_page_id=None,
+                    start_page_id=0, end_page_id=None, lang=None,
                     *args, **kwargs):
     """
     a pdf mixing ocr and text content; parse everything
@@ -95,9 +101,11 @@ def parse_union_pdf(pdf_bytes: bytes, pdf_models: list, imageWriter: AbsReaderWr
     if pdf_info_dict is None or pdf_info_dict.get("_need_drop", False):
         logger.warning(f"parse_pdf_by_txt drop or error, switch to parse_pdf_by_ocr")
         if input_model_is_empty:
-            pdf_models = doc_analyze(pdf_bytes, ocr=True,
+            pdf_models = doc_analyze(pdf_bytes,
+                                     ocr=True,
                                      start_page_id=start_page_id,
-                                     end_page_id=end_page_id)
+                                     end_page_id=end_page_id,
+                                     lang=lang)
         pdf_info_dict = parse_pdf(parse_pdf_by_ocr)
         if pdf_info_dict is None:
             raise Exception("Both parse_pdf_by_txt and parse_pdf_by_ocr failed.")
@@ -108,4 +116,7 @@ def parse_union_pdf(pdf_bytes: bytes, pdf_models: list, imageWriter: AbsReaderWr
 
     pdf_info_dict["_version_name"] = __version__
 
+    if lang is not None:
+        pdf_info_dict["_lang"] = lang
+
     return pdf_info_dict

+ 15 - 2
docs/FAQ_en_us.md → old_docs/FAQ_en_us.md

@@ -11,7 +11,7 @@ pip install magic-pdf[full]
 
 ### 2. Encountering the error `pickle.UnpicklingError: invalid load key, 'v'.` during use
 
-This might be due to an incomplete download of the model file. You can try re-downloading the model file and then try again.  
+This might be due to an incomplete download of the model file. You can try re-downloading the model file and then try again.
 Reference: https://github.com/opendatalab/MinerU/issues/143
 
 ### 3. Where should the model files be downloaded and how should the `/models-dir` configuration be set?
@@ -24,7 +24,7 @@ The path for the model files is configured in "magic-pdf.json". just like:
 }
 ```
 
-This path is an absolute path, not a relative path. You can obtain the absolute path in the models directory using the "pwd" command.  
+This path is an absolute path, not a relative path. You can obtain the absolute path in the models directory using the "pwd" command.
 Reference: https://github.com/opendatalab/MinerU/issues/155#issuecomment-2230216874
 
 ### 4. Encountered the error `ImportError: libGL.so.1: cannot open shared object file: No such file or directory` in Ubuntu 22.04 on WSL2
@@ -38,9 +38,22 @@ sudo apt-get install libgl1-mesa-glx
 Reference: https://github.com/opendatalab/MinerU/issues/388
 
 ### 5. Encountered error `ModuleNotFoundError: No module named 'fairscale'`
+
 You need to uninstall the module and reinstall it:
+
 ```bash
 pip uninstall fairscale
 pip install fairscale
 ```
+
 Reference: https://github.com/opendatalab/MinerU/issues/411
+
+### 6. On some newer devices like the H100, the text parsed during OCR using CUDA acceleration is garbled.
+
+CUDA 11 has poor compatibility with newer graphics cards, so the CUDA version used by Paddle needs to be upgraded.
+
+```bash
+pip install paddlepaddle-gpu==3.0.0b1 -i https://www.paddlepaddle.org.cn/packages/stable/cu123/
+```
+
+Reference: https://github.com/opendatalab/MinerU/issues/558

+ 21 - 3
docs/FAQ_zh_cn.md → old_docs/FAQ_zh_cn.md

@@ -1,9 +1,10 @@
 # FAQ
 
-### 1. On newer versions of macOS, installing with pip install magic-pdf[full] fails with zsh: no matches found: magic-pdf[full]
+### 1. On newer versions of macOS, installing with pip install magic-pdf\[full\] fails with zsh: no matches found: magic-pdf\[full\]
 
 On macOS, the default shell has switched from Bash to Z shell, and Z shell applies special matching logic to certain string patterns, which can cause the "no matches found" error.
 You can disable the globbing feature on the command line and then retry the install command
+
 ```bash
 setopt no_nomatch
 pip install magic-pdf[full]
@@ -11,33 +12,50 @@ pip install magic-pdf[full]
 
 ### 2. Encountering the error _pickle.UnpicklingError: invalid load key, 'v'. during use
 
-This may be caused by an incomplete model file download; try re-downloading the model files and retrying  
+This may be caused by an incomplete model file download; try re-downloading the model files and retrying
 Reference: https://github.com/opendatalab/MinerU/issues/143
 
 ### 3. Where should the model files be downloaded / how should models-dir be configured
 
 The model file path is set in "magic-pdf.json":
+
 ```json
 {
   "models-dir": "/tmp/models"
 }
 ```
+
 That is where it is configured.
-This path is an absolute path, not a relative one; you can obtain the absolute path by running "pwd" in the models directory.  
+This path is an absolute path, not a relative one; you can obtain the absolute path by running "pwd" in the models directory.
 Reference: https://github.com/opendatalab/MinerU/issues/155#issuecomment-2230216874
 
 ### 4. Encountering the error `ImportError: libGL.so.1: cannot open shared object file: No such file or directory` in Ubuntu 22.04 on WSL2
 
 Ubuntu 22.04 on WSL2 is missing the `libgl` library; this can be fixed by installing it with the following command:
+
 ```bash
 sudo apt-get install libgl1-mesa-glx
 ```
+
 Reference: https://github.com/opendatalab/MinerU/issues/388
 
 ### 5. Encountering the error `ModuleNotFoundError: No module named 'fairscale'`
+
 Uninstall the module and reinstall it
+
 ```bash
 pip uninstall fairscale
 pip install fairscale
 ```
+
 Reference: https://github.com/opendatalab/MinerU/issues/411
+
+### 6. On some newer devices such as the H100, text parsed with CUDA-accelerated OCR is garbled.
+
+CUDA 11 has poor compatibility with newer graphics cards; the CUDA version used by Paddle needs to be upgraded
+
+```bash
+pip install paddlepaddle-gpu==3.0.0b1 -i https://www.paddlepaddle.org.cn/packages/stable/cu123/
+```
+
+Reference: https://github.com/opendatalab/MinerU/issues/558

+ 120 - 0
old_docs/README_Ubuntu_CUDA_Acceleration_en_US.md

@@ -0,0 +1,120 @@
+# Ubuntu 22.04 LTS
+
+### 1. Check if NVIDIA Drivers Are Installed
+
+```sh
+nvidia-smi
+```
+
+If you see information similar to the following, it means that the NVIDIA drivers are already installed, and you can skip Step 2.
+
+```plaintext
++---------------------------------------------------------------------------------------+
+| NVIDIA-SMI 537.34                 Driver Version: 537.34       CUDA Version: 12.2     |
+|-----------------------------------------+----------------------+----------------------+
+| GPU  Name                     TCC/WDDM  | Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp   Perf          Pwr:Usage/Cap |         Memory-Usage | GPU-Util  Compute M. |
+|                                         |                      |               MIG M. |
+|=========================================+======================+======================|
+|   0  NVIDIA GeForce RTX 3060 Ti   WDDM  | 00000000:01:00.0  On |                  N/A |
+|  0%   51C    P8              12W / 200W |   1489MiB /  8192MiB |      5%      Default |
+|                                         |                      |                  N/A |
++-----------------------------------------+----------------------+----------------------+
+```
+
+### 2. Install the Driver
+
+If no driver is installed, use the following command:
+
+```sh
+sudo apt-get update
+sudo apt-get install nvidia-driver-545
+```
+
+Install the proprietary driver and restart your computer after installation.
+
+```sh
+reboot
+```
+
+### 3. Install Anaconda
+
+If Anaconda is already installed, skip this step.
+
+```sh
+wget https://repo.anaconda.com/archive/Anaconda3-2024.06-1-Linux-x86_64.sh
+bash Anaconda3-2024.06-1-Linux-x86_64.sh
+```
+
+In the final step, enter `yes`, close the terminal, and reopen it.
+
+### 4. Create an Environment Using Conda
+
+Specify Python version 3.10.
+
+```sh
+conda create -n MinerU python=3.10
+conda activate MinerU
+```
+
+### 5. Install Applications
+
+```sh
+pip install -U magic-pdf[full] --extra-index-url https://wheels.myhloli.com
+```
+
+❗ After installation, make sure to check the version of `magic-pdf` using the following command:
+
+```sh
+magic-pdf --version
+```
+
+If the version number is less than 0.7.0, please report the issue.
+
+### 6. Download Models
+
+Refer to detailed instructions on [how to download model files](how_to_download_models_en.md).
+
+### 7. Understand the Location of the Configuration File
+
+After completing the [6. Download Models](#6-download-models) step, the script will automatically generate a `magic-pdf.json` file in the user directory and configure the default model path.
+You can find the `magic-pdf.json` file in your user directory.
+
+> The user directory for Linux is "/home/username".
+
+### 8. First Run
+
+Download a sample file from the repository and test it.
+
+```sh
+wget https://github.com/opendatalab/MinerU/raw/master/demo/small_ocr.pdf
+magic-pdf -p small_ocr.pdf
+```
+
+### 9. Test CUDA Acceleration
+
+If your graphics card has at least **8GB** of VRAM, follow these steps to test CUDA acceleration:
+
+> ❗ Running this application on 8GB of VRAM is extremely tight; close all other programs that use VRAM to ensure a full 8GB is available while it runs.
+
+1. Modify the value of `"device-mode"` in the `magic-pdf.json` configuration file located in your home directory.
+   ```json
+   {
+     "device-mode": "cuda"
+   }
+   ```
+2. Test CUDA acceleration with the following command:
+   ```sh
+   magic-pdf -p small_ocr.pdf
+   ```
+
+### 10. Enable CUDA Acceleration for OCR
+
+1. Download `paddlepaddle-gpu`. Installation will automatically enable OCR acceleration.
+   ```sh
+   python -m pip install paddlepaddle-gpu==3.0.0b1 -i https://www.paddlepaddle.org.cn/packages/stable/cu118/
+   ```
+2. Test OCR acceleration with the following command:
+   ```sh
+   magic-pdf -p small_ocr.pdf
+   ```

+ 45 - 28
docs/README_Ubuntu_CUDA_Acceleration_zh_CN.md → old_docs/README_Ubuntu_CUDA_Acceleration_zh_CN.md

@@ -1,10 +1,13 @@
 # Ubuntu 22.04 LTS
 
 ## 1. Check whether the NVIDIA driver is installed
+
 ```bash
-nvidia-smi 
+nvidia-smi
 ```
+
 If you see output like the following, the NVIDIA driver is already installed and you can skip step 2
+
 ```
 +---------------------------------------------------------------------------------------+
 | NVIDIA-SMI 537.34                 Driver Version: 537.34       CUDA Version: 12.2     |
@@ -18,96 +21,110 @@ nvidia-smi
 |                                         |                      |                  N/A |
 +-----------------------------------------+----------------------+----------------------+
 ```
+
 ## 2. Install the driver
+
 If no driver is installed, run the following commands
+
 ```bash
 sudo apt-get update
 sudo apt-get install nvidia-driver-545
 ```
+
 Install the proprietary driver and reboot the machine after installation
+
 ```bash
 reboot
 ```
+
 ## 3. Install Anaconda
+
 Skip this step if conda is already installed
+
 ```bash
 wget -U NoSuchBrowser/1.0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/archive/Anaconda3-2024.06-1-Linux-x86_64.sh
 bash Anaconda3-2024.06-1-Linux-x86_64.sh
 ```
+
 Enter yes at the last step, then close and reopen the terminal
+
 ## 4. Create an environment with conda
+
 The Python version must be 3.10
+
 ```bash
 conda create -n MinerU python=3.10
 conda activate MinerU
 ```
+
 ## 5. Install the application
+
 ```bash
 pip install -U magic-pdf[full] --extra-index-url https://wheels.myhloli.com -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```
+
 > ❗️After the download completes, be sure to verify that the magic-pdf version is correct with the following command
-> 
+>
 > ```bash
 > magic-pdf --version
->```
+> ```
+>
 > 如果版本号小于0.7.0,请到issue中向我们反馈
 
 ## 6. Download Models
-For details, see [how to download model files](how_to_download_models_zh_cn.md)
-After downloading, move the models directory to an SSD partition with ample free space
-> ❗️After downloading, be sure to check that the model files are complete
->
-> Check that the model file sizes match the description on the page; if possible, verify the download integrity via sha256
->
-## 7. Configuration Before the First Run
-The [magic-pdf.template.json](../magic-pdf.template.json) configuration template is available in the repository root
-> ❗️Be sure to run the following commands to copy the configuration file into your user directory, or the program will not run
->
-> The user directory on Linux is "/home/username"
-```bash
-wget https://gitee.com/myhloli/MinerU/raw/master/magic-pdf.template.json
-cp magic-pdf.template.json ~/magic-pdf.json
-```
 
-Find the magic-pdf.json file in your user directory and set "models-dir" to the directory containing the model weights downloaded in [6. Download Models](#6-下载模型)
-> ❗️Be sure to configure the absolute path of the model weights directory correctly, or the program will fail to run because the model files cannot be found
->
-```json
-{
-  "models-dir": "/tmp/models"
-}
-```
+For details, see [how to download model files](how_to_download_models_zh_cn.md)
+
+## 7. Understand the Location of the Configuration File
+
+After completing step [6. Download Models](#6-下载模型), the script automatically generates a magic-pdf.json file in your user directory and configures the default model path.
+You can find the magic-pdf.json file in your user directory.
+
+> The user directory on Linux is "/home/username"
 
 ## 8. First Run
+
 Download a sample file from the repository and test it:
+
 ```bash
 wget https://gitee.com/myhloli/MinerU/raw/master/demo/small_ocr.pdf
 magic-pdf -p small_ocr.pdf
 ```
+
 ## 9. Test CUDA Acceleration
-If your graphics card has at least 8GB of VRAM, you can follow the steps below to test CUDA-accelerated parsing.
+
+If your graphics card has at least **8GB** of VRAM, you can follow the steps below to test CUDA-accelerated parsing.
+
+> ❗️Because 8GB of VRAM is barely sufficient for this application, close all other programs that use VRAM so that the full 8GB is available at runtime.
 
 **1. Modify the value of "device-mode" in the magic-pdf.json configuration file in your user directory**
+
 ```json
 {
   "device-mode":"cuda"
 }
 ```
+
 **2. Run the following command to test CUDA acceleration**
+
 ```bash
 magic-pdf -p small_ocr.pdf
 ```
+
 > Tip: whether CUDA acceleration is active can be roughly judged from the per-stage times in the log; typically, `layout detection cost` and `mfr time` should be at least 10x faster.
 
 ## 10. Enable CUDA Acceleration for OCR
-> ❗️The following steps require at least 16GB of VRAM; with less, the program may crash or slow down due to insufficient memory.
 
 **1. Install paddlepaddle-gpu; OCR acceleration is enabled automatically after installation**
+
 ```bash
 python -m pip install paddlepaddle-gpu==3.0.0b1 -i https://www.paddlepaddle.org.cn/packages/stable/cu118/
 ```
+
 **2. Run the following command to test OCR acceleration**
+
 ```bash
 magic-pdf -p small_ocr.pdf
 ```
+
 > Tip: whether OCR acceleration is active can be roughly judged from the per-stage times in the log; typically, `ocr cost` should be at least 10x faster.

+ 102 - 0
old_docs/README_Windows_CUDA_Acceleration_en_US.md

@@ -0,0 +1,102 @@
+# Windows 10/11
+
+### 1. Install CUDA and cuDNN
+
+Required versions: CUDA 11.8 + cuDNN 8.7.0
+
+- CUDA 11.8: https://developer.nvidia.com/cuda-11-8-0-download-archive
+- cuDNN v8.7.0 (November 28th, 2022), for CUDA 11.x: https://developer.nvidia.com/rdp/cudnn-archive
+
+### 2. Install Anaconda
+
+If Anaconda is already installed, you can skip this step.
+
+Download link: https://repo.anaconda.com/archive/Anaconda3-2024.06-1-Windows-x86_64.exe
+
+### 3. Create an Environment Using Conda
+
+Python version must be 3.10.
+
+```
+conda create -n MinerU python=3.10
+conda activate MinerU
+```
+
+### 4. Install the Application
+
+```
+pip install -U magic-pdf[full] --extra-index-url https://wheels.myhloli.com
+```
+
+> ❗️After installation, verify the version of `magic-pdf`:
+>
+> ```bash
+> magic-pdf --version
+> ```
+>
+> If the version number is less than 0.7.0, please report it in the issues section.
+
+### 5. Download Models
+
+Refer to detailed instructions on [how to download model files](how_to_download_models_en.md).
+
+### 6. Understand the Location of the Configuration File
+
+After completing the [5. Download Models](#5-download-models) step, the script will automatically generate a `magic-pdf.json` file in the user directory and configure the default model path.
+You can find the `magic-pdf.json` file in your user directory.
+
+> The user directory for Windows is "C:/Users/username".
+
+### 7. First Run
+
+Download a sample file from the repository and test it.
+
+```powershell
+wget https://github.com/opendatalab/MinerU/raw/master/demo/small_ocr.pdf -O small_ocr.pdf
+magic-pdf -p small_ocr.pdf
+```
+
+### 8. Test CUDA Acceleration
+
+If your graphics card has at least 8GB of VRAM, follow these steps to test CUDA-accelerated parsing performance.
+
+> ❗ Because 8GB of VRAM is barely sufficient for this application, close all other programs that use VRAM so that the full 8GB is available at runtime.
+
+1. **Overwrite the installation of torch and torchvision** supporting CUDA.
+
+   ```
+   pip install --force-reinstall torch==2.3.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu118
+   ```
+
+   > ❗️Ensure the following versions are specified in the command:
+   >
+   > ```
+   > torch==2.3.1 torchvision==0.18.1
+   > ```
+   >
+   > These are the highest versions we support; if you don't pin them, higher versions will be installed automatically and the program will fail to run. A quick way to verify the installed build is sketched after this list.
+
+2. **Modify the value of `"device-mode"`** in the `magic-pdf.json` configuration file located in your user directory.
+
+   ```json
+   {
+     "device-mode": "cuda"
+   }
+   ```
+
+3. **Run the following command to test CUDA acceleration**:
+
+   ```
+   magic-pdf -p small_ocr.pdf
+   ```
+
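+To verify that the CUDA-enabled torch build is in use, a minimal check (on a correct install it should print `True` and a version ending in `+cu118`):
+
+```
+python -c "import torch; print(torch.cuda.is_available(), torch.__version__)"
+```
+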
+### 9. Enable CUDA Acceleration for OCR
+
+1. **Download paddlepaddle-gpu**, which will automatically enable OCR acceleration upon installation.
+   ```
+   pip install paddlepaddle-gpu==2.6.1
+   ```
+2. **Run the following command to test OCR acceleration**:
+   ```
+   magic-pdf -p small_ocr.pdf
+   ```

+ 40 - 35
docs/README_Windows_CUDA_Acceleration_zh_CN.md → old_docs/README_Windows_CUDA_Acceleration_zh_CN.md

@@ -3,103 +3,108 @@
 ## 1. Install CUDA and cuDNN
 
 Required versions: CUDA 11.8 + cuDNN 8.7.0
+
 - CUDA 11.8 https://developer.nvidia.com/cuda-11-8-0-download-archive
 - cuDNN v8.7.0 (November 28th, 2022), for CUDA 11.x https://developer.nvidia.com/rdp/cudnn-archive
 
 ## 2. Install Anaconda
+
 If conda is already installed, you can skip this step.
 
 Download link:
 https://mirrors.tuna.tsinghua.edu.cn/anaconda/archive/Anaconda3-2024.06-1-Windows-x86_64.exe
 
 ## 3. Create an Environment Using Conda
+
 Python 3.10 must be specified.
+
 ```bash
 conda create -n MinerU python=3.10
 conda activate MinerU
 ```
+
 ## 4. Install the Application
+
 ```bash
 pip install -U magic-pdf[full] --extra-index-url https://wheels.myhloli.com -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```
+
 > ❗️After installation, be sure to verify the magic-pdf version with the following command:
-> 
+>
 > ```bash
 > magic-pdf --version
->```
+> ```
+>
 > If the version number is less than 0.7.0, please report it in the issues section.
 
 ## 5. Download Models
-For details, see [how to download model files](how_to_download_models_zh_cn.md)
-After downloading, move the models directory to an SSD partition with ample free space
-> ❗️After downloading, be sure to check that the model files are complete
->
-> Check that the model file sizes match the description on the page; if possible, verify the download integrity via sha256
-
-## 6. Configuration Before the First Run
-The [magic-pdf.template.json](../magic-pdf.template.json) configuration template is available in the repository root
-> ❗️Be sure to run the following commands to copy the configuration file into your user directory, or the program will not run
->
-> The user directory on Windows is "C:\Users\username"
-```powershell
-(New-Object System.Net.WebClient).DownloadFile('https://gitee.com/myhloli/MinerU/raw/master/magic-pdf.template.json', 'magic-pdf.template.json')
-cp magic-pdf.template.json ~/magic-pdf.json
-```
 
-Find the magic-pdf.json file in your user directory and set "models-dir" to the directory containing the model weights downloaded in [5. Download Models](#5-下载模型)
-> ❗️Be sure to configure the absolute path of the model weights directory correctly, or the program will fail to run because the model files cannot be found
->
-> On Windows this path must include the drive letter, and every "\" in the path must be replaced with "/"; otherwise escaping issues will make the JSON file syntactically invalid
->
-> For example, if the models are placed in the models directory at the root of drive D, the value of "models-dir" should be "D:/models"
-```json
-{
-  "models-dir": "/tmp/models"
-}
-```
+For details, see [how to download model files](how_to_download_models_zh_cn.md)
+
+## 6. Understand the Location of the Configuration File
+
+After completing step [5. Download Models](#5-下载模型), the script automatically generates a magic-pdf.json file in your user directory and configures the default model path.
+You can find the magic-pdf.json file in your user directory.
+
+> The user directory on Windows is "C:/Users/username"
 
 ## 7. First Run
+
 Download a sample file from the repository and test it:
+
 ```powershell
-(New-Object System.Net.WebClient).DownloadFile('https://gitee.com/myhloli/MinerU/raw/master/demo/small_ocr.pdf', 'small_ocr.pdf')
-magic-pdf -p small_ocr.pdf
+wget https://github.com/opendatalab/MinerU/raw/master/demo/small_ocr.pdf -O small_ocr.pdf
+magic-pdf -p small_ocr.pdf
 ```
 
 ## 8. Test CUDA Acceleration
-If your graphics card has at least 8GB of VRAM, you can follow the steps below to test CUDA-accelerated parsing.
+
+If your graphics card has at least **8GB** of VRAM, you can follow the steps below to test CUDA-accelerated parsing.
+
+> ❗️Because 8GB of VRAM is barely sufficient for this application, close all other programs that use VRAM so that the full 8GB is available at runtime.
 
 **1. Force-reinstall the CUDA-enabled torch and torchvision**
+
 ```bash
 pip install --force-reinstall torch==2.3.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu118
 ```
+
 > ❗️Be sure to specify the following versions in the command:
+>
 > ```bash
-> torch==2.3.1 torchvision==0.18.1 
+> torch==2.3.1 torchvision==0.18.1
 > ```
+>
 > These are the highest versions we support; without pinning them, a higher version will be installed automatically and the program will fail to run.
 
 **2. Modify the value of "device-mode" in the magic-pdf.json configuration file in your user directory**
+
 ```json
 {
   "device-mode":"cuda"
 }
 ```
+
 **3. Run the following command to test CUDA acceleration**
+
 ```bash
 magic-pdf -p small_ocr.pdf
 ```
-> Tip: whether CUDA acceleration is active can be roughly judged from the per-stage times in the log; typically, `layout detection cost` and `mfr time` should be at least 10x faster.
+
+> Tip: whether CUDA acceleration is active can be roughly judged from the per-stage times in the log; typically, `layout detection time` and `mfr time` should be at least 10x faster.
 
 ## 9. Enable CUDA Acceleration for OCR
-> ❗️The following steps require at least 16GB of VRAM; with less, the program may crash or slow down due to insufficient memory.
 
 **1. Install paddlepaddle-gpu; OCR acceleration is enabled automatically after installation**
+
 ```bash
 pip install paddlepaddle-gpu==2.6.1
 ```
+
 **2. Run the following command to test OCR acceleration**
+
 ```bash
 magic-pdf -p small_ocr.pdf
 ```
-> Tip: whether CUDA acceleration is active can be roughly judged from the per-stage times in the log; typically, `ocr cost` should be at least 10x faster.
 
+> Tip: whether OCR acceleration is active can be roughly judged from the per-stage times in the log; typically, `ocr time` should be at least 10x faster.

+ 0 - 0
docs/chemical_knowledge_introduction/introduction.pdf → old_docs/chemical_knowledge_introduction/introduction.pdf


+ 0 - 0
docs/chemical_knowledge_introduction/introduction.xmind → old_docs/chemical_knowledge_introduction/introduction.xmind


+ 46 - 0
old_docs/download_models.py

@@ -0,0 +1,46 @@
+import json
+import os
+
+import requests
+from modelscope import snapshot_download
+
+
+def download_and_modify_json(url, local_filename, modifications):
+    if os.path.exists(local_filename):
+        # Reuse the existing local config file
+        with open(local_filename, encoding='utf-8') as f:
+            data = json.load(f)
+    else:
+        # Download the JSON template
+        response = requests.get(url)
+        response.raise_for_status()  # raise if the request failed
+
+        # Parse the JSON content
+        data = response.json()
+
+    # Apply the requested modifications
+    for key, value in modifications.items():
+        data[key] = value
+
+    # Save the modified content
+    with open(local_filename, 'w', encoding='utf-8') as f:
+        json.dump(data, f, ensure_ascii=False, indent=4)
+
+
+if __name__ == '__main__':
+    model_dir = snapshot_download('opendatalab/PDF-Extract-Kit')
+    layoutreader_model_dir = snapshot_download('ppaanngggg/layoutreader')
+    model_dir = model_dir + '/models'
+    print(f'model_dir is: {model_dir}')
+    print(f'layoutreader_model_dir is: {layoutreader_model_dir}')
+
+    json_url = 'https://gitee.com/myhloli/MinerU/raw/master/magic-pdf.template.json'
+    config_file_name = 'magic-pdf.json'
+    home_dir = os.path.expanduser('~')
+    config_file = os.path.join(home_dir, config_file_name)
+
+    json_mods = {
+        'models-dir': model_dir,
+        'layoutreader-model-dir': layoutreader_model_dir,
+    }
+
+    download_and_modify_json(json_url, config_file, json_mods)
+    print(f'The configuration file has been configured successfully, the path is: {config_file}')
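+
+# Usage (as described in the accompanying docs):
+#   pip install modelscope
+#   python download_models.py
+# The script downloads the models via ModelScope and points ~/magic-pdf.json at them.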

+ 46 - 0
old_docs/download_models_hf.py

@@ -0,0 +1,46 @@
+import json
+import os
+
+import requests
+from huggingface_hub import snapshot_download
+
+
+def download_and_modify_json(url, local_filename, modifications):
+    if os.path.exists(local_filename):
+        # Reuse the existing local config file
+        with open(local_filename, encoding='utf-8') as f:
+            data = json.load(f)
+    else:
+        # Download the JSON template
+        response = requests.get(url)
+        response.raise_for_status()  # raise if the request failed
+
+        # Parse the JSON content
+        data = response.json()
+
+    # Apply the requested modifications
+    for key, value in modifications.items():
+        data[key] = value
+
+    # Save the modified content
+    with open(local_filename, 'w', encoding='utf-8') as f:
+        json.dump(data, f, ensure_ascii=False, indent=4)
+
+
+if __name__ == '__main__':
+    model_dir = snapshot_download('opendatalab/PDF-Extract-Kit')
+    layoutreader_model_dir = snapshot_download('hantian/layoutreader')
+    model_dir = model_dir + '/models'
+    print(f'model_dir is: {model_dir}')
+    print(f'layoutreader_model_dir is: {layoutreader_model_dir}')
+
+    json_url = 'https://github.com/opendatalab/MinerU/raw/master/magic-pdf.template.json'
+    config_file_name = 'magic-pdf.json'
+    home_dir = os.path.expanduser('~')
+    config_file = os.path.join(home_dir, config_file_name)
+
+    json_mods = {
+        'models-dir': model_dir,
+        'layoutreader-model-dir': layoutreader_model_dir,
+    }
+
+    download_and_modify_json(json_url, config_file, json_mods)
+    print(f'The configuration file has been configured successfully, the path is: {config_file}')

+ 29 - 0
old_docs/how_to_download_models_en.md

@@ -0,0 +1,29 @@
+Model downloads fall into two cases: an initial download and an update of an existing model directory. Follow the instructions for the case that applies to you.
+
+# Initial download of model files
+
+## 1. Download the Model from Hugging Face
+
+Use a Python script to download the model files from Hugging Face:
+
+```bash
+pip install huggingface_hub
+wget https://github.com/opendatalab/MinerU/raw/master/docs/download_models_hf.py -O download_models_hf.py
+python download_models_hf.py
+```
+
+The Python script will automatically download the model files and configure the model directory in the configuration file.
+
+The configuration file can be found in the user directory, with the filename `magic-pdf.json`.
+
+# How to update models previously downloaded
+
+## 1. Models downloaded via Git LFS
+
+> Because some users reported incomplete downloads and corrupted model files when using git lfs, this method is no longer recommended.
+
+If you previously downloaded model files via git lfs, you can navigate to the previous download directory and use the `git pull` command to update the model.
+
+## 2. Models downloaded via Hugging Face or ModelScope
+
+If you previously downloaded models via Hugging Face or ModelScope, you can rerun the Python script used for the initial download; the model directory will be updated to the latest version automatically.
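+
+For example, if the initial download used the Hugging Face script, updating is simply a matter of running it again:
+
+```bash
+python download_models_hf.py
+```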

+ 48 - 0
old_docs/how_to_download_models_zh_cn.md

@@ -0,0 +1,48 @@
+Model downloads fall into two cases, an initial download and updating an existing model directory; follow the instructions for the case that applies.
+
+# Initial download of model files
+
+Model files can be downloaded from Hugging Face or ModelScope. Due to network restrictions, users in mainland China may fail to reach Hugging Face; please use ModelScope instead.
+
+<details>
+  <summary>Method 1: Download the model from Hugging Face</summary>
+  <p>Use a Python script to download the model files from Hugging Face</p>
+  <pre><code>pip install huggingface_hub
+wget https://gitee.com/myhloli/MinerU/raw/master/docs/download_models_hf.py -O download_models_hf.py
+python download_models_hf.py</code></pre>
+</details>
+
+## Method 2: Download the Model from ModelScope
+
+### Use a Python script to download the model files from ModelScope
+
+```bash
+pip install modelscope
+wget https://gitee.com/myhloli/MinerU/raw/master/docs/download_models.py -O download_models.py
+python download_models.py
+```
+
+The Python script automatically downloads the model files and configures the model directory in the configuration file.
+
+The configuration file can be found in the user directory, with the filename `magic-pdf.json`.
+
+> The user directory on Windows is "C:\\Users\\username", on Linux "/home/username", and on macOS "/Users/username".
+
+# How to update previously downloaded models
+
+## 1. Models downloaded via git lfs
+
+> Because some users reported incomplete downloads and corrupted model files when using git lfs, this download method is no longer recommended.
+
+If you previously downloaded model files via git lfs, you can go to the original download directory and update the models with `git pull`.
+
+> Versions 0.9.x and later add a layout reading-order model that is not in the same repository as the earlier models, so it cannot be updated via `git pull` and must be downloaded separately:
+>
+> ```
+> from modelscope import snapshot_download
+> snapshot_download('ppaanngggg/layoutreader')
+> ```
+
+## 2. Models downloaded via Hugging Face or ModelScope
+
+If you previously downloaded models via Hugging Face or ModelScope, you can rerun the original download script; the model directory will be updated to the latest version automatically.

+ 0 - 0
docs/images/MinerU-logo-hq.png → old_docs/images/MinerU-logo-hq.png


BIN
old_docs/images/MinerU-logo.png


+ 0 - 0
docs/images/datalab_logo.png → old_docs/images/datalab_logo.png


+ 0 - 0
docs/images/flowchart_en.png → old_docs/images/flowchart_en.png


+ 0 - 0
docs/images/flowchart_zh_cn.png → old_docs/images/flowchart_zh_cn.png


+ 0 - 0
docs/images/layout_example.png → old_docs/images/layout_example.png


+ 0 - 0
docs/images/poly.png → old_docs/images/poly.png


+ 0 - 0
docs/images/project_panorama_en.png → old_docs/images/project_panorama_en.png


+ 0 - 0
docs/images/project_panorama_zh_cn.png → old_docs/images/project_panorama_zh_cn.png


+ 0 - 0
docs/images/spans_example.png → old_docs/images/spans_example.png


BIN
old_docs/images/web_demo_1.png


+ 0 - 0
docs/output_file_en_us.md → old_docs/output_file_en_us.md


+ 0 - 0
docs/output_file_zh_cn.md → old_docs/output_file_zh_cn.md


+ 4 - 0
projects/README.md

@@ -3,4 +3,8 @@
 ## Project List
 
 - [llama_index_rag](./llama_index_rag/README.md): Build a lightweight RAG system based on llama_index
+- [gradio_app](./gradio_app/README.md): Build a web app based on gradio
+- [web_demo](./web_demo/README.md): Localized deployment version of the MinerU online [demo](https://opendatalab.com/OpenSourceTools/Extractor/PDF/)
+- [web_api](./web_api/README.md): Web API based on FastAPI
+
 

+ 4 - 0
projects/README_zh-CN.md

@@ -3,3 +3,7 @@
 ## Project List
 
 - [llama_index_rag](./llama_index_rag/README_zh-CN.md): Build a lightweight RAG system based on llama_index
+- [gradio_app](./gradio_app/README_zh-CN.md): A web app based on Gradio
+- [web_demo](./web_demo/README_zh-CN.md): Localized deployment version of the MinerU online [demo](https://opendatalab.com/OpenSourceTools/Extractor/PDF/)
+- [web_api](./web_api/README.md): Web API based on FastAPI
+

+ 24 - 0
projects/gradio_app/README.md

@@ -0,0 +1,24 @@
+## Installation
+
+MinerU (>=0.8.0)
+
+> If you already have a functioning MinerU environment, you can skip this step.
+
+[Deploy in CPU environment](https://github.com/opendatalab/MinerU?tab=readme-ov-file#quick-cpu-demo)
+
+[Deploy in GPU environment](https://github.com/opendatalab/MinerU?tab=readme-ov-file#using-gpu)
+
+Third-party Software
+
+```bash
+pip install gradio gradio-pdf
+```
+
+## Start Gradio App
+
+```bash
+python app.py
+```
+
+## Use Gradio App
+
+Access http://127.0.0.1:7860 in your web browser
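+
+If the app must be reachable from other machines or run on a different port, Gradio's standard launch options can be used. A minimal sketch (edit the `demo.launch()` call at the bottom of `app.py`):
+
+```python
+# Bind to all interfaces on port 7860 so the app is reachable over the network
+demo.launch(server_name="0.0.0.0", server_port=7860)
+```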

+ 24 - 0
projects/gradio_app/README_zh-CN.md

@@ -0,0 +1,24 @@
+## Installation
+
+MinerU (>=0.8.0)
+
+> If you already have a working MinerU environment, you can skip this step.
+
+[Deploy in a CPU environment](https://github.com/opendatalab/MinerU/blob/master/README_zh-CN.md#%E4%BD%BF%E7%94%A8cpu%E5%BF%AB%E9%80%9F%E4%BD%93%E9%AA%8C)
+
+[Deploy in a GPU environment](https://github.com/opendatalab/MinerU/blob/master/README_zh-CN.md#%E4%BD%BF%E7%94%A8gpu)
+
+Third-party software
+
+```bash
+pip install gradio gradio-pdf
+```
+
+## Start the Gradio App
+
+```bash
+python app.py
+```
+
+## Use the Gradio App
+
+Access http://127.0.0.1:7860 in your web browser

+ 23 - 18
app.py → projects/gradio_app/app.py

@@ -14,8 +14,6 @@ from magic_pdf.rw.AbsReaderWriter import AbsReaderWriter
 from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter
 from magic_pdf.tools.common import do_parse, prepare_env
 
-os.system("pip install gradio")
-os.system("pip install gradio-pdf")
 import gradio as gr
 from gradio_pdf import PDF
 
@@ -25,13 +23,16 @@ def read_fn(path):
     return disk_rw.read(os.path.basename(path), AbsReaderWriter.MODE_BIN)
 
 
-def parse_pdf(doc_path, output_dir, end_page_id):
+def parse_pdf(doc_path, output_dir, end_page_id, is_ocr):
     os.makedirs(output_dir, exist_ok=True)
 
     try:
         file_name = f"{str(Path(doc_path).stem)}_{time.time()}"
         pdf_data = read_fn(doc_path)
-        parse_method = "auto"
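+        # "Force enable OCR" checkbox: parse with the OCR pipeline instead of auto-detection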
+        if is_ocr:
+            parse_method = "ocr"
+        else:
+            parse_method = "auto"
         local_image_dir, local_md_dir = prepare_env(output_dir, file_name, parse_method)
         do_parse(
             output_dir,
@@ -92,9 +93,9 @@ def replace_image_with_base64(markdown_text, image_dir_path):
     return re.sub(pattern, replace, markdown_text)
 
 
-def to_markdown(file_path, end_pages):
+def to_markdown(file_path, end_pages, is_ocr):
     # 获取识别的md文件以及压缩包文件路径
-    local_md_dir, file_name = parse_pdf(file_path, './output', end_pages - 1)
+    local_md_dir, file_name = parse_pdf(file_path, './output', end_pages - 1, is_ocr)
     archive_zip_path = os.path.join("./output", compute_sha256(local_md_dir) + ".zip")
     zip_archive_success = compress_directory_to_zip(local_md_dir, archive_zip_path)
     if zip_archive_success == 0:
@@ -111,14 +112,6 @@ def to_markdown(file_path, end_pages):
     return md_content, txt_content, archive_zip_path, new_pdf_path
 
 
-# def show_pdf(file_path):
-#     with open(file_path, "rb") as f:
-#         base64_pdf = base64.b64encode(f.read()).decode('utf-8')
-#     pdf_display = f'<embed src="data:application/pdf;base64,{base64_pdf}" ' \
-#                   f'width="100%" height="1000" type="application/pdf">'
-#     return pdf_display
-
-
 latex_delimiters = [{"left": "$$", "right": "$$", "display": True},
                     {"left": '$', "right": '$', "display": False}]
 
@@ -141,16 +134,29 @@ model_init = init_model()
 logger.info(f"model_init: {model_init}")
 
 
+with open("header.html", "r") as file:
+    header = file.read()
+
+
 if __name__ == "__main__":
     with gr.Blocks() as demo:
+        gr.HTML(header)
         with gr.Row():
             with gr.Column(variant='panel', scale=5):
                 pdf_show = gr.Markdown()
                 max_pages = gr.Slider(1, 10, 5, step=1, label="Max convert pages")
                 with gr.Row() as bu_flow:
+                    is_ocr = gr.Checkbox(label="Force enable OCR")
                     change_bu = gr.Button("Convert")
                     clear_bu = gr.ClearButton([pdf_show], value="Clear")
                 pdf_show = PDF(label="Please upload pdf", interactive=True, height=800)
+                with gr.Accordion("Examples:"):
+                    example_root = os.path.join(os.path.dirname(__file__), "examples")
+                    gr.Examples(
+                        examples=[os.path.join(example_root, _) for _ in os.listdir(example_root) if
+                                  _.endswith("pdf")],
+                        inputs=pdf_show,
+                    )
 
             with gr.Column(variant='panel', scale=5):
                 output_file = gr.File(label="convert result", interactive=False)
@@ -160,8 +166,7 @@ if __name__ == "__main__":
                                          latex_delimiters=latex_delimiters, line_breaks=True)
                     with gr.Tab("Markdown text"):
                         md_text = gr.TextArea(lines=45, show_copy_button=True)
-        change_bu.click(fn=to_markdown, inputs=[pdf_show, max_pages], outputs=[md, md_text, output_file, pdf_show])
-        clear_bu.add([md, pdf_show, md_text, output_file])
-
-    demo.launch()
+        change_bu.click(fn=to_markdown, inputs=[pdf_show, max_pages, is_ocr], outputs=[md, md_text, output_file, pdf_show])
+        clear_bu.add([md, pdf_show, md_text, output_file, is_ocr])
 
+    demo.launch()

BIN
projects/gradio_app/examples/academic_paper_formula.pdf


BIN
projects/gradio_app/examples/academic_paper_img_formula.pdf


BIN
projects/gradio_app/examples/garbled_formula.pdf


BIN
projects/gradio_app/examples/garbled_formula2.pdf


BIN
projects/gradio_app/examples/garbled_img_formula.pdf


BIN
projects/gradio_app/examples/scanned.pdf


+ 119 - 0
projects/gradio_app/header.html

@@ -0,0 +1,119 @@
+<html><head>
+  <link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.15.4/css/all.css">
+<style>
+  .link-block {
+    border: 1px solid transparent;
+    border-radius: 24px;
+    background-color: rgba(54, 54, 54, 1);
+    cursor: pointer !important;
+  }
+  .link-block:hover {
+    background-color: rgba(54, 54, 54, 0.75) !important;
+    cursor: pointer !important;
+  }
+  .external-link {
+    display: inline-flex;
+    align-items: center;
+    height: 36px;
+    line-height: 36px;
+    padding: 0 16px;
+    cursor: pointer !important;
+  }
+  .external-link,
+  .external-link:hover {
+    cursor: pointer !important;
+  }
+  a {
+    text-decoration: none;
+  }
+</style></head>
+
+<body>
+  <div style="
+      display: flex;
+      flex-direction: column;
+      justify-content: center;
+      align-items: center;
+      text-align: center;
+      background: linear-gradient(45deg, #007bff 0%, #0056b3 100%);
+      padding: 24px;
+      gap: 24px;
+      border-radius: 8px;
+    ">
+    <div style="
+        display: flex;
+        flex-direction: column;
+        align-items: center;
+        gap: 16px;
+      ">
+      <div style="display: flex; flex-direction: column; gap: 8px">
+        <h1 style="
+            font-size: 48px;
+            color: #fafafa;
+            margin: 0;
+            font-family: 'Trebuchet MS', 'Lucida Sans Unicode',
+              'Lucida Grande', 'Lucida Sans', Arial, sans-serif;
+          ">
+          MinerU: PDF Extraction Demo
+        </h1>
+      </div>
+    </div>
+
+    <p style="
+        margin: 0;
+        line-height: 1.6rem;
+        font-size: 16px;
+        color: #fafafa;
+        opacity: 0.8;
+      ">
+      A one-stop, open-source, high-quality data extraction tool, supports
+      PDF/webpage/e-book extraction.<br>
+    </p>
+    <style>
+      .link-block {
+        display: inline-block;
+      }
+      .link-block + .link-block {
+        margin-left: 20px;
+      }
+    </style>
+
+    <div class="column has-text-centered">
+      <div class="publication-links">
+        <!-- Code Link. -->
+        <span class="link-block">
+          <a href="https://github.com/opendatalab/MinerU" class="external-link button is-normal is-rounded is-dark" style="text-decoration: none; cursor: pointer">
+            <span class="icon" style="margin-right: 4px">
+              <i class="fab fa-github" style="color: white; margin-right: 4px"></i>
+            </span>
+            <span style="color: white">Code</span>
+          </a>
+        </span>
+
+        <!-- arXiv Link. -->
+        <span class="link-block">
+          <a href="https://arxiv.org/abs/2409.18839" class="external-link button is-normal is-rounded is-dark" style="text-decoration: none; cursor: pointer">
+            <span class="icon" style="margin-right: 8px">
+              <i class="fas fa-file" style="color: white"></i>
+            </span>
+            <span style="color: white">Paper</span>
+          </a>
+        </span>
+
+        <!-- Homepage Link. -->
+        <span class="link-block">
+          <a href="https://opendatalab.com/" class="external-link button is-normal is-rounded is-dark" style="text-decoration: none; cursor: pointer">
+            <span class="icon" style="margin-right: 8px">
+              <i class="fas fa-globe" style="color: white"></i>
+            </span>
+            <span style="color: white">Homepage</span>
+          </a>
+        </span>
+      </div>
+    </div>
+
+    <!-- New Demo Links -->
+  </div>
+
+
+</body></html>

Some files were not shown because too many files changed in this diff