
Merge pull request #3980 from myhloli/dev

Dev
Xiaomeng Zhao · 2 weeks ago
commit 2ac829ca32

+ 9 - 6
README.md

@@ -632,12 +632,13 @@ A WebUI developed based on Gradio, with a simple interface and only core parsing
         <tr>
             <th rowspan="2">Parsing Backend</th>
             <th rowspan="2">pipeline <br> (Accuracy<sup>1</sup> 82+)</th>
-            <th colspan="4">vlm (Accuracy<sup>1</sup> 90+)</th>
+            <th colspan="5">vlm (Accuracy<sup>1</sup> 90+)</th>
         </tr>
         <tr>
             <th>transformers</th>
             <th>mlx-engine</th>
             <th>vllm-engine / <br>vllm-async-engine</th>
+            <th>lmdeploy-engine</th>
             <th>http-client</th>
         </tr>
     </thead>
@@ -648,6 +649,7 @@ A WebUI developed based on Gradio, with a simple interface and only core parsing
             <td>Good compatibility, <br>but slower</td>
             <td>Faster than transformers</td>
             <td>Fast, compatible with the vLLM ecosystem</td>
+            <td>Fast, compatible with the LMDeploy ecosystem</td>
             <td>Suitable for OpenAI-compatible servers<sup>5</sup></td>
         </tr>
         <tr>
@@ -655,33 +657,34 @@ A WebUI developed based on Gradio, with a simple interface and only core parsing
             <td colspan="2" style="text-align:center;">Linux<sup>2</sup> / Windows / macOS</td>
             <td colspan="2" style="text-align:center;">Linux<sup>2</sup> / Windows / macOS</td>
             <td style="text-align:center;">macOS<sup>3</sup></td>
             <td style="text-align:center;">macOS<sup>3</sup></td>
             <td style="text-align:center;">Linux<sup>2</sup> / Windows<sup>4</sup> </td>
             <td style="text-align:center;">Linux<sup>2</sup> / Windows<sup>4</sup> </td>
+            <td style="text-align:center;">Linux<sup>2</sup> / Windows </td>
             <td>Any</td>
             <td>Any</td>
         </tr>
         </tr>
         <tr>
         <tr>
             <th>CPU inference support</th>
             <th>CPU inference support</th>
             <td colspan="2" style="text-align:center;">✅</td>
             <td colspan="2" style="text-align:center;">✅</td>
-            <td colspan="2" style="text-align:center;">❌</td>
+            <td colspan="3" style="text-align:center;">❌</td>
             <td>Not required</td>
             <td>Not required</td>
         </tr>
         </tr>
         <tr>
         <tr>
             <th>GPU Requirements</th><td colspan="2" style="text-align:center;">Volta or later architectures, 6 GB VRAM or more, or Apple Silicon</td>
             <th>GPU Requirements</th><td colspan="2" style="text-align:center;">Volta or later architectures, 6 GB VRAM or more, or Apple Silicon</td>
             <td>Apple Silicon</td>
             <td>Apple Silicon</td>
-            <td>Volta or later architectures, 8 GB VRAM or more</td>
+            <td colspan="2" style="text-align:center;">Volta or later architectures, 8 GB VRAM or more</td>
             <td>Not required</td>
             <td>Not required</td>
         </tr>
         </tr>
         <tr>
         <tr>
             <th>Memory Requirements</th>
             <th>Memory Requirements</th>
-            <td colspan="4" style="text-align:center;">Minimum 16 GB, 32 GB recommended</td>
+            <td colspan="5" style="text-align:center;">Minimum 16 GB, 32 GB recommended</td>
             <td>8 GB</td>
             <td>8 GB</td>
         </tr>
         </tr>
         <tr>
         <tr>
             <th>Disk Space Requirements</th>
             <th>Disk Space Requirements</th>
-            <td colspan="4" style="text-align:center;">20 GB or more, SSD recommended</td>
+            <td colspan="5" style="text-align:center;">20 GB or more, SSD recommended</td>
             <td>2 GB</td>
             <td>2 GB</td>
         </tr>
         </tr>
         <tr>
         <tr>
             <th>Python Version</th>
             <th>Python Version</th>
-            <td colspan="5" style="text-align:center;">3.10-3.13</td>
+            <td colspan="6" style="text-align:center;">3.10-3.13</td>
         </tr>
         </tr>
     </tbody>
     </tbody>
 </table>
 </table>

+ 9 - 6
README_zh-CN.md

@@ -619,12 +619,13 @@ https://github.com/user-attachments/assets/4bea02c9-6d54-4cd6-97ed-dff14340982c
         <tr>
             <th rowspan="2">解析后端</th>
             <th rowspan="2">pipeline <br> (精度<sup>1</sup> 82+)</th>
-            <th colspan="4">vlm (精度<sup>1</sup> 90+)</th>
+            <th colspan="5">vlm (精度<sup>1</sup> 90+)</th>
         </tr>
         <tr>
             <th>transformers</th>
             <th>mlx-engine</th>
             <th>vllm-engine / <br>vllm-async-engine</th>
+            <th>lmdeploy-engine</th>
             <th>http-client</th>
         </tr>
     </thead>
@@ -635,6 +636,7 @@ https://github.com/user-attachments/assets/4bea02c9-6d54-4cd6-97ed-dff14340982c
             <td>兼容性好, 速度较慢</td>
             <td>比transformers快</td>
             <td>速度快, 兼容vllm生态</td>
+            <td>速度快, 兼容lmdeploy生态</td>
             <td>适用于OpenAI兼容服务器<sup>5</sup></td>
         </tr>
         <tr>
@@ -642,33 +644,34 @@ https://github.com/user-attachments/assets/4bea02c9-6d54-4cd6-97ed-dff14340982c
             <td colspan="2" style="text-align:center;">Linux<sup>2</sup> / Windows / macOS</td>
             <td colspan="2" style="text-align:center;">Linux<sup>2</sup> / Windows / macOS</td>
             <td style="text-align:center;">macOS<sup>3</sup></td>
             <td style="text-align:center;">macOS<sup>3</sup></td>
             <td style="text-align:center;">Linux<sup>2</sup> / Windows<sup>4</sup> </td>
             <td style="text-align:center;">Linux<sup>2</sup> / Windows<sup>4</sup> </td>
+            <td style="text-align:center;">Linux<sup>2</sup> / Windows </td>
             <td>不限</td>
             <td>不限</td>
         </tr>
         </tr>
         <tr>
         <tr>
             <th>CPU推理支持</th>
             <th>CPU推理支持</th>
             <td colspan="2" style="text-align:center;">✅</td>
             <td colspan="2" style="text-align:center;">✅</td>
-            <td colspan="2" style="text-align:center;">❌</td>
+            <td colspan="3" style="text-align:center;">❌</td>
             <td >不需要</td>
             <td >不需要</td>
         </tr>
         </tr>
         <tr>
         <tr>
             <th>GPU要求</th><td colspan="2" style="text-align:center;">Volta及以后架构, 6G显存以上或Apple Silicon</td>
             <th>GPU要求</th><td colspan="2" style="text-align:center;">Volta及以后架构, 6G显存以上或Apple Silicon</td>
             <td>Apple Silicon</td>
             <td>Apple Silicon</td>
-            <td>Volta及以后架构, 8G显存以上</td>
+            <td colspan="2" style="text-align:center;">Volta及以后架构, 8G显存以上</td>
             <td>不需要</td>
             <td>不需要</td>
         </tr>
         </tr>
         <tr>
         <tr>
             <th>内存要求</th>
             <th>内存要求</th>
-            <td colspan="4" style="text-align:center;">最低16GB以上, 推荐32GB以上</td>
+            <td colspan="5" style="text-align:center;">最低16GB以上, 推荐32GB以上</td>
             <td>8GB</td>
             <td>8GB</td>
         </tr>
         </tr>
         <tr>
         <tr>
             <th>磁盘空间要求</th>
             <th>磁盘空间要求</th>
-            <td colspan="4" style="text-align:center;">20GB以上, 推荐使用SSD</td>
+            <td colspan="5" style="text-align:center;">20GB以上, 推荐使用SSD</td>
             <td>2GB</td>
             <td>2GB</td>
         </tr>
         </tr>
         <tr>
         <tr>
             <th>python版本</th>
             <th>python版本</th>
-            <td colspan="5" style="text-align:center;">3.10-3.13</td>
+            <td colspan="6" style="text-align:center;">3.10-3.13</td>
         </tr>
         </tr>
     </tbody>
     </tbody>
 </table> 
 </table> 

+ 27 - 0
mineru/backend/vlm/utils.py

@@ -3,6 +3,7 @@ import os
 from loguru import logger
 from packaging import version
 
+from mineru.utils.check_sys_env import is_windows_environment, is_linux_environment
 from mineru.utils.config_reader import get_device
 from mineru.utils.model_utils import get_vram
 
@@ -44,6 +45,32 @@ def enable_custom_logits_processors() -> bool:
         return True
 
 
+def set_lmdeploy_backend(device_type:str) -> str:
+    lmdeploy_backend = ""
+    if device_type.lower() in ["ascend", "maca", "camb"]:
+        lmdeploy_backend = "pytorch"
+    elif device_type.lower() in ["cuda"]:
+        import torch
+        if not torch.cuda.is_available():
+            raise ValueError("CUDA is not available.")
+        if is_windows_environment():
+            lmdeploy_backend = "turbomind"
+        elif is_linux_environment():
+            major, minor = torch.cuda.get_device_capability()
+            compute_capability = f"{major}.{minor}"
+            if version.parse(compute_capability) >= version.parse("8.0"):
+                lmdeploy_backend = "pytorch"
+            else:
+                lmdeploy_backend = "turbomind"
+        else:
+            raise ValueError("Unsupported operating system.")
+    else:
+        raise ValueError(f"Unsupported device type: {device_type}")
+    return lmdeploy_backend
+
+
+
+
 def set_default_gpu_memory_utilization() -> float:
     from vllm import __version__ as vllm_version
     if version.parse(vllm_version) >= version.parse("0.11.0"):

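The new `set_lmdeploy_backend` helper maps a device type (and, for CUDA, the OS and compute capability) to an LMDeploy engine. Below is a minimal sketch of that decision rule as a pure function, so it can be checked without torch or a GPU; the name `pick_backend` and its keyword parameters are illustrative only and not part of this patch.

```python
# Minimal sketch of the rule implemented by set_lmdeploy_backend, as a pure
# function. pick_backend and its parameters are illustrative; the real helper
# queries torch.cuda and the platform itself.

def pick_backend(device_type: str, system: str = "Linux",
                 compute_capability: tuple[int, int] = (8, 0)) -> str:
    device_type = device_type.lower()
    if device_type in ("ascend", "maca", "camb"):
        # Non-CUDA accelerators only run on LMDeploy's pytorch engine
        return "pytorch"
    if device_type == "cuda":
        if system == "Windows":
            return "turbomind"
        if system == "Linux":
            # Ampere (SM 8.0) or newer -> pytorch engine; older CUDA cards -> turbomind
            return "pytorch" if compute_capability >= (8, 0) else "turbomind"
        raise ValueError("Unsupported operating system.")
    raise ValueError(f"Unsupported device type: {device_type}")


assert pick_backend("ascend") == "pytorch"
assert pick_backend("cuda", system="Windows") == "turbomind"
assert pick_backend("cuda", system="Linux", compute_capability=(7, 5)) == "turbomind"
assert pick_backend("cuda", system="Linux", compute_capability=(9, 0)) == "pytorch"
```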
+ 18 - 8
mineru/backend/vlm/vlm_analyze.py

@@ -4,7 +4,8 @@ import time
 
 from loguru import logger
 
-from .utils import enable_custom_logits_processors, set_default_gpu_memory_utilization, set_default_batch_size
+from .utils import enable_custom_logits_processors, set_default_gpu_memory_utilization, set_default_batch_size, \
+    set_lmdeploy_backend
 from .model_output_to_middle_json import result_to_middle_json
 from ...data.data_reader_writer import DataWriter
 from mineru.utils.pdf_image_tools import load_images_from_pdf
@@ -128,15 +129,24 @@ class ModelSingleton:
                         if "cache_max_entry_count" not in kwargs:
                         if "cache_max_entry_count" not in kwargs:
                             kwargs["cache_max_entry_count"] = 0.5
                             kwargs["cache_max_entry_count"] = 0.5
 
 
-                        # 默认使用 turbomind
-                        lm_backend = "turbomind"
-                        device = kwargs.get("device", "cuda").lower()
-                        # 特定设备强制使用 pytorch backend
-                        if device in ["ascend", "maca", "camb"]:
-                            lm_backend = "pytorch"
-                            backend_config = PytorchEngineConfig(**kwargs)
+                        if "device" in kwargs:
+                            device_type = kwargs.pop("device")
                         else:
                         else:
+                            device_type = os.getenv('MINERU_DEVICE_MODE', "cuda").lower()
+                        # device_type 如果有则去除":"
+                        if ":" in device_type:
+                            device_type = device_type.split(":")[0]
+
+                        lm_backend = set_lmdeploy_backend(device_type)
+                        logger.info(f"Set lmdeploy_backend to: {lm_backend}")
+
+                        if lm_backend == "pytorch":
+                            kwargs["device_type"] = device_type
+                            backend_config = PytorchEngineConfig(**kwargs)
+                        elif lm_backend == "turbomind":
                             backend_config = TurbomindEngineConfig(**kwargs)
                             backend_config = TurbomindEngineConfig(**kwargs)
+                        else:
+                            raise ValueError(f"Unsupported lmdeploy backend: {lm_backend}")
 
 
                         log_level = 'ERROR'
                         log_level = 'ERROR'
                         from lmdeploy.utils import get_logger
                         from lmdeploy.utils import get_logger

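Assuming lmdeploy is installed, the new branch in `ModelSingleton` amounts to roughly the following sketch; `build_backend_config` is a hypothetical standalone wrapper, not a function in this patch.

```python
# Rough sketch of the new config-resolution path, assuming lmdeploy is installed.
# build_backend_config is a hypothetical wrapper, not a function in this patch.
import os

from lmdeploy import PytorchEngineConfig, TurbomindEngineConfig

from mineru.backend.vlm.utils import set_lmdeploy_backend


def build_backend_config(**kwargs):
    kwargs.setdefault("cache_max_entry_count", 0.5)
    # An explicit device kwarg wins; otherwise fall back to MINERU_DEVICE_MODE, then cuda
    device_type = kwargs.pop("device", None) or os.getenv("MINERU_DEVICE_MODE", "cuda").lower()
    # "cuda:0" and similar collapse to the bare device type
    device_type = device_type.split(":")[0]

    lm_backend = set_lmdeploy_backend(device_type)
    if lm_backend == "pytorch":
        # Only the pytorch engine is told the device family; in this selection
        # logic turbomind is chosen for CUDA devices only
        return PytorchEngineConfig(device_type=device_type, **kwargs)
    return TurbomindEngineConfig(**kwargs)
```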
+ 26 - 7
mineru/model/vlm/lmdeploy_server.py

@@ -1,6 +1,9 @@
 import os
 import sys
 
+from loguru import logger
+
+from mineru.backend.vlm.utils import set_lmdeploy_backend
 from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
 
 
@@ -10,8 +13,9 @@ def main():
     has_port_arg = False
     has_gpu_memory_utilization_arg = False
     has_log_level_arg = False
-    has_device_arg = False
+    has_backend_arg = False
     device_type = "cuda"
+    lm_backend = ""
 
     # Check existing arguments
     for i, arg in enumerate(args):
@@ -21,12 +25,17 @@ def main():
             has_gpu_memory_utilization_arg = True
         if arg == "--log-level" or arg.startswith("--log-level="):
             has_log_level_arg = True
+        if arg == "--backend":
+            has_backend_arg = True
+            if i + 1 < len(args):
+                lm_backend = args[i + 1]
+        if arg.startswith("--backend="):
+            has_backend_arg = True
+            lm_backend = arg.split("=", 1)[1]
         if arg == "--device":
-            has_device_arg = True
             if i + 1 < len(args):
                 device_type = args[i + 1]
-        elif arg.startswith("--device="):
-            has_device_arg = True
+        if arg.startswith("--device="):
             device_type = arg.split("=", 1)[1]
 
     # Add default arguments
@@ -36,9 +45,19 @@ def main():
         args.extend(["--cache-max-entry-count", "0.5"])
     if not has_log_level_arg:
         args.extend(["--log-level", "ERROR"])
-    if has_device_arg:
-        if device_type.lower() in ["ascend", "maca", "camb"]:
-            args.extend(["--backend", "pytorch"])
+
+    if ":" in device_type:
+        device_type = device_type.split(":")[0]
+    if lm_backend == "":
+        lm_backend = set_lmdeploy_backend(device_type)
+    logger.info(f"Set lmdeploy_backend to: {lm_backend}")
+
+    if lm_backend == "pytorch":
+        os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+    # Do not override --backend if it was passed explicitly in args
+    if not has_backend_arg:
+        args.extend(["--backend", lm_backend])
 
     model_path = auto_download_and_get_model_root_path("/", "vlm")
 

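The server-side argument handling can be exercised in isolation. The following is an illustrative rendition only: `fill_backend_arg` is hypothetical, and the `pick_backend` stub stands in for the real call to `set_lmdeploy_backend(device_type)`.

```python
# Hypothetical standalone rendition of the argument handling added to main();
# the pick_backend stub stands in for the real set_lmdeploy_backend() call.

def fill_backend_arg(args: list[str],
                     pick_backend=lambda device_type: "turbomind") -> list[str]:
    has_backend_arg = False
    device_type = "cuda"
    lm_backend = ""

    # Scan for explicit --backend / --device arguments
    for i, arg in enumerate(args):
        if arg == "--backend" and i + 1 < len(args):
            has_backend_arg, lm_backend = True, args[i + 1]
        if arg.startswith("--backend="):
            has_backend_arg, lm_backend = True, arg.split("=", 1)[1]
        if arg == "--device" and i + 1 < len(args):
            device_type = args[i + 1]
        if arg.startswith("--device="):
            device_type = arg.split("=", 1)[1]

    # "cuda:0" collapses to "cuda" before the backend is derived
    device_type = device_type.split(":")[0]
    if lm_backend == "":
        lm_backend = pick_backend(device_type)

    # An explicit --backend from the caller is left untouched
    if not has_backend_arg:
        args = args + ["--backend", lm_backend]
    return args


print(fill_backend_arg(["--device", "cuda:0"]))
# ['--device', 'cuda:0', '--backend', 'turbomind']
print(fill_backend_arg(["--backend=pytorch", "--device", "ascend"]))
# ['--backend=pytorch', '--device', 'ascend']  (explicit backend is respected)
```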
+ 4 - 0
mineru/utils/check_sys_env.py

@@ -13,6 +13,10 @@ def is_mac_environment() -> bool:
     return platform.system() == "Darwin"
 
 
+def is_linux_environment() -> bool:
+    return platform.system() == "Linux"
+
+
 # Detect if CPU is Apple Silicon architecture
 def is_apple_silicon_cpu() -> bool:
     return platform.machine() in ["arm64", "aarch64"]