
support loading from md files in the pp-translation pipeline

zhouchangda, 5 months ago
commit 107644be76

+ 33 - 27
api_examples/pipelines/test_pp_translation.py

@@ -14,9 +14,10 @@
 
 from paddlex import create_pipeline
 
-pipeline = create_pipeline(pipeline="PP-Translation")
+pipeline = create_pipeline(pipeline="PP-DocTranslation")
 
-img_path = "2504_10258v1.pdf"
+input_path = "docs/pipeline_usage/tutorials/ocr_pipelines/PP-Translation.md"
+output_path = "./output"
 
 chat_bot_config = {
     "module_name": "chat_bot",
@@ -27,31 +28,36 @@ chat_bot_config = {
 }
 
 
-visual_predict_res = pipeline.visual_predict(
-    img_path,
-    use_doc_orientation_classify=False,
-    use_doc_unwarping=False,
-    use_common_ocr=True,
-    use_seal_recognition=True,
-    use_table_recognition=True,
-)
-
-ori_md_info_list = []
-for res in visual_predict_res:
-    layout_parsing_result = res["layout_parsing_result"]
-    ori_md_info_list.append(layout_parsing_result.markdown)
-    layout_parsing_result.print()
-    layout_parsing_result.save_to_img("./output")
-    layout_parsing_result.save_to_json("./output")
-    layout_parsing_result.save_to_xlsx("./output")
-    layout_parsing_result.save_to_html("./output")
-    layout_parsing_result.save_to_markdown("./output")
-
-
-src_md_info, tgt_md_info = pipeline.translate(
+if input_path.lower().endswith(".md"):
+    ori_md_info_list = pipeline.load_from_markdown(input_path)
+else:
+    # Use PP-StructureV3 to get the original markdown info list
+    visual_predict_res = pipeline.visual_predict(
+        input_path,
+        use_doc_orientation_classify=False,
+        use_doc_unwarping=False,
+        use_common_ocr=True,
+        use_seal_recognition=True,
+        use_table_recognition=True,
+    )
+
+    ori_md_info_list = []
+    for res in visual_predict_res:
+        layout_parsing_result = res["layout_parsing_result"]
+        ori_md_info_list.append(layout_parsing_result.markdown)
+        layout_parsing_result.save_to_img(output_path)
+        layout_parsing_result.save_to_markdown(output_path)
+
+    # Concatenate markdown pages into a single markdown file when the input is a PDF
+    if input_path.lower().endswith(".pdf"):
+        ori_md_info = pipeline.concatenate_markdown_pages(ori_md_info_list)
+        ori_md_info.save_to_markdown(output_path)
+
+tgt_md_info_list = pipeline.translate(
     ori_md_info_list=ori_md_info_list,
-    target_language="zh",
+    target_language="en",
+    chunk_size=5000,
     chat_bot_config=chat_bot_config,
 )
-src_md_info.save_to_markdown("./output")
-tgt_md_info.save_to_markdown("./output")
+for tgt_md_info in tgt_md_info_list:
+    tgt_md_info.save_to_markdown(output_path)
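
Since the new MarkDownBatchSampler introduced below also walks directories, translating a whole folder of markdown files should be possible as well; a minimal sketch, assuming load_from_markdown routes directory inputs through that sampler (md_dir is a hypothetical path):

md_dir = "./my_markdown_docs"  # hypothetical directory of .md files
ori_md_info_list = pipeline.load_from_markdown(md_dir)  # assumption: directories are accepted
tgt_md_info_list = pipeline.translate(
    ori_md_info_list=ori_md_info_list,
    target_language="en",
    chunk_size=5000,
    chat_bot_config=chat_bot_config,
)
for tgt_md_info in tgt_md_info_list:
    tgt_md_info.save_to_markdown(output_path)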

+ 0 - 261
paddlex/configs/pipelines/PP-Translation.yaml

@@ -1,261 +0,0 @@
-
-pipeline_name: PP-Translation
-
-use_layout_parser: True
-
-SubModules:
-  LLM_Chat:
-    module_name: chat_bot
-    model_name: ernie-3.5-8k
-    base_url: "https://qianfan.baidubce.com/v2"
-    api_type: openai
-    api_key: "api_key" # Set this to a real API key
-
-  PromptEngneering:
-    Translate_CommonText:
-      module_name: prompt_engneering
-      task_type: translate_prompt
-      
-      task_description: 'You are a multilingual translation expert, proficient in the grammar, vocabulary, and cultural context of many languages. Your task is to accurately convert text from one language into another while preserving the original semantics, style, and tone.'
-
-      output_format: 'The output should be the translated text, formatted consistently with the original, including punctuation and paragraph structure. If the original contains specific formats (such as tables, formulas, lists, etc.), the translated text should keep the same formats.'
-
-      rules_str: 'General rules:
-              1. The translation must be semantically accurate and complete, and conform to the conventions of the target language.
-              2. Preserve the style and tone of the original to convey the same emotion and intent.
-              3. Proper nouns (names of people, places, brands, etc.) should remain unchanged unless they have an established translation in the target language.
-              4. Culture-specific expressions or idioms should be adapted or explained appropriately for the target culture.
-              5. Avoid naive literal machine translation; adjust and refine according to context.
-              6. Non-text elements that may appear in the original (such as images, tables, and formulas in HTML syntax) must remain unchanged.
-              7. Do not make any changes to HTML structural code in the original; keep the HTML structure intact.
-              8. After translating, proofread carefully to ensure there are no grammar or spelling errors.'
-      few_shot_demo_text_content:
-      few_shot_demo_key_value_list:
-
-SubPipelines:
-  LayoutParser:
-    pipeline_name: PP-StructureV3
-
-    batch_size: 8
-
-    use_doc_preprocessor: True
-    use_seal_recognition: True
-    use_table_recognition: True
-    use_formula_recognition: True
-    use_chart_recognition: True
-    use_region_detection: True
-
-    SubModules:
-      LayoutDetection:
-        module_name: layout_detection
-        model_name: PP-DocLayout_plus-L
-        model_dir: null
-        batch_size: 8
-        threshold: 
-          0: 0.3  # paragraph_title
-          1: 0.5  # image
-          2: 0.4  # text
-          3: 0.5  # number
-          4: 0.5  # abstract
-          5: 0.5  # content
-          6: 0.5  # figure_table_chart_title
-          7: 0.3  # formula
-          8: 0.5  # table
-          9: 0.5  # reference
-          10: 0.5 # doc_title
-          11: 0.5 # footnote
-          12: 0.5 # header
-          13: 0.5 # algorithm
-          14: 0.5 # footer
-          15: 0.45 # seal
-          16: 0.5 # chart
-          17: 0.5 # formula_number
-          18: 0.5 # aside_text
-          19: 0.5 # reference_content
-        layout_nms: True
-        layout_unclip_ratio: [1.0, 1.0] 
-        layout_merge_bboxes_mode: 
-          0: "large"  # paragraph_title
-          1: "large"  # image
-          2: "union"  # text
-          3: "union"  # number
-          4: "union"  # abstract
-          5: "union"  # content
-          6: "union"  # figure_table_chart_title
-          7: "large"  # formula
-          8: "union"  # table
-          9: "union"  # reference
-          10: "union" # doc_title
-          11: "union" # footnote
-          12: "union" # header
-          13: "union" # algorithm
-          14: "union" # footer
-          15: "union" # seal
-          16: "large" # chart
-          17: "union" # formula_number
-          18: "union" # aside_text
-          19: "union" # reference_content
-      ChartRecognition:
-        module_name: chart_recognition
-        model_name: PP-Chart2Table
-        model_dir: null
-        batch_size: 1 
-      RegionDetection:
-        module_name: layout_detection
-        model_name: PP-DocBlockLayout
-        model_dir: null
-        layout_nms: True
-        layout_merge_bboxes_mode: "small"
-
-    SubPipelines:
-      DocPreprocessor:
-        pipeline_name: doc_preprocessor
-        batch_size: 8
-        use_doc_orientation_classify: True
-        use_doc_unwarping: True
-        SubModules:
-          DocOrientationClassify:
-            module_name: doc_text_orientation
-            model_name: PP-LCNet_x1_0_doc_ori
-            model_dir: null
-            batch_size: 8
-          DocUnwarping:
-            module_name: image_unwarping
-            model_name: UVDoc
-            model_dir: null
-
-      GeneralOCR:
-        pipeline_name: OCR
-        batch_size: 8
-        text_type: general
-        use_doc_preprocessor: False
-        use_textline_orientation: True
-        SubModules:
-          TextDetection:
-            module_name: text_detection
-            model_name: PP-OCRv5_server_det
-            model_dir: null
-            limit_side_len: 736
-            limit_type: min
-            max_side_limit: 4000
-            thresh: 0.3
-            box_thresh: 0.6
-            unclip_ratio: 1.5
-          TextLineOrientation:
-            module_name: textline_orientation
-            model_name: PP-LCNet_x1_0_textline_ori
-            model_dir: null
-            batch_size: 8
-          TextRecognition:
-            module_name: text_recognition
-            model_name: PP-OCRv5_server_rec
-            model_dir: null
-            batch_size: 8
-            score_thresh: 0.0
-    
-
-      TableRecognition:
-        pipeline_name: table_recognition_v2
-        use_layout_detection: False
-        use_doc_preprocessor: False
-        use_ocr_model: False
-        SubModules:  
-          TableClassification:
-            module_name: table_classification
-            model_name: PP-LCNet_x1_0_table_cls
-            model_dir: null
-
-          WiredTableStructureRecognition:
-            module_name: table_structure_recognition
-            model_name: SLANeXt_wired
-            model_dir: null
-          
-          WirelessTableStructureRecognition:
-            module_name: table_structure_recognition
-            model_name: SLANet_plus
-            model_dir: null
-          
-          WiredTableCellsDetection:
-            module_name: table_cells_detection
-            model_name: RT-DETR-L_wired_table_cell_det
-            model_dir: null
-          
-          WirelessTableCellsDetection:
-            module_name: table_cells_detection
-            model_name: RT-DETR-L_wireless_table_cell_det
-            model_dir: null
-
-          TableOrientationClassify:
-            module_name: doc_text_orientation
-            model_name: PP-LCNet_x1_0_doc_ori
-            model_dir: null
-        SubPipelines:
-          GeneralOCR:
-            pipeline_name: OCR
-            text_type: general
-            use_doc_preprocessor: False
-            use_textline_orientation: True
-            SubModules:
-              TextDetection:
-                module_name: text_detection
-                model_name: PP-OCRv5_server_det
-                model_dir: null
-                limit_side_len: 736
-                limit_type: min
-                max_side_limit: 4000
-                thresh: 0.3
-                box_thresh: 0.4
-                unclip_ratio: 1.5
-              TextLineOrientation:
-                module_name: textline_orientation
-                model_name: PP-LCNet_x1_0_textline_ori
-                model_dir: null
-                batch_size: 8
-              TextRecognition:
-                module_name: text_recognition
-                model_name: PP-OCRv5_server_rec
-                model_dir: null
-                batch_size: 8
-            score_thresh: 0.0
-
-      SealRecognition:
-        pipeline_name: seal_recognition
-        batch_size: 8
-        use_layout_detection: False
-        use_doc_preprocessor: False
-        SubPipelines:
-          SealOCR:
-            pipeline_name: OCR
-            batch_size: 8
-            text_type: seal
-            use_doc_preprocessor: False
-            use_textline_orientation: False
-            SubModules:
-              TextDetection:
-                module_name: seal_text_detection
-                model_name: PP-OCRv4_server_seal_det
-                model_dir: null
-                limit_side_len: 736
-                limit_type: min
-                max_side_limit: 4000
-                thresh: 0.2
-                box_thresh: 0.6
-                unclip_ratio: 0.5
-              TextRecognition:
-                module_name: text_recognition
-                model_name: PP-OCRv5_server_rec
-                model_dir: null
-                batch_size: 8
-                score_thresh: 0
-        
-      FormulaRecognition:
-        pipeline_name: formula_recognition
-        batch_size: 8
-        use_layout_detection: False
-        use_doc_preprocessor: False
-        SubModules:
-          FormulaRecognition:
-            module_name: formula_recognition
-            model_name: PP-FormulaNet_plus-L
-            model_dir: null
-            batch_size: 8

+ 1 - 0
paddlex/inference/common/batch_sampler/__init__.py

@@ -17,5 +17,6 @@ from .base_batch_sampler import BaseBatchSampler
 from .det_3d_batch_sampler import Det3DBatchSampler
 from .doc_vlm_batch_sampler import DocVLMBatchSampler
 from .image_batch_sampler import ImageBatchSampler
+from .markdown_batch_sampler import MarkDownBatchSampler
 from .ts_batch_sampler import TSBatchSampler
 from .video_batch_sampler import VideoBatchSampler

+ 116 - 0
paddlex/inference/common/batch_sampler/markdown_batch_sampler.py

@@ -0,0 +1,116 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from pathlib import Path
+
+from ....utils import logging
+from ....utils.cache import CACHE_DIR
+from ....utils.download import download
+from ...utils.io import MarkDownReader
+from .base_batch_sampler import BaseBatchSampler, Batch
+
+
+class MarkDownBatchSampler(BaseBatchSampler):
+    """Batch sampler for markdown data, supporting markdown file inputs."""
+
+    SUFFIX = ["md", "markdown", "mdown", "mkd"]
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.md_reader = MarkDownReader()
+
+    def _download_from_url(self, in_path: str) -> str:
+        """Download a file from a URL to a cache directory.
+
+        Args:
+            in_path (str): URL of the file to be downloaded.
+
+        Returns:
+            str: Path to the downloaded file.
+        """
+        file_name = Path(in_path).name
+        save_path = Path(CACHE_DIR) / "predict_input" / file_name
+        download(in_path, save_path, overwrite=True)
+        return save_path.as_posix()
+
+    def _get_files_list(self, fp: str) -> list:
+        """Get a list of markdown files from a directory or a single file path.
+
+        Args:
+            fp (str): Path to a directory or a single markdown file.
+
+        Returns:
+            list: Sorted list of markdown file paths.
+
+        Raises:
+            Exception: If no markdown file is found in the path.
+        """
+        file_list = []
+        if fp is None or not os.path.exists(fp):
+            raise Exception(f"No markdown file found in path: {fp}")
+
+        if os.path.isfile(fp) and fp.split(".")[-1].lower() in self.SUFFIX:
+            file_list.append(fp)
+        elif os.path.isdir(fp):
+            for root, dirs, files in os.walk(fp):
+                for single_file in files:
+                    if single_file.split(".")[-1].lower() in self.SUFFIX:
+                        file_list.append(os.path.join(root, single_file))
+        if len(file_list) == 0:
+            raise Exception("No markdown file found in {}".format(fp))
+        file_list = sorted(file_list)
+        return file_list
+
+    def sample(self, inputs: list) -> list:
+        """Generate batches of data from inputs, which can only be file paths.
+
+        Args:
+            inputs (list): List of markdown file paths.
+
+        Yields:
+            list: A batch of data which is a list of markdown file paths.
+        """
+        if not isinstance(inputs, list):
+            inputs = [inputs]
+
+        batch = Batch()
+        for input in inputs:
+            if isinstance(input, str):
+                suffix = input.split(".")[-1].lower()
+                file_path = (
+                    self._download_from_url(input)
+                    if input.startswith("http")
+                    else input
+                )
+                if suffix in self.SUFFIX:
+                    markdown_text = self.md_reader.read(file_path)
+                    batch.append(markdown_text, file_path)
+                    if len(batch) == self.batch_size:
+                        yield batch
+                        batch = Batch()
+                else:
+                    file_list = self._get_files_list(file_path)
+                    for file_path in file_list:
+                        markdown_text = self.md_reader.read(file_path)
+                        batch.append(markdown_text, file_path)
+                        if len(batch) == self.batch_size:
+                            yield batch
+                            batch = Batch()
+            else:
+                logging.warning(
+                    f"Unsupported input data type! Only `str` is supported, so this input has been ignored: {input}."
+                )
+        if len(batch) > 0:
+            yield batch
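
The sampler can also be exercised directly; a minimal sketch (passing batch_size to the constructor is an assumption, mirroring ImageBatchSampler(batch_size=1) used elsewhere in this codebase):

from paddlex.inference.common.batch_sampler import MarkDownBatchSampler

# Accepts .md/.markdown/.mdown/.mkd files, directories (walked recursively),
# and http(s) URLs (downloaded to the cache directory first).
sampler = MarkDownBatchSampler(batch_size=2)  # assumption: BaseBatchSampler takes batch_size
for batch in sampler.sample(["README.md", "./docs"]):
    print(len(batch))  # at most 2 markdown documents per batch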

+ 0 - 4
paddlex/inference/common/result/base_cv_result.py

@@ -37,8 +37,4 @@ class BaseCVResult(BaseResult, ImgMixin):
             fp = Path(fn)
             stem, suffix = fp.stem, fp.suffix
             fn = f"{stem}_{page_idx}{suffix}"
-        if (language := self.get("language", None)) is not None:
-            fp = Path(fn)
-            stem, suffix = fp.stem, fp.suffix
-            fn = f"{stem}_{language}{suffix}"
         return fn

+ 3 - 1
paddlex/inference/common/result/mixin.py

@@ -597,6 +597,8 @@ class VideoMixin:
 class MarkdownMixin:
     """Mixin class for adding Markdown handling capabilities."""
 
+    MARKDOWN_SAVE_KEYS = ["markdown_texts"]
+
     def __init__(self, *args: list, **kwargs: dict):
         """Initializes the Markdown writer and appends the save_to_markdown method to the save functions.
 
@@ -696,7 +698,7 @@ class MarkdownMixin:
         if data is None:
             return
         for key, value in data.items():
-            if isinstance(value, str):
+            if key in self.MARKDOWN_SAVE_KEYS:
                 save_mkd_func(save_path.as_posix(), value, *args, **kwargs)
             if isinstance(value, dict):
                 base_save_path = save_path.parent
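
The stricter check matters because result dicts carry plain string metadata alongside the markdown body; under the old isinstance(value, str) test those metadata strings would be written out as markdown files too. An illustrative sketch (the keys mirror the result dicts elsewhere in this diff):

data = {
    "input_path": "doc.pdf",      # metadata string: skipped by the new allowlist
    "language": "en",             # metadata string: skipped
    "markdown_texts": "# Title",  # the only key actually written to disk
}
saved = {k: v for k, v in data.items() if k in ["markdown_texts"]}  # i.e. MARKDOWN_SAVE_KEYS
print(saved)  # {'markdown_texts': '# Title'}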

+ 1 - 1
paddlex/inference/pipelines/__init__.py

@@ -42,8 +42,8 @@ from .ocr import OCRPipeline
 from .open_vocabulary_detection import OpenVocabularyDetectionPipeline
 from .open_vocabulary_segmentation import OpenVocabularySegmentationPipeline
 from .pp_chatocr import PP_ChatOCRv3_Pipeline, PP_ChatOCRv4_Pipeline
+from .pp_doctranslation import PP_DocTranslation_Pipeline
 from .pp_shitu_v2 import ShiTuV2Pipeline
-from .pp_translation import PP_Translation_Pipeline
 from .rotated_object_detection import RotatedObjectDetectionPipeline
 from .seal_recognition import SealRecognitionPipeline
 from .semantic_segmentation import SemanticSegmentationPipeline

+ 0 - 15
paddlex/inference/pipelines/pp_translation/__init__.py

@@ -1,15 +0,0 @@
-# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .pipeline import PP_Translation_Pipeline

+ 0 - 450
paddlex/inference/pipelines/pp_translation/pipeline.py

@@ -1,450 +0,0 @@
-# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-from typing import Any, Dict, List, Optional, Tuple, Union
-
-import numpy as np
-
-from ....utils import logging
-from ....utils.deps import pipeline_requires_extra
-from ...common.batch_sampler import ImageBatchSampler
-from ...common.reader import ReadImage
-from ...utils.hpi import HPIConfig
-from ...utils.pp_option import PaddlePredictorOption
-from ..base import BasePipeline
-from .result import TranslationMarkdownResult
-
-
-@pipeline_requires_extra("ie")
-class PP_Translation_Pipeline(BasePipeline):
-    entities = ["PP-Translation"]
-
-    def __init__(
-        self,
-        config: Dict,
-        device: str = None,
-        pp_option: PaddlePredictorOption = None,
-        use_hpip: bool = False,
-        hpi_config: Optional[Union[Dict[str, Any], HPIConfig]] = None,
-        initial_predictor: bool = False,
-    ) -> None:
-        """Initializes the PP_Translation_Pipeline.
-
-        Args:
-            config (Dict): Configuration dictionary containing various settings.
-            device (str, optional): Device to run the predictions on. Defaults to None.
-            pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
-            use_hpip (bool, optional): Whether to use the high-performance
-                inference plugin (HPIP) by default. Defaults to False.
-            hpi_config (Optional[Union[Dict[str, Any], HPIConfig]], optional):
-                The default high-performance inference configuration dictionary.
-                Defaults to None.
-            initial_predictor (bool, optional): Whether to initialize the predictor. Defaults to False.
-        """
-
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_config=hpi_config
-        )
-
-        self.pipeline_name = config["pipeline_name"]
-        self.config = config
-        self.use_layout_parser = config.get("use_layout_parser", True)
-
-        self.layout_parsing_pipeline = None
-        self.chat_bot = None
-
-        if initial_predictor:
-            self.inintial_visual_predictor(config)
-            self.inintial_chat_predictor(config)
-
-        self.batch_sampler = ImageBatchSampler(batch_size=1)
-        self.img_reader = ReadImage(format="BGR")
-
-        self.table_structure_len_max = 500
-
-    def inintial_visual_predictor(self, config: dict) -> None:
-        """
-        Initializes the visual predictor with the given configuration.
-
-        Args:
-            config (dict): The configuration dictionary containing the necessary
-                                parameters for initializing the predictor.
-        Returns:
-            None
-        """
-        self.use_layout_parser = config.get("use_layout_parser", True)
-
-        if self.use_layout_parser:
-            layout_parsing_config = config.get("SubPipelines", {}).get(
-                "LayoutParser",
-                {"pipeline_config_error": "config error for layout_parsing_pipeline!"},
-            )
-            self.layout_parsing_pipeline = self.create_pipeline(layout_parsing_config)
-        return
-
-    def inintial_chat_predictor(self, config: dict) -> None:
-        """
-        Initializes the chat predictor with the given configuration.
-
-        Args:
-            config (dict): The configuration dictionary containing the necessary
-                                parameters for initializing the predictor.
-        Returns:
-            None
-        """
-        from .. import create_chat_bot
-
-        chat_bot_config = config.get("SubModules", {}).get(
-            "LLM_Chat",
-            {"chat_bot_config_error": "config error for llm chat bot!"},
-        )
-        self.chat_bot = create_chat_bot(chat_bot_config)
-
-        from .. import create_prompt_engineering
-
-        translate_pe_config = (
-            config.get("SubModules", {})
-            .get("PromptEngneering", {})
-            .get(
-                "Translate_CommonText",
-                {"pe_config_error": "config error for translate_pe_config!"},
-            )
-        )
-        self.translate_pe = create_prompt_engineering(translate_pe_config)
-        return
-
-    def predict(self, *args, **kwargs) -> None:
-        logging.error(
-            "PP-Translation Pipeline does not support calling `predict()` directly! Please invoke `visual_predict`, `build_vector`, and `chat` sequentially to obtain the result."
-        )
-        return
-
-    def visual_predict(
-        self,
-        input: Union[str, List[str], np.ndarray, List[np.ndarray]],
-        use_doc_orientation_classify: Optional[bool] = None,
-        use_doc_unwarping: Optional[bool] = None,
-        use_textline_orientation: Optional[bool] = None,
-        use_seal_recognition: Optional[bool] = None,
-        use_table_recognition: Optional[bool] = None,
-        layout_threshold: Optional[Union[float, dict]] = None,
-        layout_nms: Optional[bool] = None,
-        layout_unclip_ratio: Optional[Union[float, Tuple[float, float], dict]] = None,
-        layout_merge_bboxes_mode: Optional[str] = None,
-        text_det_limit_side_len: Optional[int] = None,
-        text_det_limit_type: Optional[str] = None,
-        text_det_thresh: Optional[float] = None,
-        text_det_box_thresh: Optional[float] = None,
-        text_det_unclip_ratio: Optional[float] = None,
-        text_rec_score_thresh: Optional[float] = None,
-        seal_det_limit_side_len: Optional[int] = None,
-        seal_det_limit_type: Optional[str] = None,
-        seal_det_thresh: Optional[float] = None,
-        seal_det_box_thresh: Optional[float] = None,
-        seal_det_unclip_ratio: Optional[float] = None,
-        seal_rec_score_thresh: Optional[float] = None,
-        **kwargs,
-    ) -> dict:
-        """
-        This function takes an input image or a list of images and performs various visual
-        prediction tasks such as document orientation classification, document unwarping,
-        general OCR, seal recognition, and table recognition based on the provided flags.
-
-        Args:
-            input (Union[str, list[str], np.ndarray, list[np.ndarray]]): Input image path, list of image paths,
-                                                                        numpy array of an image, or list of numpy arrays.
-            use_doc_orientation_classify (bool): Flag to use document orientation classification.
-            use_doc_unwarping (bool): Flag to use document unwarping.
-            use_textline_orientation (Optional[bool]): Whether to use textline orientation prediction.
-            use_seal_recognition (bool): Flag to use seal recognition.
-            use_table_recognition (bool): Flag to use table recognition.
-            layout_threshold (Optional[float]): The threshold value to filter out low-confidence predictions. Default is None.
-            layout_nms (bool, optional): Whether to use layout-aware NMS. Defaults to False.
-            layout_unclip_ratio (Optional[Union[float, Tuple[float, float]]], optional): The ratio of unclipping the bounding box.
-                Defaults to None.
-                If it's a single number, then both width and height are used.
-                If it's a tuple of two numbers, then they are used separately for width and height respectively.
-                If it's None, then no unclipping will be performed.
-            layout_merge_bboxes_mode (Optional[str], optional): The mode for merging bounding boxes. Defaults to None.
-            text_det_limit_side_len (Optional[int]): Maximum side length for text detection.
-            text_det_limit_type (Optional[str]): Type of limit to apply for text detection.
-            text_det_thresh (Optional[float]): Threshold for text detection.
-            text_det_box_thresh (Optional[float]): Threshold for text detection boxes.
-            text_det_unclip_ratio (Optional[float]): Ratio for unclipping text detection boxes.
-            text_rec_score_thresh (Optional[float]): Score threshold for text recognition.
-            seal_det_limit_side_len (Optional[int]): Maximum side length for seal detection.
-            seal_det_limit_type (Optional[str]): Type of limit to apply for seal detection.
-            seal_det_thresh (Optional[float]): Threshold for seal detection.
-            seal_det_box_thresh (Optional[float]): Threshold for seal detection boxes.
-            seal_det_unclip_ratio (Optional[float]): Ratio for unclipping seal detection boxes.
-            seal_rec_score_thresh (Optional[float]): Score threshold for seal recognition.
-            **kwargs: Additional keyword arguments.
-
-        Returns:
-            dict: A dictionary containing the layout parsing result and visual information.
-        """
-        if self.use_layout_parser == False:
-            logging.error("The models for layout parser are not initialized.")
-            yield {"error": "The models for layout parser are not initialized."}
-
-        if self.layout_parsing_pipeline is None:
-            logging.warning(
-                "The layout parsing pipeline is not initialized, will initialize it now."
-            )
-            self.inintial_visual_predictor(self.config)
-
-        for layout_parsing_result in self.layout_parsing_pipeline.predict(
-            input,
-            use_doc_orientation_classify=use_doc_orientation_classify,
-            use_doc_unwarping=use_doc_unwarping,
-            use_textline_orientation=use_textline_orientation,
-            use_seal_recognition=use_seal_recognition,
-            use_table_recognition=use_table_recognition,
-            layout_threshold=layout_threshold,
-            layout_nms=layout_nms,
-            layout_unclip_ratio=layout_unclip_ratio,
-            layout_merge_bboxes_mode=layout_merge_bboxes_mode,
-            text_det_limit_side_len=text_det_limit_side_len,
-            text_det_limit_type=text_det_limit_type,
-            text_det_thresh=text_det_thresh,
-            text_det_box_thresh=text_det_box_thresh,
-            text_det_unclip_ratio=text_det_unclip_ratio,
-            text_rec_score_thresh=text_rec_score_thresh,
-            seal_det_box_thresh=seal_det_box_thresh,
-            seal_det_limit_side_len=seal_det_limit_side_len,
-            seal_det_limit_type=seal_det_limit_type,
-            seal_det_thresh=seal_det_thresh,
-            seal_det_unclip_ratio=seal_det_unclip_ratio,
-            seal_rec_score_thresh=seal_rec_score_thresh,
-        ):
-
-            visual_predict_res = {
-                "layout_parsing_result": layout_parsing_result,
-            }
-            yield visual_predict_res
-
-    def split_markdown(self, md_text, chunk_size):
-
-        if (
-            not isinstance(md_text, str)
-            or not isinstance(chunk_size, int)
-            or chunk_size <= 0
-        ):
-            raise ValueError("Invalid input parameters.")
-
-        chunks = []
-        current_chunk = []
-
-        # if md_text is shorter than chunk_size, return it as a single chunk
-        if len(md_text) < chunk_size:
-            chunks.append(md_text)
-            return chunks
-
-        # split the md_text into paragraphs
-        paragraphs = md_text.split("\n")
-
-        for paragraph in paragraphs:
-            if len(paragraph) == 0:
-                # Skip empty lines
-                continue
-
-            if len(paragraph) <= chunk_size:
-                current_chunk.append(paragraph)
-            else:
-                # if the paragraph is too long, split it into sentences
-                sentences = re.split(r"(?<=[。.!?])", paragraph)
-                for sentence in sentences:
-                    if len(sentence) == 0:
-                        continue
-
-                    if len(sentence) > chunk_size:
-                        raise ValueError("A sentence exceeds the chunk size limit.")
-
-                    # if the current chunk is too long, store it and start a new one
-                    if sum(len(s) for s in current_chunk) + len(sentence) > chunk_size:
-                        chunks.append("\n\n".join(current_chunk))
-                        current_chunk = [sentence]
-                    else:
-                        current_chunk.append(sentence)
-
-            if sum(len(s) for s in current_chunk) >= chunk_size:
-                chunks.append("\n\n".join(current_chunk))
-                current_chunk = []
-
-        if current_chunk:
-            chunks.append("\n\n".join(current_chunk))
-
-        return chunks
-
-    def translate(
-        self,
-        ori_md_info_list: List[Dict],
-        target_language: str = "zh",
-        chunk_size: int = 5000,
-        task_description: str = None,
-        output_format: str = None,
-        rules_str: str = None,
-        few_shot_demo_text_content: str = None,
-        few_shot_demo_key_value_list: str = None,
-        chat_bot_config=None,
-        **kwargs,
-    ):
-        """
-        Translate the given original text into the specified target language using the configured translation model.
-
-        Args:
-            ori_md_info_list (List[Dict]): The original markdown info list to be translated.
-            target_language (str): The desired target language code.
-            **kwargs: Additional keyword arguments passed to the translation model.
-
-        Returns:
-            Tuple[TranslationMarkdownResult, TranslationMarkdownResult]: The source and translated markdown results.
-        """
-        if self.chat_bot is None:
-            logging.warning(
-                "The LLM chat bot is not initialized; initializing it now."
-            )
-            self.inintial_chat_predictor(self.config)
-
-        if chat_bot_config is not None:
-            from .. import create_chat_bot
-
-            chat_bot = create_chat_bot(chat_bot_config)
-        else:
-            chat_bot = self.chat_bot
-
-        if len(ori_md_info_list) == 1:
-            # for single image or single page pdf
-            md_info = ori_md_info_list[0]
-        else:
-            # for multi page pdf
-            md_info = self.concatenate_markdown_pages(ori_md_info_list)
-
-        original_text = md_info["markdown_texts"]
-
-        chunks = self.split_markdown(original_text, chunk_size)
-
-        target_language_md_chunks = []
-
-        if len(chunks) > 1:
-            logging.info(
-                f"The markdown text has length {len(original_text)}; it will be split into {len(chunks)} parts."
-            )
-
-        logging.info(
-            "Starting to translate the markdown text; this will take a while, please wait..."
-        )
-        for idx, chunk in enumerate(chunks):
-            logging.info(f"Translating the {idx+1}/{len(chunks)} part.")
-            prompt = self.translate_pe.generate_prompt(
-                original_text=chunk,
-                language=target_language,
-                task_description=task_description,
-                output_format=output_format,
-                rules_str=rules_str,
-                few_shot_demo_text_content=few_shot_demo_text_content,
-                few_shot_demo_key_value_list=few_shot_demo_key_value_list,
-            )
-            target_language_md_chunk = chat_bot.generate_chat_results(
-                prompt=prompt
-            ).get("content", "")
-
-            target_language_md_chunks.append(target_language_md_chunk)
-
-        target_language_md = "\n\n".join(target_language_md_chunks)
-
-        src_result = {
-            "language": "src",
-            "input_path": md_info["input_path"],
-            "page_index": md_info["page_index"],
-            "page_continuation_flags": md_info["page_continuation_flags"],
-            "markdown_texts": original_text,
-        }
-
-        translate_result = {
-            "language": target_language,
-            "input_path": md_info["input_path"],
-            "page_index": md_info["page_index"],
-            "page_continuation_flags": md_info["page_continuation_flags"],
-            "markdown_texts": target_language_md,
-        }
-        return TranslationMarkdownResult(src_result), TranslationMarkdownResult(
-            translate_result
-        )
-
-    def concatenate_markdown_pages(self, markdown_list: list) -> tuple:
-        """
-        Concatenate Markdown content from multiple pages into a single document.
-
-        Args:
-            markdown_list (list): A list containing Markdown data for each page.
-
-        Returns:
-            TranslationMarkdownResult: The concatenated Markdown result.
-        """
-        markdown_texts = ""
-        previous_page_last_element_paragraph_end_flag = True
-
-        if len(markdown_list) == 0:
-            raise ValueError("The length of markdown_list is zero.")
-
-        for res in markdown_list:
-            # Get the paragraph flags for the current page
-            page_first_element_paragraph_start_flag: bool = res[
-                "page_continuation_flags"
-            ][0]
-            page_last_element_paragraph_end_flag: bool = res["page_continuation_flags"][
-                1
-            ]
-
-            # Determine whether to add a space or a newline
-            if (
-                not page_first_element_paragraph_start_flag
-                and not previous_page_last_element_paragraph_end_flag
-            ):
-                last_char_of_markdown = markdown_texts[-1] if markdown_texts else ""
-                first_char_of_handler = (
-                    res["markdown_texts"][0] if res["markdown_texts"] else ""
-                )
-
-                # Check if the last character and the first character are Chinese characters
-                last_is_chinese_char = (
-                    re.match(r"[\u4e00-\u9fff]", last_char_of_markdown)
-                    if last_char_of_markdown
-                    else False
-                )
-                first_is_chinese_char = (
-                    re.match(r"[\u4e00-\u9fff]", first_char_of_handler)
-                    if first_char_of_handler
-                    else False
-                )
-                if not (last_is_chinese_char or first_is_chinese_char):
-                    markdown_texts += " " + res["markdown_texts"]
-                else:
-                    markdown_texts += res["markdown_texts"]
-            else:
-                markdown_texts += "\n\n" + res["markdown_texts"]
-            previous_page_last_element_paragraph_end_flag = (
-                page_last_element_paragraph_end_flag
-            )
-
-        concatenate_result = {
-            "input_path": markdown_list[0]["input_path"],
-            "page_index": None,
-            "page_continuation_flags": False,
-            "markdown_texts": markdown_texts,
-        }
-
-        return TranslationMarkdownResult(concatenate_result)
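
For reference, the paragraph-first, sentence-fallback chunking idea in split_markdown above can be restated compactly; a simplified sketch, not the pipeline's exact code (whether the renamed PP_DocTranslation_Pipeline keeps this strategy is an assumption):

import re

def split_markdown_sketch(md_text: str, chunk_size: int) -> list:
    """Pack whole paragraphs up to chunk_size; oversized paragraphs are first
    split at sentence boundaries (。.!?), mirroring the deleted code above."""
    if len(md_text) < chunk_size:
        return [md_text]
    chunks, current = [], []
    for paragraph in md_text.split("\n"):
        if not paragraph:
            continue  # skip empty lines
        parts = (
            [paragraph]
            if len(paragraph) <= chunk_size
            else [s for s in re.split(r"(?<=[。.!?])", paragraph) if s]
        )
        for part in parts:
            if current and sum(map(len, current)) + len(part) > chunk_size:
                chunks.append("\n\n".join(current))
                current = []
            current.append(part)
    if current:
        chunks.append("\n\n".join(current))
    return chunks

print(split_markdown_sketch("one\ntwo\nthree", 8))  # ['one\n\ntwo', 'three']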

+ 0 - 26
paddlex/inference/pipelines/pp_translation/result.py

@@ -1,26 +0,0 @@
-# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from ...common.result import BaseCVResult, MarkdownMixin
-
-
-class TranslationMarkdownResult(BaseCVResult, MarkdownMixin):
-    def __init__(self, data) -> None:
-        """Initializes a new instance of the class with the specified data."""
-        super().__init__(data)
-        MarkdownMixin.__init__(self)
-
-    def _to_markdown(self, pretty=True) -> dict:
-        return self

+ 1 - 0
paddlex/inference/utils/io/__init__.py

@@ -17,6 +17,7 @@ from .readers import (
     AudioReader,
     CSVReader,
     ImageReader,
+    MarkDownReader,
     PDFReader,
     ReaderType,
     VideoReader,

+ 1 - 1
paddlex/inference/utils/io/readers.py

@@ -303,7 +303,7 @@ class TXTReaderBackend(_BaseReaderBackend):
 
     def read_file(self, in_path):
         with open(in_path, "r") as f:
-            data = f.readlines()
+            data = f.read()
         return data
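
This one-line change alters the return type: readlines() yields a list of newline-terminated strings, while read() returns the whole file as a single string, which is what the markdown reader added in this commit expects. Illustrative:

with open("a.md") as f:
    as_lines = f.readlines()  # ['# Title\n', 'body\n']  (old: list of lines)
with open("a.md") as f:
    as_text = f.read()        # '# Title\nbody\n'        (new: one string)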
 
 

+ 2 - 0
paddlex/inference/utils/io/writers.py

@@ -54,6 +54,8 @@ class WriterType(enum.Enum):
     XLSX = 6
     CSV = 7
     YAML = 8
+    MARKDOWN = 9
+    TXT = 10
 
 
 class _BaseWriter(object):
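
The new members register writer identifiers for markdown and plain text; the corresponding writer classes are presumably wired up elsewhere in this change. A quick check of the values:

from paddlex.inference.utils.io.writers import WriterType

assert WriterType.MARKDOWN.value == 9
assert WriterType.TXT.value == 10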