# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

from ....utils import logging
from ....utils.deps import pipeline_requires_extra
from ...common.batch_sampler import ImageBatchSampler
from ...common.reader import ReadImage
from ...utils.hpi import HPIConfig
from ...utils.pp_option import PaddlePredictorOption
from ..base import BasePipeline
from .result import TranslationMarkdownResult

@pipeline_requires_extra("ie")
class PP_Translation_Pipeline(BasePipeline):

    entities = ["PP-Translation"]

    def __init__(
        self,
        config: Dict,
        device: str = None,
        pp_option: PaddlePredictorOption = None,
        use_hpip: bool = False,
        hpi_config: Optional[Union[Dict[str, Any], HPIConfig]] = None,
        initial_predictor: bool = False,
    ) -> None:
        """Initializes the PP_Translation_Pipeline.

        Args:
            config (Dict): Configuration dictionary containing various settings.
            device (str, optional): Device to run the predictions on. Defaults to None.
            pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
            use_hpip (bool, optional): Whether to use the high-performance
                inference plugin (HPIP) by default. Defaults to False.
            hpi_config (Optional[Union[Dict[str, Any], HPIConfig]], optional):
                The default high-performance inference configuration dictionary.
                Defaults to None.
            initial_predictor (bool, optional): Whether to initialize the predictors
                eagerly at construction time. Defaults to False.
        """
        super().__init__(
            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_config=hpi_config
        )

        self.pipeline_name = config["pipeline_name"]
        self.config = config
        self.use_layout_parser = config.get("use_layout_parser", True)

        self.layout_parsing_pipeline = None
        self.chat_bot = None

        if initial_predictor:
            self.initial_visual_predictor(config)
            self.initial_chat_predictor(config)

        self.batch_sampler = ImageBatchSampler(batch_size=1)
        self.img_reader = ReadImage(format="BGR")

        self.table_structure_len_max = 500
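
    # Direct-construction sketch (hedged): the config values below are
    # hypothetical placeholders, not an official PaddleX config; in practice
    # the dict comes from the pipeline's YAML configuration. With the default
    # `initial_predictor=False`, the layout parsing pipeline and the chat bot
    # are created lazily on the first call to `visual_predict` / `translate`.
    #
    #     pipeline = PP_Translation_Pipeline(
    #         config={"pipeline_name": "PP-Translation", ...},  # hypothetical
    #         device="gpu:0",  # or "cpu"
    #         initial_predictor=False,  # defer model loading until first use
    #     )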

    def initial_visual_predictor(self, config: dict) -> None:
        """
        Initializes the visual predictor with the given configuration.

        Args:
            config (dict): The configuration dictionary containing the necessary
                parameters for initializing the predictor.
        Returns:
            None
        """
        self.use_layout_parser = config.get("use_layout_parser", True)

        if self.use_layout_parser:
            layout_parsing_config = config.get("SubPipelines", {}).get(
                "LayoutParser",
                {"pipeline_config_error": "config error for layout_parsing_pipeline!"},
            )
            self.layout_parsing_pipeline = self.create_pipeline(layout_parsing_config)
        return

    def initial_chat_predictor(self, config: dict) -> None:
        """
        Initializes the chat predictor with the given configuration.

        Args:
            config (dict): The configuration dictionary containing the necessary
                parameters for initializing the predictor.
        Returns:
            None
        """
        from .. import create_chat_bot

        chat_bot_config = config.get("SubModules", {}).get(
            "LLM_Chat",
            {"chat_bot_config_error": "config error for llm chat bot!"},
        )
        self.chat_bot = create_chat_bot(chat_bot_config)

        from .. import create_prompt_engineering

        translate_pe_config = (
            config.get("SubModules", {})
            .get("PromptEngneering", {})
            .get(
                "Translate_CommonText",
                {"pe_config_error": "config error for translate_pe_config!"},
            )
        )
        self.translate_pe = create_prompt_engineering(translate_pe_config)
        return
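
    # A sketch of the configuration shape this pipeline reads, inferred from
    # the `config.get(...)` calls above; the `{...}` placeholders stand for
    # sub-configs whose contents are not shown in this file:
    #
    #     config = {
    #         "pipeline_name": "PP-Translation",
    #         "use_layout_parser": True,
    #         "SubPipelines": {
    #             "LayoutParser": {...},  # layout parsing sub-pipeline config
    #         },
    #         "SubModules": {
    #             "LLM_Chat": {...},  # passed to create_chat_bot
    #             "PromptEngneering": {  # key spelling matches the code above
    #                 "Translate_CommonText": {...},  # passed to create_prompt_engineering
    #             },
    #         },
    #     }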

    def predict(self, *args, **kwargs) -> None:
        logging.error(
            "The PP-Translation pipeline does not support calling `predict()` directly! "
            "Please invoke `visual_predict` and `translate` sequentially to obtain the result."
        )
        return

    def visual_predict(
        self,
        input: Union[str, List[str], np.ndarray, List[np.ndarray]],
        use_doc_orientation_classify: Optional[bool] = None,
        use_doc_unwarping: Optional[bool] = None,
        use_textline_orientation: Optional[bool] = None,
        use_seal_recognition: Optional[bool] = None,
        use_table_recognition: Optional[bool] = None,
        layout_threshold: Optional[Union[float, dict]] = None,
        layout_nms: Optional[bool] = None,
        layout_unclip_ratio: Optional[Union[float, Tuple[float, float], dict]] = None,
        layout_merge_bboxes_mode: Optional[str] = None,
        text_det_limit_side_len: Optional[int] = None,
        text_det_limit_type: Optional[str] = None,
        text_det_thresh: Optional[float] = None,
        text_det_box_thresh: Optional[float] = None,
        text_det_unclip_ratio: Optional[float] = None,
        text_rec_score_thresh: Optional[float] = None,
        seal_det_limit_side_len: Optional[int] = None,
        seal_det_limit_type: Optional[str] = None,
        seal_det_thresh: Optional[float] = None,
        seal_det_box_thresh: Optional[float] = None,
        seal_det_unclip_ratio: Optional[float] = None,
        seal_rec_score_thresh: Optional[float] = None,
        **kwargs,
    ) -> dict:
        """
        This function takes an input image or a list of images and performs various visual
        prediction tasks such as document orientation classification, document unwarping,
        general OCR, seal recognition, and table recognition based on the provided flags.

        Args:
            input (Union[str, list[str], np.ndarray, list[np.ndarray]]): Input image path, list of image paths,
                numpy array of an image, or list of numpy arrays.
            use_doc_orientation_classify (Optional[bool]): Whether to use document orientation classification.
            use_doc_unwarping (Optional[bool]): Whether to use document unwarping.
            use_textline_orientation (Optional[bool]): Whether to use textline orientation prediction.
            use_seal_recognition (Optional[bool]): Whether to use seal recognition.
            use_table_recognition (Optional[bool]): Whether to use table recognition.
            layout_threshold (Optional[Union[float, dict]]): The threshold value to filter out low-confidence predictions. Defaults to None.
            layout_nms (Optional[bool]): Whether to use layout-aware NMS. Defaults to None.
            layout_unclip_ratio (Optional[Union[float, Tuple[float, float], dict]]): The ratio for unclipping the bounding box.
                Defaults to None.
                If it's a single number, it is used for both width and height.
                If it's a tuple of two numbers, they are used for width and height respectively.
                If it's None, no unclipping is performed.
            layout_merge_bboxes_mode (Optional[str]): The mode for merging bounding boxes. Defaults to None.
            text_det_limit_side_len (Optional[int]): Maximum side length for text detection.
            text_det_limit_type (Optional[str]): Type of limit to apply for text detection.
            text_det_thresh (Optional[float]): Threshold for text detection.
            text_det_box_thresh (Optional[float]): Threshold for text detection boxes.
            text_det_unclip_ratio (Optional[float]): Ratio for unclipping text detection boxes.
            text_rec_score_thresh (Optional[float]): Score threshold for text recognition.
            seal_det_limit_side_len (Optional[int]): Maximum side length for seal detection.
            seal_det_limit_type (Optional[str]): Type of limit to apply for seal detection.
            seal_det_thresh (Optional[float]): Threshold for seal detection.
            seal_det_box_thresh (Optional[float]): Threshold for seal detection boxes.
            seal_det_unclip_ratio (Optional[float]): Ratio for unclipping seal detection boxes.
            seal_rec_score_thresh (Optional[float]): Score threshold for seal recognition.
            **kwargs: Additional keyword arguments.

        Returns:
            dict: A dictionary containing the layout parsing result and visual information.
        """
        if not self.use_layout_parser:
            logging.error("The models for layout parser are not initialized.")
            yield {"error": "The models for layout parser are not initialized."}
            return

        if self.layout_parsing_pipeline is None:
            logging.warning(
                "The layout parsing pipeline is not initialized, will initialize it now."
            )
            self.initial_visual_predictor(self.config)

        for layout_parsing_result in self.layout_parsing_pipeline.predict(
            input,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
            use_textline_orientation=use_textline_orientation,
            use_seal_recognition=use_seal_recognition,
            use_table_recognition=use_table_recognition,
            layout_threshold=layout_threshold,
            layout_nms=layout_nms,
            layout_unclip_ratio=layout_unclip_ratio,
            layout_merge_bboxes_mode=layout_merge_bboxes_mode,
            text_det_limit_side_len=text_det_limit_side_len,
            text_det_limit_type=text_det_limit_type,
            text_det_thresh=text_det_thresh,
            text_det_box_thresh=text_det_box_thresh,
            text_det_unclip_ratio=text_det_unclip_ratio,
            text_rec_score_thresh=text_rec_score_thresh,
            seal_det_box_thresh=seal_det_box_thresh,
            seal_det_limit_side_len=seal_det_limit_side_len,
            seal_det_limit_type=seal_det_limit_type,
            seal_det_thresh=seal_det_thresh,
            seal_det_unclip_ratio=seal_det_unclip_ratio,
            seal_rec_score_thresh=seal_rec_score_thresh,
        ):
            visual_predict_res = {
                "layout_parsing_result": layout_parsing_result,
            }
            yield visual_predict_res
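
    # Minimal usage sketch for `visual_predict` (the input path is
    # illustrative). It is a generator that yields one dict per page:
    #
    #     layout_results = []
    #     for res in pipeline.visual_predict("doc.pdf"):
    #         layout_results.append(res["layout_parsing_result"])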

    def split_markdown(self, md_text, chunk_size):
        if (
            not isinstance(md_text, str)
            or not isinstance(chunk_size, int)
            or chunk_size <= 0
        ):
            raise ValueError("Invalid input parameters.")

        chunks = []
        current_chunk = []

        # if md_text is shorter than chunk_size, return it as a single chunk
        if len(md_text) < chunk_size:
            chunks.append(md_text)
            return chunks

        # split the md_text into paragraphs
        paragraphs = md_text.split("\n")

        for paragraph in paragraphs:
            if len(paragraph) == 0:
                # skip empty lines
                continue

            if len(paragraph) <= chunk_size:
                current_chunk.append(paragraph)
            else:
                # if the paragraph is too long, split it into sentences
                sentences = re.split(r"(?<=[。.!?])", paragraph)
                for sentence in sentences:
                    if len(sentence) == 0:
                        continue
                    if len(sentence) > chunk_size:
                        raise ValueError("A sentence exceeds the chunk size limit.")

                    # if the current chunk would grow too long, store it and start a new one
                    if sum(len(s) for s in current_chunk) + len(sentence) > chunk_size:
                        chunks.append("\n\n".join(current_chunk))
                        current_chunk = [sentence]
                    else:
                        current_chunk.append(sentence)

            # if the current chunk has reached the chunk size, flush it
            if sum(len(s) for s in current_chunk) >= chunk_size:
                chunks.append("\n\n".join(current_chunk))
                current_chunk = []

        if current_chunk:
            chunks.append("\n\n".join(current_chunk))

        return chunks
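
    # A small worked example of the chunking above (illustrative values): with
    # chunk_size=20 and md_text="para one\npara two\npara three", every
    # paragraph fits individually (8, 8, and 10 characters), the accumulated
    # length reaches 26 >= 20 on the third paragraph, and a single chunk
    # "para one\n\npara two\n\npara three" is returned. Note that a flushed
    # chunk may slightly exceed chunk_size, because short paragraphs are
    # appended before the size check runs.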

    def translate(
        self,
        ori_md_info_list: List[Dict],
        target_language: str = "zh",
        chunk_size: int = 5000,
        task_description: str = None,
        output_format: str = None,
        rules_str: str = None,
        few_shot_demo_text_content: str = None,
        few_shot_demo_key_value_list: str = None,
        chat_bot_config=None,
        **kwargs,
    ):
        """
        Translate the given original markdown content into the specified target
        language using the configured translation model.

        Args:
            ori_md_info_list (List[Dict]): Markdown results of the original pages,
                as produced by the visual prediction step.
            target_language (str): The desired target language code. Defaults to "zh".
            chunk_size (int): Maximum chunk length used when splitting the markdown text. Defaults to 5000.
            task_description (str, optional): Task description for prompt engineering. Defaults to None.
            output_format (str, optional): Expected output format for prompt engineering. Defaults to None.
            rules_str (str, optional): Rules for prompt engineering. Defaults to None.
            few_shot_demo_text_content (str, optional): Few-shot demo text content. Defaults to None.
            few_shot_demo_key_value_list (str, optional): Few-shot demo key-value list. Defaults to None.
            chat_bot_config (optional): Overriding chat bot configuration. Defaults to None.
            **kwargs: Additional keyword arguments passed to the translation model.

        Returns:
            Tuple[TranslationMarkdownResult, TranslationMarkdownResult]: The
            source-language and target-language markdown results.
        """
        if self.chat_bot is None:
            logging.warning(
                "The LLM chat bot is not initialized, will initialize it now."
            )
            self.initial_chat_predictor(self.config)

        if chat_bot_config is not None:
            from .. import create_chat_bot

            chat_bot = create_chat_bot(chat_bot_config)
        else:
            chat_bot = self.chat_bot

        if len(ori_md_info_list) == 1:
            # for a single image or a single-page PDF
            md_info = ori_md_info_list[0]
        else:
            # for a multi-page PDF
            md_info = self.concatenate_markdown_pages(ori_md_info_list)

        original_text = md_info["markdown_texts"]
        chunks = self.split_markdown(original_text, chunk_size)

        target_language_md_chunks = []

        if len(chunks) > 1:
            logging.info(
                f"Got the markdown text; its length is {len(original_text)}, will split it into {len(chunks)} parts."
            )
        logging.info(
            "Starting to translate the markdown text; this may take a while. Please wait..."
        )
        for idx, chunk in enumerate(chunks):
            logging.info(f"Translating part {idx + 1}/{len(chunks)}.")
            prompt = self.translate_pe.generate_prompt(
                original_text=chunk,
                language=target_language,
                task_description=task_description,
                output_format=output_format,
                rules_str=rules_str,
                few_shot_demo_text_content=few_shot_demo_text_content,
                few_shot_demo_key_value_list=few_shot_demo_key_value_list,
            )
            target_language_md_chunk = chat_bot.generate_chat_results(
                prompt=prompt
            ).get("content", "")
            target_language_md_chunks.append(target_language_md_chunk)

        target_language_md = "\n\n".join(target_language_md_chunks)

        src_result = {
            "language": "src",
            "input_path": md_info["input_path"],
            "page_index": md_info["page_index"],
            "page_continuation_flags": md_info["page_continuation_flags"],
            "markdown_texts": original_text,
        }
        translate_result = {
            "language": target_language,
            "input_path": md_info["input_path"],
            "page_index": md_info["page_index"],
            "page_continuation_flags": md_info["page_continuation_flags"],
            "markdown_texts": target_language_md,
        }

        return TranslationMarkdownResult(src_result), TranslationMarkdownResult(
            translate_result
        )
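
    # Minimal usage sketch for `translate`. Each element of the input list is
    # assumed to be a dict carrying the keys this method reads ("input_path",
    # "page_index", "page_continuation_flags", "markdown_texts"); the return
    # value is a (source, translation) pair of TranslationMarkdownResult:
    #
    #     src_md, tgt_md = pipeline.translate(md_info_list, target_language="en")
    #     print(tgt_md["markdown_texts"])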

    def concatenate_markdown_pages(
        self, markdown_list: list
    ) -> TranslationMarkdownResult:
        """
        Concatenate Markdown content from multiple pages into a single document.

        Args:
            markdown_list (list): A list containing Markdown data for each page.

        Returns:
            TranslationMarkdownResult: The concatenated Markdown result.
        """
        markdown_texts = ""
        previous_page_last_element_paragraph_end_flag = True

        if len(markdown_list) == 0:
            raise ValueError("The length of markdown_list is zero.")

        for res in markdown_list:
            # Get the paragraph flags for the current page
            page_first_element_paragraph_start_flag: bool = res[
                "page_continuation_flags"
            ][0]
            page_last_element_paragraph_end_flag: bool = res["page_continuation_flags"][
                1
            ]

            # Determine whether to add a space or a newline
            if (
                not page_first_element_paragraph_start_flag
                and not previous_page_last_element_paragraph_end_flag
            ):
                last_char_of_markdown = markdown_texts[-1] if markdown_texts else ""
                first_char_of_handler = (
                    res["markdown_texts"][0] if res["markdown_texts"] else ""
                )
                # Check if the last character and the first character are Chinese characters
                last_is_chinese_char = (
                    re.match(r"[\u4e00-\u9fff]", last_char_of_markdown)
                    if last_char_of_markdown
                    else False
                )
                first_is_chinese_char = (
                    re.match(r"[\u4e00-\u9fff]", first_char_of_handler)
                    if first_char_of_handler
                    else False
                )
                if not (last_is_chinese_char or first_is_chinese_char):
                    markdown_texts += " " + res["markdown_texts"]
                else:
                    markdown_texts += res["markdown_texts"]
            else:
                markdown_texts += "\n\n" + res["markdown_texts"]
            previous_page_last_element_paragraph_end_flag = (
                page_last_element_paragraph_end_flag
            )

        concatenate_result = {
            "input_path": markdown_list[0]["input_path"],
            "page_index": None,
            "page_continuation_flags": False,
            "markdown_texts": markdown_texts,
        }

        return TranslationMarkdownResult(concatenate_result)
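

# End-to-end usage sketch (hedged). Constructing this class directly requires a
# full config dict, so in practice one would go through PaddleX's top-level
# `create_pipeline` API; the pipeline name matches the `entities` list above,
# while the input file name, the `.markdown` accessor on the layout parsing
# result, and `save_to_markdown` are assumptions based on PaddleX result
# conventions, not confirmed by this file:
#
#     from paddlex import create_pipeline
#
#     pipeline = create_pipeline(pipeline="PP-Translation")
#     md_info_list = [
#         res["layout_parsing_result"].markdown
#         for res in pipeline.visual_predict("doc.pdf")
#     ]
#     src_md, tgt_md = pipeline.translate(md_info_list, target_language="en")
#     tgt_md.save_to_markdown("./output/")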