@@ -25,6 +25,7 @@ from ...common.batch_sampler import ImageBatchSampler
from ....utils import logging
from ...utils.pp_option import PaddlePredictorOption
from ..layout_parsing.result import LayoutParsingResult
+from ..components.chat_server import BaseChat


class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
@@ -38,6 +39,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
        device: str = None,
        pp_option: PaddlePredictorOption = None,
        use_hpip: bool = False,
+        initial_predictor: bool = True,
    ) -> None:
-        """Initializes the pp-chatocrv3-doc pipeline.
+        """Initializes the pp-chatocrv4-doc pipeline.
@@ -47,22 +49,34 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
            pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
            use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
            use_layout_parsing (bool, optional): Whether to use layout parsing. Defaults to True.
+            initial_predictor (bool, optional): Whether to initialize predictors at construction time; if False, they are initialized lazily on first use. Defaults to True.
        """

        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)

        self.pipeline_name = config["pipeline_name"]
+        self.config = config
+        self.use_layout_parser = config.get("use_layout_parser", True)
+        self.use_mllm_predict = config.get("use_mllm_predict", True)
+
+        self.layout_parsing_pipeline = None
+        self.chat_bot = None
+        self.retriever = None
+        self.mllm_chat_bot = None

-        self.inintial_predictor(config)
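+        # When initial_predictor is False, eager setup is skipped and each
+        # predictor is created lazily by the corresponding
+        # inintial_*_predictor call the first time it is needed.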
+        if initial_predictor:
+            self.inintial_visual_predictor(config)
+            self.inintial_chat_predictor(config)
+            self.inintial_mllm_predictor(config)

        self.batch_sampler = ImageBatchSampler(batch_size=1)
        self.img_reader = ReadImage(format="BGR")

        self.table_structure_len_max = 500

-    def inintial_predictor(self, config: dict) -> None:
+    def inintial_visual_predictor(self, config: dict) -> None:
        """
-        Initializes the predictor with the given configuration.
+        Initializes the visual predictor with the given configuration.

        Args:
            config (dict): The configuration dictionary containing the necessary
@@ -70,23 +84,26 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
        Returns:
            None
        """
-
        self.use_layout_parser = config.get("use_layout_parser", True)
+
        if self.use_layout_parser:
            layout_parsing_config = config.get("SubPipelines", {}).get(
                "LayoutParser",
                {"pipeline_config_error": "config error for layout_parsing_pipeline!"},
            )
            self.layout_parsing_pipeline = self.create_pipeline(layout_parsing_config)
+            return

-        from .. import create_chat_bot
-
-        chat_bot_config = config.get("SubModules", {}).get(
-            "LLM_Chat",
-            {"chat_bot_config_error": "config error for llm chat bot!"},
-        )
-        self.chat_bot = create_chat_bot(chat_bot_config)
+    def inintial_retriever_predictor(self, config: dict) -> None:
+        """
+        Initializes the retriever predictor with the given configuration.
+
+        Args:
+            config (dict): The configuration dictionary containing the necessary
+                parameters for initializing the predictor.
+        Returns:
+            None
+        """
        from .. import create_retriever

        retriever_config = config.get("SubModules", {}).get(
@@ -95,7 +112,25 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
        )
        self.retriever = create_retriever(retriever_config)

-        from .. import create_prompt_engeering
+    def inintial_chat_predictor(self, config: dict) -> None:
+        """
+        Initializes the chat predictor with the given configuration.
+
+        Args:
+            config (dict): The configuration dictionary containing the necessary
+                parameters for initializing the predictor.
+        Returns:
+            None
+        """
+        from .. import create_chat_bot
+
+        chat_bot_config = config.get("SubModules", {}).get(
+            "LLM_Chat",
+            {"chat_bot_config_error": "config error for llm chat bot!"},
+        )
+        self.chat_bot = create_chat_bot(chat_bot_config)
+
+        from .. import create_prompt_engineering

        text_pe_config = (
            config.get("SubModules", {})
@@ -105,7 +140,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
                {"pe_config_error": "config error for text_pe!"},
            )
        )
-        self.text_pe = create_prompt_engeering(text_pe_config)
+        self.text_pe = create_prompt_engineering(text_pe_config)

        table_pe_config = (
            config.get("SubModules", {})
@@ -115,7 +150,20 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
                {"pe_config_error": "config error for table_pe!"},
            )
        )
-        self.table_pe = create_prompt_engeering(table_pe_config)
+        self.table_pe = create_prompt_engineering(table_pe_config)
+        return
+
+    def inintial_mllm_predictor(self, config: dict) -> None:
+        """
+        Initializes the mLLM predictor with the given configuration.
+
+        Args:
+            config (dict): The configuration dictionary containing the necessary
+                parameters for initializing the predictor.
+        Returns:
+            None
+        """
+        from .. import create_chat_bot, create_prompt_engineering

        self.use_mllm_predict = config.get("use_mllm_predict", True)
        if self.use_mllm_predict:
@@ -132,7 +180,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
                {"pe_config_error": "config error for ensemble_pe!"},
            )
        )
-        self.ensemble_pe = create_prompt_engeering(ensemble_pe_config)
+        self.ensemble_pe = create_prompt_engineering(ensemble_pe_config)
        return

    def decode_visual_result(self, layout_parsing_result: LayoutParsingResult) -> dict:
@@ -226,6 +274,12 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
            logging.error("The models for layout parser are not initialized.")
            yield {"error": "The models for layout parser are not initialized."}

+        if self.layout_parsing_pipeline is None:
+            logging.warning(
+                "The layout parsing pipeline is not initialized, will initialize it now."
+            )
+            self.inintial_visual_predictor(self.config)
+
        for layout_parsing_result in self.layout_parsing_pipeline.predict(
            input,
            use_doc_orientation_classify=use_doc_orientation_classify,
@@ -332,6 +386,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
        min_characters: int = 3500,
        llm_request_interval: float = 1.0,
        flag_save_bytes_vector: bool = False,
+        retriever_config: dict = None,
    ) -> dict:
        """
        Build a vector representation from visual information.
@@ -341,6 +396,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
            min_characters (int): The minimum number of characters required for text processing, defaults to 3500.
            llm_request_interval (float): The interval between LLM requests, defaults to 1.0.
            flag_save_bytes_vector (bool): Whether to save the vector as bytes, defaults to False.
+            retriever_config (dict): The configuration for the retriever, defaults to None.

        Returns:
            dict: A dictionary containing the vector info and a flag indicating if the text is too short.
@@ -351,6 +407,18 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
        else:
            visual_info_list = visual_info

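+        # Per-call override: an explicit retriever_config takes precedence
+        # over the pipeline-level retriever, which is otherwise initialized
+        # lazily on first use.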
+        if retriever_config is not None:
+            from .. import create_retriever
+
+            retriever = create_retriever(retriever_config)
+        else:
+            if self.retriever is None:
+                logging.warning(
+                    "The retriever is not initialized, will initialize it now."
+                )
+                self.inintial_retriever_predictor(self.config)
+            retriever = self.retriever
+
        all_visual_info = self.merge_visual_info_list(visual_info_list)
        (
            all_normal_text_list,
@@ -376,9 +444,9 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
        vector_info["flag_save_bytes_vector"] = False
        if len(all_text_str) > min_characters:
            vector_info["flag_too_short_text"] = False
-            vector_info["vector"] = self.retriever.generate_vector_database(all_items)
+            vector_info["vector"] = retriever.generate_vector_database(all_items)
            if flag_save_bytes_vector:
-                vector_info["vector"] = self.retriever.encode_vector_store_to_bytes(
+                vector_info["vector"] = retriever.encode_vector_store_to_bytes(
                    vector_info["vector"]
                )
                vector_info["flag_save_bytes_vector"] = True
@@ -394,6 +462,10 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):

    def load_vector(self, data_path: str) -> dict:
        vector_info = None
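+        # Lazily initialize the retriever if needed before decoding the
+        # stored vectors.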
+        if self.retriever is None:
+            logging.warning("The retriever is not initialized, will initialize it now.")
+            self.inintial_retriever_predictor(self.config)
+
        with open(data_path, "r") as fin:
            data = fin.readline()
            vector_info = json.loads(data)
@@ -441,6 +513,10 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
        key_list,
        **kwargs,
    ) -> dict:
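+        # Fail fast when mLLM prediction is disabled in the pipeline config.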
+        if not self.use_mllm_predict:
+            logging.error("MLLM prediction is disabled.")
+            return {"mllm_res": "Error:MLLM prediction is disabled!"}
+
        key_list = self.format_key(key_list)
        if len(key_list) == 0:
            return {"mllm_res": "Error:The input key_list is invalid!"}
@@ -457,6 +533,12 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
            logging.error("The input with PDF should have only one page.")
            return {"mllm_res": "Error:The input with PDF should have only one page!"}

+        if self.mllm_chat_bot is None:
+            logging.warning(
+                "The MLLM chat bot is not initialized, will initialize it now."
+            )
+            self.inintial_mllm_predictor(self.config)
+
        for image_array in image_array_list:

            assert len(image_array.shape) == 3
@@ -477,7 +559,12 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
            return {"mllm_res": result}

    def generate_and_merge_chat_results(
-        self, prompt: str, key_list: list, final_results: dict, failed_results: list
+        self,
+        chat_bot: BaseChat,
+        prompt: str,
+        key_list: list,
+        final_results: dict,
+        failed_results: list,
    ) -> None:
        """
        Generate and merge chat results into the final results dictionary.
@@ -492,7 +579,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
            None
        """

-        llm_result = self.chat_bot.generate_chat_results(prompt)
+        llm_result = chat_bot.generate_chat_results(prompt)
        if llm_result is None:
            logging.error(
                "chat bot error: \n [prompt:]\n %s\n [result:] %s\n"
@@ -510,6 +597,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):

    def get_related_normal_text(
        self,
+        retriever_config: dict,
        use_vector_retrieval: bool,
        vector_info: dict,
        key_list: List[str],
@@ -520,6 +608,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
        Retrieve related normal text based on vector retrieval or all normal text list.

        Args:
+            retriever_config (dict): Configuration for the retriever.
            use_vector_retrieval (bool): Whether to use vector retrieval.
            vector_info (dict): Dictionary containing vector information.
            key_list (list[str]): List of keys to generate question keys.
@@ -531,10 +620,23 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
        """

        if use_vector_retrieval and vector_info is not None:
+
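+            # Same per-call override pattern as build_vector: an explicit
+            # retriever_config wins; otherwise fall back to (and lazily
+            # initialize) the pipeline-level retriever.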
+            if retriever_config is not None:
+                from .. import create_retriever
+
+                retriever = create_retriever(retriever_config)
+            else:
+                if self.retriever is None:
+                    logging.warning(
+                        "The retriever is not initialized, will initialize it now."
+                    )
+                    self.inintial_retriever_predictor(self.config)
+                retriever = self.retriever
+
            question_key_list = [f"{key}" for key in key_list]
            vector = vector_info["vector"]
            if not vector_info["flag_too_short_text"]:
-                related_text = self.retriever.similarity_retrieval(
+                related_text = retriever.similarity_retrieval(
                    question_key_list, vector, topk=50, min_characters=min_characters
                )
            else:
@@ -619,8 +721,10 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
        table_rules_str: str = None,
        table_few_shot_demo_text_content: str = None,
        table_few_shot_demo_key_value_list: str = None,
-        mllm_predict_dict: dict = None,
+        mllm_predict_info: dict = None,
        mllm_integration_strategy: str = "integration",
+        chat_bot_config: dict = None,
+        retriever_config: dict = None,
    ) -> dict:
        """
        Generates chat results based on the provided key list and visual information.
@@ -642,7 +746,9 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
            table_few_shot_demo_text_content (str): The text content for table few-shot demos.
            table_few_shot_demo_key_value_list (str): The key-value list for table few-shot demos.
-            mllm_predict_dict (dict): The dictionary of mLLM predicts.
-            mllm_integration_strategy(str): The integration strategy of mLLM and LLM, defaults to "integration", options are "integration", "llm_only" and "mllm_only".
+            mllm_predict_info (dict): The dictionary of mLLM prediction results.
+            mllm_integration_strategy (str): The integration strategy of mLLM and LLM, defaults to "integration", options are "integration", "llm_only" and "mllm_only".
+            chat_bot_config (dict): The parameters for the LLM chat bot, such as api_type and api_key; refer to the config file for more details. Defaults to None.
+            retriever_config (dict): The parameters for the retriever, such as api_type and api_key; refer to the config file for more details. Defaults to None.
        Returns:
            dict: A dictionary containing the chat results.
        """
@@ -657,6 +763,19 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
        else:
            visual_info_list = visual_info

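+        # Ensure the LLM chat bot (and its prompt-engineering objects) exist,
+        # then honor a per-call chat_bot_config override if one is provided.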
+        if self.chat_bot is None:
+            logging.warning(
+                "The LLM chat bot is not initialized, will initialize it now."
+            )
+            self.inintial_chat_predictor(self.config)
+
+        if chat_bot_config is not None:
+            from .. import create_chat_bot
+
+            chat_bot = create_chat_bot(chat_bot_config)
+        else:
+            chat_bot = self.chat_bot
+
        all_visual_info = self.merge_visual_info_list(visual_info_list)
        (
            all_normal_text_list,
@@ -670,6 +789,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):

        if len(key_list) > 0:
            related_text = self.get_related_normal_text(
+                retriever_config,
                use_vector_retrieval,
                vector_info,
                key_list,
@@ -688,7 +808,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
                few_shot_demo_key_value_list=text_few_shot_demo_key_value_list,
            )
            self.generate_and_merge_chat_results(
-                prompt, key_list, final_results, failed_results
+                chat_bot, prompt, key_list, final_results, failed_results
            )

        if len(key_list) > 0:
@@ -715,16 +835,24 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
            )

            self.generate_and_merge_chat_results(
-                prompt, key_list, final_results, failed_results
+                chat_bot,
+                prompt,
+                key_list,
+                final_results,
+                failed_results,
            )

-        if self.use_mllm_predict and mllm_predict_dict != "llm_only":
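+        # Fixes the old condition, which compared mllm_predict_dict (a dict)
+        # against the string "llm_only" instead of checking
+        # mllm_integration_strategy, and guards against a missing
+        # mllm_predict_info.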
+        if (
+            self.use_mllm_predict
+            and mllm_integration_strategy != "llm_only"
+            and mllm_predict_info is not None
+        ):
            if mllm_integration_strategy == "integration":
                final_predict_dict = self.ensemble_ocr_llm_mllm(
-                    key_list_ori, final_results, mllm_predict_dict
+                    key_list_ori, final_results, mllm_predict_info
                )
            elif mllm_integration_strategy == "mllm_only":
-                final_predict_dict = mllm_predict_dict
+                final_predict_dict = mllm_predict_info
            else:
                return {
                    "chat_res": f"Error:Unsupported mllm_integration_strategy {mllm_integration_strategy}, only support 'integration', 'llm_only' and 'mllm_only'!"
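
Usage sketch (not part of the diff): a minimal illustration of the deferred-initialization flow added above. It assumes the pipeline is constructed with a loaded config dict and that the visual stage is exposed as visual_predict, as in PaddleX's PP-ChatOCR pipelines; adjust names to your setup.

    # Build the pipeline without creating any predictors up front.
    pipeline = PP_ChatOCRv4_Pipeline(config=config, initial_predictor=False)

    # The layout-parsing pipeline is initialized on the first visual call.
    visual_results = list(pipeline.visual_predict("document.png"))

    # build_vector and chat likewise initialize the retriever and LLM chat bot
    # lazily, or accept per-call retriever_config / chat_bot_config overrides.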