@@ -3,9 +3,6 @@ import sys
 import time
 from urllib.parse import quote

-from magic_pdf.dict2md.ocr_mkcontent import ocr_mk_mm_markdown, ocr_mk_nlp_markdown_with_para, \
-    ocr_mk_mm_markdown_with_para_and_pagination, ocr_mk_mm_markdown_with_para, ocr_mk_mm_standard_format, \
-    make_standard_format_with_para
 from magic_pdf.libs.commons import (
     read_file,
     join_path,
@@ -15,34 +12,19 @@ from magic_pdf.libs.commons import (
 )
 from magic_pdf.libs.drop_reason import DropReason
 from magic_pdf.libs.json_compressor import JsonCompressor
-from magic_pdf.dict2md.mkcontent import mk_nlp_markdown, mk_universal_format
+from magic_pdf.dict2md.mkcontent import mk_universal_format
 from magic_pdf.pdf_parse_by_model import parse_pdf_by_model
 from magic_pdf.filter.pdf_classify_by_type import classify
 from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan
 from loguru import logger

-from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
 from magic_pdf.pdf_parse_for_train import parse_pdf_for_train
-from magic_pdf.spark.base import exception_handler, get_data_source
+from magic_pdf.spark.base import exception_handler, get_data_source, get_bookname, get_pdf_bytes
 from magic_pdf.train_utils.convert_to_train_format import convert_to_train_format
-from app.common.s3 import get_s3_config, get_s3_client
+from magic_pdf.spark.s3 import get_s3_config, get_s3_client


-def get_data_type(jso: dict):
-    data_type = jso.get("data_type")
-    if data_type is None:
-        data_type = jso.get("file_type")
-    return data_type
-
-
-def get_bookid(jso: dict):
-    book_id = jso.get("bookid")
-    if book_id is None:
-        book_id = jso.get("original_file_id")
-    return book_id
-
-
 def meta_scan(jso: dict, doc_layout_check=True) -> dict:
     s3_pdf_path = jso.get("file_location")
     s3_config = get_s3_config(s3_pdf_path)
@@ -310,17 +292,7 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
     file_id = jso.get("file_id")
     book_name = f"{data_source}/{file_id}"

-    # Fixed in 1.23.22
-    # if debug_mode:
-    #     pass
-    # else:
-    #     if book_name == "zlib/zlib_21929367":
-    #         jso['need_drop'] = True
-    #         jso['drop_reason'] = DropReason.SPECIAL_PDF
-    #         return jso
-
     junk_img_bojids = jso["pdf_meta"]["junk_img_bojids"]
-    # total_page = jso['pdf_meta']['total_page']

     # Add a check on the max_svgs count: if max_svgs exceeds 3000, drop the PDF
     svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
@@ -328,9 +300,6 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
     if max_svgs > 3000:
         jso["need_drop"] = True
         jso["drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
-    # elif total_page > 1000:
-    #     jso['need_drop'] = True
-    #     jso['drop_reason'] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_TOTAL_PAGES
     else:
         try:
             save_path = s3_image_save_path
@@ -459,79 +428,10 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> d
     2. Then call ocr_dropped_parse_pdf to process the previously dropped PDFs
     """

-
-def uni_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
-    jso = parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
-    jso = ocr_dropped_parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
-    return jso
-
-
-# Dedicated to re-running dropped PDFs; after the run, the need_drop field must be set back to False
-def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
-    if not jso.get("need_drop", False):
-        return jso
-    else:
-        jso = ocr_parse_pdf_core(
-            jso, start_page_id=start_page_id, debug_mode=debug_mode
-        )
-        jso["need_drop"] = False
-        return jso
-
-
-def ocr_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
-    # Check the debug switch
-    if debug_mode:
-        pass
-    else:  # If debug is off, check whether the need_drop field is set
-        if jso.get("need_drop", False):
-            return jso
-
-    jso = ocr_parse_pdf_core(jso, start_page_id=start_page_id, debug_mode=debug_mode)
-    return jso
-
-
-def ocr_parse_pdf_core(jso: dict, start_page_id=0, debug_mode=False) -> dict:
-
-    s3_pdf_path = jso.get("file_location")
-    s3_config = get_s3_config(s3_pdf_path)
-    pdf_bytes = read_file(s3_pdf_path, s3_config)
-
-    model_output_json_list = jso.get("doc_layout_result")
-    data_source = get_data_source(jso)
-    file_id = jso.get("file_id")
-    book_name = f"{data_source}/{file_id}"
-    try:
-        save_path = s3_image_save_path
-        image_s3_config = get_s3_config(save_path)
-        start_time = time.time()  # record the start time
-        # Log book_name and the parse start time first
-        logger.info(
-            f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
-            file=sys.stderr,
-        )
-        pdf_info_dict = parse_pdf_by_ocr(
-            pdf_bytes,
-            model_output_json_list,
-            save_path,
-            book_name,
-            pdf_model_profile=None,
-            image_s3_config=image_s3_config,
-            start_page_id=start_page_id,
-            debug_mode=debug_mode,
-        )
-        pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
-        jso["pdf_intermediate_dict"] = pdf_info_dict
-        end_time = time.time()  # record the end time
-        parse_time = int(end_time - start_time)  # compute the elapsed time
-        # After parsing, log book_name and the elapsed time
-        logger.info(
-            f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
-            file=sys.stderr,
-        )
-        jso["parse_time"] = parse_time
-    except Exception as e:
-        jso = exception_handler(jso, e)
-    return jso
-
+# def uni_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
+#     jso = parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
+#     jso = ocr_dropped_parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
+#     return jso


 if __name__ == "__main__":