pdf_parse_union_core_v2.py

import os
import statistics
import time
from typing import List

import torch
from loguru import logger

from magic_pdf.config.enums import SupportedPdfParseMethod
from magic_pdf.data.dataset import Dataset, PageableData
from magic_pdf.libs.clean_memory import clean_memory
from magic_pdf.libs.commons import fitz, get_delta_time
from magic_pdf.libs.config_reader import get_local_layoutreader_model_dir
from magic_pdf.libs.convert_utils import dict_to_list
from magic_pdf.libs.drop_reason import DropReason
from magic_pdf.libs.hash_utils import compute_md5
from magic_pdf.libs.local_math import float_equal
from magic_pdf.libs.ocr_content_type import ContentType
from magic_pdf.model.magic_model import MagicModel
from magic_pdf.para.para_split_v3 import para_split
from magic_pdf.pre_proc.construct_page_dict import \
    ocr_construct_page_component_v2
from magic_pdf.pre_proc.citationmarker_remove import remove_citation_marker
from magic_pdf.pre_proc.cut_image import ocr_cut_image_and_table
from magic_pdf.pre_proc.equations_replace import (
    combine_chars_to_pymudict, remove_chars_in_text_blocks,
    replace_equations_in_textblock)
from magic_pdf.pre_proc.ocr_detect_all_bboxes import \
    ocr_prepare_bboxes_for_layout_split_v2
from magic_pdf.pre_proc.ocr_dict_merge import (fill_spans_in_blocks,
                                               fix_block_spans,
                                               fix_discarded_block)
from magic_pdf.pre_proc.ocr_span_list_modify import (
    get_qa_need_list_v2, remove_overlaps_low_confidence_spans,
    remove_overlaps_min_spans)
from magic_pdf.pre_proc.resolve_bbox_conflict import \
    check_useful_block_horizontal_overlap


def remove_horizontal_overlap_block_which_smaller(all_bboxes):
    useful_blocks = []
    for bbox in all_bboxes:
        useful_blocks.append({'bbox': bbox[:4]})
    is_useful_block_horz_overlap, smaller_bbox, bigger_bbox = (
        check_useful_block_horizontal_overlap(useful_blocks)
    )
    if is_useful_block_horz_overlap:
        logger.warning(
            f'skip this page, reason: {DropReason.USEFUL_BLOCK_HOR_OVERLAP}, smaller bbox is {smaller_bbox}, bigger bbox is {bigger_bbox}'
        )  # noqa: E501
        for bbox in all_bboxes.copy():
            if smaller_bbox == bbox[:4]:
                all_bboxes.remove(bbox)
    return is_useful_block_horz_overlap, all_bboxes


def __replace_STX_ETX(text_str: str):
    """Replace \u0002 and \u0003, as these characters become garbled when extracted using pymupdf. In fact, they were originally quotation marks.
    Drawback: This issue is only observed in English text; it has not been found in Chinese text so far.

    Args:
        text_str (str): raw text

    Returns:
        _type_: replaced text
    """  # noqa: E501
    if text_str:
        s = text_str.replace('\u0002', "'")
        s = s.replace('\u0003', "'")
        return s
    return text_str


def txt_spans_extract(pdf_page, inline_equations, interline_equations):
    text_raw_blocks = pdf_page.get_text('dict', flags=fitz.TEXTFLAGS_TEXT)['blocks']
    char_level_text_blocks = pdf_page.get_text('rawdict', flags=fitz.TEXTFLAGS_TEXT)[
        'blocks'
    ]
    text_blocks = combine_chars_to_pymudict(text_raw_blocks, char_level_text_blocks)
    text_blocks = replace_equations_in_textblock(
        text_blocks, inline_equations, interline_equations
    )
    text_blocks = remove_citation_marker(text_blocks)
    text_blocks = remove_chars_in_text_blocks(text_blocks)
    spans = []
    for v in text_blocks:
        for line in v['lines']:
            for span in line['spans']:
                bbox = span['bbox']
                if float_equal(bbox[0], bbox[2]) or float_equal(bbox[1], bbox[3]):
                    continue
                if span.get('type') not in (
                    ContentType.InlineEquation,
                    ContentType.InterlineEquation,
                ):
                    spans.append(
                        {
                            'bbox': list(span['bbox']),
                            'content': __replace_STX_ETX(span['text']),
                            'type': ContentType.Text,
                            'score': 1.0,
                        }
                    )
    return spans


def replace_text_span(pymu_spans, ocr_spans):
    return list(filter(lambda x: x['type'] != ContentType.Text, ocr_spans)) + pymu_spans
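
# Note (illustrative, not from the original source): replace_text_span keeps every OCR
# span whose type is not ContentType.Text (images, tables, equations) and appends the
# text spans extracted from the native PDF text layer, so in TXT parse mode the text
# content comes from pymupdf while non-text content still comes from the model results.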


def model_init(model_name: str):
    from transformers import LayoutLMv3ForTokenClassification

    if torch.cuda.is_available():
        device = torch.device('cuda')
        if torch.cuda.is_bf16_supported():
            supports_bfloat16 = True
        else:
            supports_bfloat16 = False
    else:
        device = torch.device('cpu')
        supports_bfloat16 = False

    if model_name == 'layoutreader':
        # Check whether the local modelscope cache directory for the model exists
        layoutreader_model_dir = get_local_layoutreader_model_dir()
        if os.path.exists(layoutreader_model_dir):
            model = LayoutLMv3ForTokenClassification.from_pretrained(
                layoutreader_model_dir
            )
        else:
            logger.warning(
                'local layoutreader model does not exist, using online model from huggingface'
            )
            model = LayoutLMv3ForTokenClassification.from_pretrained(
                'hantian/layoutreader'
            )
        # Use bfloat16 only when the device supports it
        if supports_bfloat16:
            model.bfloat16()
        model.to(device).eval()
    else:
        logger.error('model name not allowed')
        exit(1)
    return model


class ModelSingleton:
    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(self, model_name: str):
        if model_name not in self._models:
            self._models[model_name] = model_init(model_name=model_name)
        return self._models[model_name]


def do_predict(boxes: List[List[int]], model) -> List[int]:
    from magic_pdf.model.v3.helpers import (boxes2inputs, parse_logits,
                                            prepare_inputs)

    inputs = boxes2inputs(boxes)
    inputs = prepare_inputs(inputs, model)
    logits = model(**inputs).logits.cpu().squeeze(0)
    return parse_logits(logits, len(boxes))


def cal_block_index(fix_blocks, sorted_bboxes):
    for block in fix_blocks:
        # if block['type'] in ['text', 'title', 'interline_equation']:
        #     line_index_list = []
        #     if len(block['lines']) == 0:
        #         block['index'] = sorted_bboxes.index(block['bbox'])
        #     else:
        #         for line in block['lines']:
        #             line['index'] = sorted_bboxes.index(line['bbox'])
        #             line_index_list.append(line['index'])
        #         median_value = statistics.median(line_index_list)
        #         block['index'] = median_value
        #
        # elif block['type'] in ['table', 'image']:
        #     block['index'] = sorted_bboxes.index(block['bbox'])

        line_index_list = []
        if len(block['lines']) == 0:
            block['index'] = sorted_bboxes.index(block['bbox'])
        else:
            for line in block['lines']:
                line['index'] = sorted_bboxes.index(line['bbox'])
                line_index_list.append(line['index'])
            median_value = statistics.median(line_index_list)
            block['index'] = median_value

        # Delete the virtual line info from table/image blocks
        if block['type'] in ['table', 'image']:
            del block['lines']
    return fix_blocks


def insert_lines_into_block(block_bbox, line_height, page_w, page_h):
    # block_bbox is a tuple (x0, y0, x1, y1): (x0, y0) is the lower-left corner and (x1, y1) is the upper-right corner
    x0, y0, x1, y1 = block_bbox

    block_height = y1 - y0
    block_weight = x1 - x0

    # If the block is shorter than 3 body-text lines, return its bbox as-is
    if line_height * 3 < block_height:
        if (
            block_height > page_h * 0.25 and page_w * 0.5 > block_weight > page_w * 0.25
        ):  # Possibly a two-column structure, so slice it more finely
            lines = int(block_height / line_height) + 1
        else:
            # If the block is wider than 0.4 of the page width, split it into 3 lines
            if block_weight > page_w * 0.4:
                line_height = (y1 - y0) / 3
                lines = 3
            elif block_weight > page_w * 0.25:  # Otherwise split the block into 2 lines
                line_height = (y1 - y0) / 2
                lines = 2
            else:  # Decide by aspect ratio
                if block_height / block_weight > 1.2:  # Tall and narrow: do not split
                    return [[x0, y0, x1, y1]]
                else:  # Not tall and narrow: still split into 2 lines
                    line_height = (y1 - y0) / 2
                    lines = 2

        # Determine the y position from which to start drawing the lines
        current_y = y0

        # Store the line positions: [(x0, y, x1, y + line_height), ...]
        lines_positions = []

        for i in range(lines):
            lines_positions.append([x0, current_y, x1, current_y + line_height])
            current_y += line_height
        return lines_positions
    else:
        return [[x0, y0, x1, y1]]
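
# Worked example (illustrative numbers only, not from the original source):
# on a 612 x 792 pt page with a 12 pt body line height,
#   - a 300 x 300 pt block (bbox (50, 100, 350, 400)) is taller than a quarter of the
#     page and between 0.25 and 0.5 of the page width, so it is treated as a possible
#     two-column region and sliced into int(300 / 12) + 1 = 26 virtual lines;
#   - a block of the same height but 512 pt wide exceeds 0.4 of the page width and is
#     simply cut into 3 equal slices of 100 pt each;
#   - a block only 30 pt tall is below 3 line heights and is returned unchanged.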


def sort_lines_by_model(fix_blocks, page_w, page_h, line_height):
    page_line_list = []
    for block in fix_blocks:
        if block['type'] in ['text', 'title', 'interline_equation']:
            if len(block['lines']) == 0:
                bbox = block['bbox']
                lines = insert_lines_into_block(bbox, line_height, page_w, page_h)
                for line in lines:
                    block['lines'].append({'bbox': line, 'spans': []})
                page_line_list.extend(lines)
            else:
                for line in block['lines']:
                    bbox = line['bbox']
                    page_line_list.append(bbox)
        elif block['type'] in ['table', 'image']:
            bbox = block['bbox']
            lines = insert_lines_into_block(bbox, line_height, page_w, page_h)
            block['lines'] = []
            for line in lines:
                block['lines'].append({'bbox': line, 'spans': []})
            page_line_list.extend(lines)

    # Sort the lines with layoutreader
    x_scale = 1000.0 / page_w
    y_scale = 1000.0 / page_h
    boxes = []
    # logger.info(f"Scale: {x_scale}, {y_scale}, Boxes len: {len(page_line_list)}")
    for left, top, right, bottom in page_line_list:
        if left < 0:
            logger.warning(
                f'left < 0, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            left = 0
        if right > page_w:
            logger.warning(
                f'right > page_w, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            right = page_w
        if top < 0:
            logger.warning(
                f'top < 0, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            top = 0
        if bottom > page_h:
            logger.warning(
                f'bottom > page_h, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            bottom = page_h

        left = round(left * x_scale)
        top = round(top * y_scale)
        right = round(right * x_scale)
        bottom = round(bottom * y_scale)
        assert (
            1000 >= right >= left >= 0 and 1000 >= bottom >= top >= 0
        ), f'Invalid box. right: {right}, left: {left}, bottom: {bottom}, top: {top}'  # noqa: E126, E121
        boxes.append([left, top, right, bottom])

    model_manager = ModelSingleton()
    model = model_manager.get_model('layoutreader')
    with torch.no_grad():
        orders = do_predict(boxes, model)
    sorted_bboxes = [page_line_list[i] for i in orders]

    return sorted_bboxes
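
# Note (illustrative): the x_scale / y_scale rounding above maps every line bbox onto a
# 0-1000 grid before it is handed to layoutreader. For example, on a 612 x 792 pt page a
# line bbox of [50, 100, 350, 130] is fed to the model as [82, 126, 572, 164]; the
# predicted reading-order indices are then mapped back onto the original page_line_list.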


def get_line_height(blocks):
    page_line_height_list = []
    for block in blocks:
        if block['type'] in ['text', 'title', 'interline_equation']:
            for line in block['lines']:
                bbox = line['bbox']
                page_line_height_list.append(int(bbox[3] - bbox[1]))
    if len(page_line_height_list) > 0:
        return statistics.median(page_line_height_list)
    else:
        return 10


def parse_page_core(
    page_doc: PageableData, magic_model, page_id, pdf_bytes_md5, imageWriter, parse_mode
):
    need_drop = False
    drop_reason = []

    """Get the block information we will need later from the magic_model object"""
    img_blocks = magic_model.get_imgs(page_id)
    table_blocks = magic_model.get_tables(page_id)
    discarded_blocks = magic_model.get_discarded(page_id)
    text_blocks = magic_model.get_text_blocks(page_id)
    title_blocks = magic_model.get_title_blocks(page_id)
    inline_equations, interline_equations, interline_equation_blocks = (
        magic_model.get_equations(page_id)
    )

    page_w, page_h = magic_model.get_page_size(page_id)

    spans = magic_model.get_all_spans(page_id)

    """Build the spans according to parse_mode"""
    if parse_mode == SupportedPdfParseMethod.TXT:
        """Replace the text-type spans from OCR with pymu spans"""
        pymu_spans = txt_spans_extract(page_doc, inline_equations, interline_equations)
        spans = replace_text_span(pymu_spans, spans)
    elif parse_mode == SupportedPdfParseMethod.OCR:
        pass
    else:
        raise Exception('parse_mode must be txt or ocr')

    """Among overlapping spans, delete the lower-confidence ones"""
    spans, dropped_spans_by_confidence = remove_overlaps_low_confidence_spans(spans)
    """Among overlapping spans, delete the smaller ones"""
    spans, dropped_spans_by_span_overlap = remove_overlaps_min_spans(spans)

    """Crop images and tables out of the page"""
    spans = ocr_cut_image_and_table(
        spans, page_doc, page_id, pdf_bytes_md5, imageWriter
    )

    """Gather the bboxes of all blocks"""
    # The interline_equation_blocks parameter is not accurate enough; switch to interline_equations below
    interline_equation_blocks = []
    if len(interline_equation_blocks) > 0:
        all_bboxes, all_discarded_blocks = ocr_prepare_bboxes_for_layout_split_v2(
            img_blocks,
            table_blocks,
            discarded_blocks,
            text_blocks,
            title_blocks,
            interline_equation_blocks,
            page_w,
            page_h,
        )
    else:
        all_bboxes, all_discarded_blocks = ocr_prepare_bboxes_for_layout_split_v2(
            img_blocks,
            table_blocks,
            discarded_blocks,
            text_blocks,
            title_blocks,
            interline_equations,
            page_w,
            page_h,
        )

    """First handle the discarded_blocks, which do not need layout ordering"""
    discarded_block_with_spans, spans = fill_spans_in_blocks(
        all_discarded_blocks, spans, 0.4
    )
    fix_discarded_blocks = fix_discarded_block(discarded_block_with_spans)

    """Skip this page if it contains no bbox"""
    if len(all_bboxes) == 0:
        logger.warning(f'skip this page, no useful bbox found, page_id: {page_id}')
        return ocr_construct_page_component_v2(
            [],
            [],
            page_id,
            page_w,
            page_h,
            [],
            [],
            [],
            interline_equations,
            fix_discarded_blocks,
            need_drop,
            drop_reason,
        )

    """Fill the spans into the blocks"""
    block_with_spans, spans = fill_spans_in_blocks(all_bboxes, spans, 0.5)

    """Fix the blocks"""
    fix_blocks = fix_block_spans(block_with_spans, img_blocks, table_blocks)

    """Collect all lines and compute the body-text line height"""
    line_height = get_line_height(fix_blocks)

    """Collect all lines and sort them"""
    sorted_bboxes = sort_lines_by_model(fix_blocks, page_w, page_h, line_height)

    """Compute the block order from the median of its line indices"""
    fix_blocks = cal_block_index(fix_blocks, sorted_bboxes)

    """Re-order the blocks"""
    sorted_blocks = sorted(fix_blocks, key=lambda b: b['index'])

    """Get the lists that need to be exposed for QA"""
    images, tables, interline_equations = get_qa_need_list_v2(sorted_blocks)

    """Build pdf_info_dict"""
    page_info = ocr_construct_page_component_v2(
        sorted_blocks,
        [],
        page_id,
        page_w,
        page_h,
        [],
        images,
        tables,
        interline_equations,
        fix_discarded_blocks,
        need_drop,
        drop_reason,
    )
    return page_info


def pdf_parse_union(
    dataset: Dataset,
    model_list,
    imageWriter,
    parse_mode,
    start_page_id=0,
    end_page_id=None,
    debug_mode=False,
):
    pdf_bytes_md5 = compute_md5(dataset.data_bits())

    """Initialize an empty pdf_info_dict"""
    pdf_info_dict = {}

    """Initialize magic_model from model_list and the dataset"""
    magic_model = MagicModel(model_list, dataset)

    """Parse the pdf over the requested page range"""
    # end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
    end_page_id = (
        end_page_id
        if end_page_id is not None and end_page_id >= 0
        else len(dataset) - 1
    )
    if end_page_id > len(dataset) - 1:
        logger.warning('end_page_id is out of range, using the dataset length instead')
        end_page_id = len(dataset) - 1

    """Initialize the start time"""
    start_time = time.time()

    for page_id, page in enumerate(dataset):
        """In debug mode, log how long each page took to parse."""
        if debug_mode:
            time_now = time.time()
            logger.info(
                f'page_id: {page_id}, last_page_cost_time: {get_delta_time(start_time)}'
            )
            start_time = time_now

        """Parse each page of the pdf"""
        if start_page_id <= page_id <= end_page_id:
            page_info = parse_page_core(
                page, magic_model, page_id, pdf_bytes_md5, imageWriter, parse_mode
            )
        else:
            page_info = page.get_page_info()
            page_w = page_info.w
            page_h = page_info.h
            page_info = ocr_construct_page_component_v2(
                [], [], page_id, page_w, page_h, [], [], [], [], [], True, 'skip page'
            )
        pdf_info_dict[f'page_{page_id}'] = page_info

    """Split into paragraphs"""
    para_split(pdf_info_dict, debug_mode=debug_mode)

    """Convert the dict to a list"""
    pdf_info_list = dict_to_list(pdf_info_dict)
    new_pdf_info_dict = {
        'pdf_info': pdf_info_list,
    }

    clean_memory()

    return new_pdf_info_dict


if __name__ == '__main__':
    pass
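    # Minimal smoke test (illustrative only, not part of the original module): it
    # exercises the pure helpers defined above without needing a PDF, a layout model,
    # or an imageWriter. All sample values below are made up for demonstration.
    sample_text = 'He said \u0002hello\u0003 to everyone'
    print(__replace_STX_ETX(sample_text))  # -> He said 'hello' to everyone

    # A 300 x 300 pt block on a 612 x 792 pt page with 12 pt body lines falls into the
    # "possible two-column" branch and is sliced into int(300 / 12) + 1 = 26 virtual lines.
    virtual_lines = insert_lines_into_block((50, 100, 350, 400), 12, 612, 792)
    print(len(virtual_lines), virtual_lines[0])  # -> 26 [50, 100, 350, 112]

    # Median line height over a couple of hand-written fake blocks.
    demo_blocks = [
        {'type': 'text', 'lines': [{'bbox': [0, 0, 100, 12]}, {'bbox': [0, 14, 100, 25]}]},
        {'type': 'image', 'lines': []},
    ]
    print(get_line_height(demo_blocks))  # -> 11.5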