# pdf_parse_union_core_v2.py

import copy
import os
import statistics
import time
from typing import List

import torch
from loguru import logger

from magic_pdf.config.drop_reason import DropReason
from magic_pdf.config.enums import SupportedPdfParseMethod
from magic_pdf.config.ocr_content_type import BlockType, ContentType
from magic_pdf.data.dataset import Dataset, PageableData
from magic_pdf.libs.boxbase import calculate_overlap_area_in_bbox1_area_ratio
from magic_pdf.libs.clean_memory import clean_memory
from magic_pdf.libs.commons import fitz, get_delta_time
from magic_pdf.libs.config_reader import get_local_layoutreader_model_dir
from magic_pdf.libs.convert_utils import dict_to_list
from magic_pdf.libs.hash_utils import compute_md5
from magic_pdf.libs.local_math import float_equal
from magic_pdf.libs.pdf_image_tools import cut_image_to_pil_image
from magic_pdf.model.magic_model import MagicModel

os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # disable albumentations update check
os.environ['YOLO_VERBOSE'] = 'False'  # disable yolo logger

try:
    import torchtext

    if torchtext.__version__ >= "0.18.0":
        torchtext.disable_torchtext_deprecation_warning()
except ImportError:
    pass

from magic_pdf.model.sub_modules.model_init import AtomModelSingleton
from magic_pdf.para.para_split_v3 import para_split
from magic_pdf.pre_proc.citationmarker_remove import remove_citation_marker
from magic_pdf.pre_proc.construct_page_dict import \
    ocr_construct_page_component_v2
from magic_pdf.pre_proc.cut_image import ocr_cut_image_and_table
from magic_pdf.pre_proc.equations_replace import (
    combine_chars_to_pymudict, remove_chars_in_text_blocks,
    replace_equations_in_textblock)
from magic_pdf.pre_proc.ocr_detect_all_bboxes import \
    ocr_prepare_bboxes_for_layout_split_v2
from magic_pdf.pre_proc.ocr_dict_merge import (fill_spans_in_blocks,
                                               fix_block_spans_v2,
                                               fix_discarded_block)
from magic_pdf.pre_proc.ocr_span_list_modify import (
    get_qa_need_list_v2, remove_overlaps_low_confidence_spans,
    remove_overlaps_min_spans)
from magic_pdf.pre_proc.resolve_bbox_conflict import \
    check_useful_block_horizontal_overlap


def remove_horizontal_overlap_block_which_smaller(all_bboxes):
    useful_blocks = []
    for bbox in all_bboxes:
        useful_blocks.append({'bbox': bbox[:4]})
    is_useful_block_horz_overlap, smaller_bbox, bigger_bbox = (
        check_useful_block_horizontal_overlap(useful_blocks)
    )
    if is_useful_block_horz_overlap:
        logger.warning(
            f'skip this page, reason: {DropReason.USEFUL_BLOCK_HOR_OVERLAP}, smaller bbox is {smaller_bbox}, bigger bbox is {bigger_bbox}'
        )  # noqa: E501
        for bbox in all_bboxes.copy():
            if smaller_bbox == bbox[:4]:
                all_bboxes.remove(bbox)
    return is_useful_block_horz_overlap, all_bboxes


def __replace_STX_ETX(text_str: str):
    """Replace \u0002 and \u0003, as these characters become garbled when extracted using pymupdf. In fact, they were originally quotation marks.

    Drawback: This issue is only observed in English text; it has not been found in Chinese text so far.

    Args:
        text_str (str): raw text

    Returns:
        str: replaced text
    """  # noqa: E501
    if text_str:
        s = text_str.replace('\u0002', "'")
        s = s.replace('\u0003', "'")
        return s
    return text_str


def chars_to_content(span):
    # Check whether the span's char list is empty
    if len(span['chars']) == 0:
        span['content'] = ''
    else:
        # First sort the chars by the x coordinate of the center of char['bbox']
        span['chars'] = sorted(span['chars'], key=lambda x: (x['bbox'][0] + x['bbox'][2]) / 2)

        # Compute the average char width
        char_width_sum = sum([char['bbox'][2] - char['bbox'][0] for char in span['chars']])
        char_avg_width = char_width_sum / len(span['chars'])

        content = ''
        for char in span['chars']:
            # If the gap between this char's x0 and the previous char's x1 exceeds one
            # average char width, insert a space between them
            if char['bbox'][0] - span['chars'][span['chars'].index(char) - 1]['bbox'][2] > char_avg_width:
                content += ' '
            content += char['c']

        span['content'] = __replace_STX_ETX(content)

    del span['chars']
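
# Illustrative example (added note, not from the original source): with two chars
#   {'c': 'a', 'bbox': [0, 0, 10, 10]} and {'c': 'b', 'bbox': [25, 0, 35, 10]}
# the average char width is 10 and the horizontal gap is 25 - 10 = 15 > 10, so
# chars_to_content() sets span['content'] to 'a b' (a space is inserted) and
# removes the 'chars' key.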


LINE_STOP_FLAG = ('.', '!', '?', '。', '!', '?', ')', ')', '"', '”', ':', ':', ';', ';', ']', '】', '}', '}', '>', '》', '、', ',', ',', '-', '—', '–',)


def fill_char_in_spans(spans, all_chars):
    for char in all_chars:
        for span in spans:
            # Check whether the char is one of the LINE_STOP_FLAG characters
            if char['c'] in LINE_STOP_FLAG:
                char_is_line_stop_flag = True
            else:
                char_is_line_stop_flag = False
            if calculate_char_in_span(char['bbox'], span['bbox'], char_is_line_stop_flag):
                span['chars'].append(char)
                break

    empty_spans = []
    for span in spans:
        chars_to_content(span)
        if len(span['content']) == 0:
            empty_spans.append(span)
    return empty_spans


# Use the more robust center-point based check
def calculate_char_in_span(char_bbox, span_bbox, char_is_line_stop_flag):
    char_center_x = (char_bbox[0] + char_bbox[2]) / 2
    char_center_y = (char_bbox[1] + char_bbox[3]) / 2
    span_center_y = (span_bbox[1] + span_bbox[3]) / 2
    span_height = span_bbox[3] - span_bbox[1]

    if (
        span_bbox[0] < char_center_x < span_bbox[2]
        and span_bbox[1] < char_center_y < span_bbox[3]
        and abs(char_center_y - span_center_y) < span_height / 4  # the char's mid-line may deviate from the span's mid-line by at most 1/4 of the span height
    ):
        return True
    else:
        # If the char is a LINE_STOP_FLAG, skip the center-point test and use another rule:
        # its left edge must lie inside the span area, with the same height test as above.
        # This mainly gives trailing punctuation a chance to enter the span; such a char
        # should also be close to the span's right edge.
        if char_is_line_stop_flag:
            if (
                (span_bbox[2] - span_height) < char_bbox[0] < span_bbox[2]
                and char_center_x > span_bbox[0]
                and span_bbox[1] < char_center_y < span_bbox[3]
                and abs(char_center_y - span_center_y) < span_height / 4
            ):
                return True
            else:
                return False
        else:
            return False
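
# Illustrative example (added note, not from the original source): for a char bbox
# [10, 10, 12, 14] inside a span bbox [8, 9, 100, 15], the char center (11, 12)
# lies inside the span and its vertical offset from the span center (12) is 0,
# which is below span_height / 4 = 1.5, so calculate_char_in_span(...) returns True.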


def txt_spans_extract_v2(pdf_page, spans, all_bboxes, all_discarded_blocks, lang):
    text_blocks_raw = pdf_page.get_text('rawdict', flags=fitz.TEXTFLAGS_TEXT)['blocks']

    # @todo: after getting the chars, first drop the ones with a large skew angle
    all_pymu_chars = []
    for block in text_blocks_raw:
        for line in block['lines']:
            for span in line['spans']:
                all_pymu_chars.extend(span['chars'])

    # Compute the median height of all spans
    span_height_list = []
    for span in spans:
        if span['type'] in [ContentType.InterlineEquation, ContentType.Image, ContentType.Table]:
            continue
        span_height = span['bbox'][3] - span['bbox'][1]
        span['height'] = span_height
        span_height_list.append(span_height)
    if len(span_height_list) == 0:
        return spans
    else:
        median_span_height = statistics.median(span_height_list)

    useful_spans = []
    unuseful_spans = []
    # Two traits of a vertical span: 1. its height covers several lines; 2. its height/width ratio exceeds a threshold
    vertical_spans = []
    for span in spans:
        if span['type'] in [ContentType.InterlineEquation, ContentType.Image, ContentType.Table]:
            continue
        for block in all_bboxes + all_discarded_blocks:
            if block[7] in [BlockType.ImageBody, BlockType.TableBody, BlockType.InterlineEquation]:
                continue
            if calculate_overlap_area_in_bbox1_area_ratio(span['bbox'], block[0:4]) > 0.5:
                if span['height'] > median_span_height * 3 and span['height'] > (span['bbox'][2] - span['bbox'][0]) * 3:
                    vertical_spans.append(span)
                elif block in all_bboxes:
                    useful_spans.append(span)
                else:
                    unuseful_spans.append(span)
                del span['height']
                break

    """Vertical spans are filled directly from pymupdf lines"""
    if len(vertical_spans) > 0:
        text_blocks = pdf_page.get_text('dict', flags=fitz.TEXTFLAGS_TEXT)['blocks']
        all_pymu_lines = []
        for block in text_blocks:
            for line in block['lines']:
                all_pymu_lines.append(line)

        for pymu_line in all_pymu_lines:
            for span in vertical_spans:
                if calculate_overlap_area_in_bbox1_area_ratio(pymu_line['bbox'], span['bbox']) > 0.5:
                    for pymu_span in pymu_line['spans']:
                        span['content'] += pymu_span['text']
                    break

        for span in vertical_spans:
            if len(span['content']) == 0:
                spans.remove(span)

    """Horizontal spans that end up without chars are filled via OCR"""
    new_spans = []
    for span in useful_spans + unuseful_spans:
        if span['type'] in [ContentType.Text]:
            span['chars'] = []
            new_spans.append(span)

    empty_spans = fill_char_in_spans(new_spans, all_pymu_chars)

    if len(empty_spans) > 0:
        # Initialize the OCR model
        atom_model_manager = AtomModelSingleton()
        ocr_model = atom_model_manager.get_atom_model(
            atom_model_name="ocr",
            ocr_show_log=False,
            det_db_box_thresh=0.3,
            lang=lang
        )

        for span in empty_spans:
            # Crop the span's bbox from the page and run OCR on the image
            span_img = cut_image_to_pil_image(span['bbox'], pdf_page, mode="cv2")
            ocr_res = ocr_model.ocr(span_img, det=False)
            if ocr_res and len(ocr_res) > 0:
                if len(ocr_res[0]) > 0:
                    ocr_text, ocr_score = ocr_res[0][0]
                    if ocr_score > 0.5 and len(ocr_text) > 0:
                        span['content'] = ocr_text
                        span['score'] = ocr_score
                    else:
                        spans.remove(span)

    return spans


def txt_spans_extract_v1(pdf_page, inline_equations, interline_equations):
    text_raw_blocks = pdf_page.get_text('dict', flags=fitz.TEXTFLAGS_TEXT)['blocks']
    char_level_text_blocks = pdf_page.get_text('rawdict', flags=fitz.TEXTFLAGS_TEXT)[
        'blocks'
    ]
    text_blocks = combine_chars_to_pymudict(text_raw_blocks, char_level_text_blocks)
    text_blocks = replace_equations_in_textblock(
        text_blocks, inline_equations, interline_equations
    )
    text_blocks = remove_citation_marker(text_blocks)
    text_blocks = remove_chars_in_text_blocks(text_blocks)
    spans = []
    for v in text_blocks:
        for line in v['lines']:
            for span in line['spans']:
                bbox = span['bbox']
                if float_equal(bbox[0], bbox[2]) or float_equal(bbox[1], bbox[3]):
                    continue
                if span.get('type') not in (
                    ContentType.InlineEquation,
                    ContentType.InterlineEquation,
                ):
                    spans.append(
                        {
                            'bbox': list(span['bbox']),
                            'content': __replace_STX_ETX(span['text']),
                            'type': ContentType.Text,
                            'score': 1.0,
                        }
                    )
    return spans


def replace_text_span(pymu_spans, ocr_spans):
    return list(filter(lambda x: x['type'] != ContentType.Text, ocr_spans)) + pymu_spans


def model_init(model_name: str):
    from transformers import LayoutLMv3ForTokenClassification

    if torch.cuda.is_available():
        device = torch.device('cuda')
        if torch.cuda.is_bf16_supported():
            supports_bfloat16 = True
        else:
            supports_bfloat16 = False
    else:
        device = torch.device('cpu')
        supports_bfloat16 = False

    if model_name == 'layoutreader':
        # Check whether the modelscope cache directory exists
        layoutreader_model_dir = get_local_layoutreader_model_dir()
        if os.path.exists(layoutreader_model_dir):
            model = LayoutLMv3ForTokenClassification.from_pretrained(
                layoutreader_model_dir
            )
        else:
            logger.warning(
                'local layoutreader model not exists, use online model from huggingface'
            )
            model = LayoutLMv3ForTokenClassification.from_pretrained(
                'hantian/layoutreader'
            )
        # Check whether the device supports bfloat16
        if supports_bfloat16:
            model.bfloat16()
        model.to(device).eval()
    else:
        logger.error('model name not allow')
        exit(1)
    return model


class ModelSingleton:
    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(self, model_name: str):
        if model_name not in self._models:
            self._models[model_name] = model_init(model_name=model_name)
        return self._models[model_name]


def do_predict(boxes: List[List[int]], model) -> List[int]:
    from magic_pdf.model.sub_modules.reading_oreder.layoutreader.helpers import (
        boxes2inputs, parse_logits, prepare_inputs)

    inputs = boxes2inputs(boxes)
    inputs = prepare_inputs(inputs, model)
    logits = model(**inputs).logits.cpu().squeeze(0)
    return parse_logits(logits, len(boxes))
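
# Usage note (inferred from sort_lines_by_model below, not from the original source):
# `boxes` are line bboxes already scaled to layoutreader's 0-1000 coordinate space,
# e.g. [[81, 98, 918, 114], [81, 120, 918, 136], ...] (illustrative values), and the
# returned list contains the indices of those boxes in predicted reading order.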


def cal_block_index(fix_blocks, sorted_bboxes):
    if sorted_bboxes is not None:
        # Sort with layoutreader
        for block in fix_blocks:
            line_index_list = []
            if len(block['lines']) == 0:
                block['index'] = sorted_bboxes.index(block['bbox'])
            else:
                for line in block['lines']:
                    line['index'] = sorted_bboxes.index(line['bbox'])
                    line_index_list.append(line['index'])
                median_value = statistics.median(line_index_list)
                block['index'] = median_value

            # Delete the virtual line info of image/table body blocks and back-fill it from real_lines
            if block['type'] in [BlockType.ImageBody, BlockType.TableBody]:
                block['virtual_lines'] = copy.deepcopy(block['lines'])
                block['lines'] = copy.deepcopy(block['real_lines'])
                del block['real_lines']
    else:
        # Sort with xycut
        block_bboxes = []
        for block in fix_blocks:
            block_bboxes.append(block['bbox'])

            # Delete the virtual line info of image/table body blocks and back-fill it from real_lines
            if block['type'] in [BlockType.ImageBody, BlockType.TableBody]:
                block['virtual_lines'] = copy.deepcopy(block['lines'])
                block['lines'] = copy.deepcopy(block['real_lines'])
                del block['real_lines']

        import numpy as np

        from magic_pdf.model.sub_modules.reading_oreder.layoutreader.xycut import \
            recursive_xy_cut

        random_boxes = np.array(block_bboxes)
        np.random.shuffle(random_boxes)
        res = []
        recursive_xy_cut(np.asarray(random_boxes).astype(int), np.arange(len(block_bboxes)), res)
        assert len(res) == len(block_bboxes)
        sorted_boxes = random_boxes[np.array(res)].tolist()

        for i, block in enumerate(fix_blocks):
            block['index'] = sorted_boxes.index(block['bbox'])

    # Generate the line indexes
    sorted_blocks = sorted(fix_blocks, key=lambda b: b['index'])
    line_index = 1
    for block in sorted_blocks:
        for line in block['lines']:
            line['index'] = line_index
            line_index += 1

    return fix_blocks


def insert_lines_into_block(block_bbox, line_height, page_w, page_h):
    # block_bbox is a tuple (x0, y0, x1, y1): (x0, y0) is the lower-left corner and (x1, y1) is the upper-right corner
    x0, y0, x1, y1 = block_bbox

    block_height = y1 - y0
    block_width = x1 - x0

    # If the block is shorter than n text lines, return the block's bbox directly
    if line_height * 3 < block_height:
        if (
            block_height > page_h * 0.25 and page_w * 0.5 > block_width > page_w * 0.25
        ):  # probably a two-column layout, which can be split more finely
            lines = int(block_height / line_height) + 1
        else:
            # If the block width exceeds 0.4 of the page width, split the block into 3 lines
            # (a complex layout; figures should not be split too finely)
            if block_width > page_w * 0.4:
                line_height = (y1 - y0) / 3
                lines = 3
            elif block_width > page_w * 0.25:  # probably a three-column layout, also split more finely
                lines = int(block_height / line_height) + 1
            else:  # check the aspect ratio
                if block_height / block_width > 1.2:  # tall and narrow: do not split
                    return [[x0, y0, x1, y1]]
                else:  # not tall and narrow: split into two lines
                    line_height = (y1 - y0) / 2
                    lines = 2

        # Determine the y position from which to start drawing lines
        current_y = y0

        # Store the line positions [(x0, y), ...]
        lines_positions = []

        for i in range(lines):
            lines_positions.append([x0, current_y, x1, current_y + line_height])
            current_y += line_height
        return lines_positions

    else:
        return [[x0, y0, x1, y1]]
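
# Worked example (illustrative values, not from the original source): for
# block_bbox = (0, 0, 300, 90) on a page with page_w = 600, page_h = 800 and
# line_height = 10, the block is taller than 3 lines (90 > 30), is not taller than
# a quarter page (90 < 200), and its width 300 exceeds page_w * 0.4 = 240, so it is
# split into 3 stripes of height 30:
#   [[0, 0, 300, 30], [0, 30, 300, 60], [0, 60, 300, 90]]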


def sort_lines_by_model(fix_blocks, page_w, page_h, line_height):
    page_line_list = []
    for block in fix_blocks:
        if block['type'] in [
            BlockType.Text, BlockType.Title, BlockType.InterlineEquation,
            BlockType.ImageCaption, BlockType.ImageFootnote,
            BlockType.TableCaption, BlockType.TableFootnote
        ]:
            if len(block['lines']) == 0:
                bbox = block['bbox']
                lines = insert_lines_into_block(bbox, line_height, page_w, page_h)
                for line in lines:
                    block['lines'].append({'bbox': line, 'spans': []})
                page_line_list.extend(lines)
            else:
                for line in block['lines']:
                    bbox = line['bbox']
                    page_line_list.append(bbox)
        elif block['type'] in [BlockType.ImageBody, BlockType.TableBody]:
            bbox = block['bbox']
            block['real_lines'] = copy.deepcopy(block['lines'])
            lines = insert_lines_into_block(bbox, line_height, page_w, page_h)
            block['lines'] = []
            for line in lines:
                block['lines'].append({'bbox': line, 'spans': []})
            page_line_list.extend(lines)

    if len(page_line_list) > 200:  # layoutreader supports at most 512 lines
        return None

    # Sort with layoutreader
    x_scale = 1000.0 / page_w
    y_scale = 1000.0 / page_h
    boxes = []
    # logger.info(f"Scale: {x_scale}, {y_scale}, Boxes len: {len(page_line_list)}")
    for left, top, right, bottom in page_line_list:
        if left < 0:
            logger.warning(
                f'left < 0, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            left = 0
        if right > page_w:
            logger.warning(
                f'right > page_w, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            right = page_w
        if top < 0:
            logger.warning(
                f'top < 0, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            top = 0
        if bottom > page_h:
            logger.warning(
                f'bottom > page_h, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            bottom = page_h

        left = round(left * x_scale)
        top = round(top * y_scale)
        right = round(right * x_scale)
        bottom = round(bottom * y_scale)
        assert (
            1000 >= right >= left >= 0 and 1000 >= bottom >= top >= 0
        ), f'Invalid box. right: {right}, left: {left}, bottom: {bottom}, top: {top}'  # noqa: E126, E121
        boxes.append([left, top, right, bottom])

    model_manager = ModelSingleton()
    model = model_manager.get_model('layoutreader')
    with torch.no_grad():
        orders = do_predict(boxes, model)
    sorted_bboxes = [page_line_list[i] for i in orders]

    return sorted_bboxes
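
# Scaling example (illustrative values, not from the original source): on a page
# with page_w = 612 and page_h = 792, a line bbox (61.2, 79.2, 306.0, 95.0) becomes
# [round(61.2 * 1000 / 612), round(79.2 * 1000 / 792), round(306.0 * 1000 / 612),
#  round(95.0 * 1000 / 792)] = [100, 100, 500, 120] in layoutreader's 0-1000 space.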


def get_line_height(blocks):
    page_line_height_list = []
    for block in blocks:
        if block['type'] in [
            BlockType.Text, BlockType.Title,
            BlockType.ImageCaption, BlockType.ImageFootnote,
            BlockType.TableCaption, BlockType.TableFootnote
        ]:
            for line in block['lines']:
                bbox = line['bbox']
                page_line_height_list.append(int(bbox[3] - bbox[1]))
    if len(page_line_height_list) > 0:
        return statistics.median(page_line_height_list)
    else:
        return 10


def process_groups(groups, body_key, caption_key, footnote_key):
    body_blocks = []
    caption_blocks = []
    footnote_blocks = []
    for i, group in enumerate(groups):
        group[body_key]['group_id'] = i
        body_blocks.append(group[body_key])
        for caption_block in group[caption_key]:
            caption_block['group_id'] = i
            caption_blocks.append(caption_block)
        for footnote_block in group[footnote_key]:
            footnote_block['group_id'] = i
            footnote_blocks.append(footnote_block)
    return body_blocks, caption_blocks, footnote_blocks
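
# Shape note (inferred from the call sites in parse_page_core, not from the original
# source): each group is a dict such as
#   {'image_body': {...block...}, 'image_caption_list': [...blocks...],
#    'image_footnote_list': [...blocks...]}
# (and the analogous 'table_*' keys for tables); process_groups() flattens the groups
# into three block lists while stamping every block with the group_id it came from.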


def process_block_list(blocks, body_type, block_type):
    indices = [block['index'] for block in blocks]
    median_index = statistics.median(indices)
    body_bbox = next((block['bbox'] for block in blocks if block.get('type') == body_type), [])

    return {
        'type': block_type,
        'bbox': body_bbox,
        'blocks': blocks,
        'index': median_index,
    }


def revert_group_blocks(blocks):
    image_groups = {}
    table_groups = {}
    new_blocks = []
    for block in blocks:
        if block['type'] in [BlockType.ImageBody, BlockType.ImageCaption, BlockType.ImageFootnote]:
            group_id = block['group_id']
            if group_id not in image_groups:
                image_groups[group_id] = []
            image_groups[group_id].append(block)
        elif block['type'] in [BlockType.TableBody, BlockType.TableCaption, BlockType.TableFootnote]:
            group_id = block['group_id']
            if group_id not in table_groups:
                table_groups[group_id] = []
            table_groups[group_id].append(block)
        else:
            new_blocks.append(block)

    for group_id, blocks in image_groups.items():
        new_blocks.append(process_block_list(blocks, BlockType.ImageBody, BlockType.Image))

    for group_id, blocks in table_groups.items():
        new_blocks.append(process_block_list(blocks, BlockType.TableBody, BlockType.Table))

    return new_blocks


def remove_outside_spans(spans, all_bboxes, all_discarded_blocks):
    def get_block_bboxes(blocks, block_type_list):
        return [block[0:4] for block in blocks if block[7] in block_type_list]

    image_bboxes = get_block_bboxes(all_bboxes, [BlockType.ImageBody])
    table_bboxes = get_block_bboxes(all_bboxes, [BlockType.TableBody])
    other_block_type = []
    for block_type in BlockType.__dict__.values():
        if not isinstance(block_type, str):
            continue
        if block_type not in [BlockType.ImageBody, BlockType.TableBody]:
            other_block_type.append(block_type)
    other_block_bboxes = get_block_bboxes(all_bboxes, other_block_type)
    discarded_block_bboxes = get_block_bboxes(all_discarded_blocks, [BlockType.Discarded])

    new_spans = []

    for span in spans:
        span_bbox = span['bbox']
        span_type = span['type']

        if any(calculate_overlap_area_in_bbox1_area_ratio(span_bbox, block_bbox) > 0.4 for block_bbox in
               discarded_block_bboxes):
            new_spans.append(span)
            continue

        if span_type == ContentType.Image:
            if any(calculate_overlap_area_in_bbox1_area_ratio(span_bbox, block_bbox) > 0.5 for block_bbox in
                   image_bboxes):
                new_spans.append(span)
        elif span_type == ContentType.Table:
            if any(calculate_overlap_area_in_bbox1_area_ratio(span_bbox, block_bbox) > 0.5 for block_bbox in
                   table_bboxes):
                new_spans.append(span)
        else:
            if any(calculate_overlap_area_in_bbox1_area_ratio(span_bbox, block_bbox) > 0.5 for block_bbox in
                   other_block_bboxes):
                new_spans.append(span)

    return new_spans
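
# Layout note (inferred from get_block_bboxes above, not from the original source):
# each entry of all_bboxes / all_discarded_blocks is a list whose first four elements
# (block[0:4]) are the bbox [x0, y0, x1, y1] and whose element at index 7 (block[7])
# is the BlockType; the remaining positions are not used in this function.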


def parse_page_core(
    page_doc: PageableData, magic_model, page_id, pdf_bytes_md5, imageWriter, parse_mode, lang
):
    need_drop = False
    drop_reason = []

    """Get the block info that will be used later from the magic_model object"""
    img_groups = magic_model.get_imgs_v2(page_id)
    table_groups = magic_model.get_tables_v2(page_id)

    """Group the image and table blocks"""
    img_body_blocks, img_caption_blocks, img_footnote_blocks = process_groups(
        img_groups, 'image_body', 'image_caption_list', 'image_footnote_list'
    )
    table_body_blocks, table_caption_blocks, table_footnote_blocks = process_groups(
        table_groups, 'table_body', 'table_caption_list', 'table_footnote_list'
    )

    discarded_blocks = magic_model.get_discarded(page_id)
    text_blocks = magic_model.get_text_blocks(page_id)
    title_blocks = magic_model.get_title_blocks(page_id)
    inline_equations, interline_equations, interline_equation_blocks = (
        magic_model.get_equations(page_id)
    )
    page_w, page_h = magic_model.get_page_size(page_id)

    """Gather the bboxes of all blocks together"""
    # The interline_equation_blocks parameter is not accurate enough; switch to interline_equations later
    interline_equation_blocks = []
    if len(interline_equation_blocks) > 0:
        all_bboxes, all_discarded_blocks = ocr_prepare_bboxes_for_layout_split_v2(
            img_body_blocks, img_caption_blocks, img_footnote_blocks,
            table_body_blocks, table_caption_blocks, table_footnote_blocks,
            discarded_blocks,
            text_blocks,
            title_blocks,
            interline_equation_blocks,
            page_w,
            page_h,
        )
    else:
        all_bboxes, all_discarded_blocks = ocr_prepare_bboxes_for_layout_split_v2(
            img_body_blocks, img_caption_blocks, img_footnote_blocks,
            table_body_blocks, table_caption_blocks, table_footnote_blocks,
            discarded_blocks,
            text_blocks,
            title_blocks,
            interline_equations,
            page_w,
            page_h,
        )

    """Get all span info"""
    spans = magic_model.get_all_spans(page_id)

    """Before removing duplicate spans, filter image/table spans against the image_body and table_body blocks"""
    """Along the way, drop large watermarks and keep the spans in abandoned blocks"""
    spans = remove_outside_spans(spans, all_bboxes, all_discarded_blocks)

    """Among overlapping spans, drop the ones with lower confidence"""
    spans, dropped_spans_by_confidence = remove_overlaps_low_confidence_spans(spans)
    """Among overlapping spans, drop the smaller ones"""
    spans, dropped_spans_by_span_overlap = remove_overlaps_min_spans(spans)

    """Build the spans according to parse_mode, mainly by filling characters into text spans"""
    if parse_mode == SupportedPdfParseMethod.TXT:
        """The previous equation-replacement scheme"""
        # pymu_spans = txt_spans_extract_v1(page_doc, inline_equations, interline_equations)
        # spans = replace_text_span(pymu_spans, spans)

        """Use the new hybrid OCR scheme"""
        spans = txt_spans_extract_v2(page_doc, spans, all_bboxes, all_discarded_blocks, lang)
    elif parse_mode == SupportedPdfParseMethod.OCR:
        pass
    else:
        raise Exception('parse_mode must be txt or ocr')

    """First handle the discarded_blocks, which do not need layout ordering"""
    discarded_block_with_spans, spans = fill_spans_in_blocks(
        all_discarded_blocks, spans, 0.4
    )
    fix_discarded_blocks = fix_discarded_block(discarded_block_with_spans)

    """Skip the page if it has no useful bbox"""
    if len(all_bboxes) == 0:
        logger.warning(f'skip this page, not found useful bbox, page_id: {page_id}')
        return ocr_construct_page_component_v2(
            [],
            [],
            page_id,
            page_w,
            page_h,
            [],
            [],
            [],
            interline_equations,
            fix_discarded_blocks,
            need_drop,
            drop_reason,
        )

    """Crop the images and tables"""
    spans = ocr_cut_image_and_table(
        spans, page_doc, page_id, pdf_bytes_md5, imageWriter
    )

    """Fill the spans into the blocks"""
    block_with_spans, spans = fill_spans_in_blocks(all_bboxes, spans, 0.5)

    """Apply the fix operations to the blocks"""
    fix_blocks = fix_block_spans_v2(block_with_spans)

    """Get all lines and compute the body-text line height"""
    line_height = get_line_height(fix_blocks)

    """Get all lines and sort them"""
    sorted_bboxes = sort_lines_by_model(fix_blocks, page_w, page_h, line_height)

    """Compute the block order from the median of the line indexes"""
    fix_blocks = cal_block_index(fix_blocks, sorted_bboxes)

    """Revert image and table blocks back to group form for the rest of the pipeline"""
    fix_blocks = revert_group_blocks(fix_blocks)

    """Re-sort the blocks"""
    sorted_blocks = sorted(fix_blocks, key=lambda b: b['index'])

    """Get the lists that QA needs to expose externally"""
    images, tables, interline_equations = get_qa_need_list_v2(sorted_blocks)

    """Build pdf_info_dict"""
    page_info = ocr_construct_page_component_v2(
        sorted_blocks,
        [],
        page_id,
        page_w,
        page_h,
        [],
        images,
        tables,
        interline_equations,
        fix_discarded_blocks,
        need_drop,
        drop_reason,
    )
    return page_info


def pdf_parse_union(
    dataset: Dataset,
    model_list,
    imageWriter,
    parse_mode,
    start_page_id=0,
    end_page_id=None,
    debug_mode=False,
    lang=None,
):
    pdf_bytes_md5 = compute_md5(dataset.data_bits())

    """Initialize an empty pdf_info_dict"""
    pdf_info_dict = {}

    """Initialize magic_model with model_list and the docs object"""
    magic_model = MagicModel(model_list, dataset)

    """Parse the pdf according to the requested page range"""
    # end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
    end_page_id = (
        end_page_id
        if end_page_id is not None and end_page_id >= 0
        else len(dataset) - 1
    )
    if end_page_id > len(dataset) - 1:
        logger.warning('end_page_id is out of range, use pdf_docs length')
        end_page_id = len(dataset) - 1

    """Initialize the start time"""
    start_time = time.time()

    for page_id, page in enumerate(dataset):
        """In debug mode, log how long each page took to parse."""
        if debug_mode:
            time_now = time.time()
            logger.info(
                f'page_id: {page_id}, last_page_cost_time: {get_delta_time(start_time)}'
            )
            start_time = time_now

        """Parse each page of the pdf"""
        if start_page_id <= page_id <= end_page_id:
            page_info = parse_page_core(
                page, magic_model, page_id, pdf_bytes_md5, imageWriter, parse_mode, lang
            )
        else:
            page_info = page.get_page_info()
            page_w = page_info.w
            page_h = page_info.h
            page_info = ocr_construct_page_component_v2(
                [], [], page_id, page_w, page_h, [], [], [], [], [], True, 'skip page'
            )
        pdf_info_dict[f'page_{page_id}'] = page_info

    """Paragraph splitting"""
    para_split(pdf_info_dict)

    """Convert the dict to a list"""
    pdf_info_list = dict_to_list(pdf_info_dict)
    new_pdf_info_dict = {
        'pdf_info': pdf_info_list,
    }

    clean_memory()

    return new_pdf_info_dict


if __name__ == '__main__':
    pass
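
# Minimal usage sketch (assumptions: a Dataset implementation, per-page model output
# and an image writer with the interfaces used above are provided by the rest of the
# project; the names below are illustrative only):
#
#   dataset = ...          # any magic_pdf.data.dataset.Dataset built from the PDF
#   model_list = ...       # per-page model output consumed by MagicModel
#   image_writer = ...     # writer passed through to ocr_cut_image_and_table
#   result = pdf_parse_union(
#       dataset, model_list, image_writer,
#       parse_mode=SupportedPdfParseMethod.TXT,
#       debug_mode=True, lang='en',
#   )
#   # result['pdf_info'] is a list with one entry per page.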