pdf_extract_kit.py

import json
import os
import time
import cv2
import fitz
import numpy as np
import torch
import unimernet.tasks as tasks
import yaml
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from ultralytics import YOLO
from unimernet.common.config import Config
from unimernet.processors import load_processor
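
# The class below references several project-specific helpers that are not part
# of this excerpt (mfr_model_init, layout_model_init, ModifiedPaddleOCR); they
# are assumed to be provided elsewhere in the repo. The small helpers and the
# dataset wrapper used by the batched formula-recognition step are sketched
# here as minimal, assumed implementations so the file reads as a whole.

def mfd_model_init(weight):
    # The MFD (math formula detection) model is an Ultralytics YOLO checkpoint.
    return YOLO(weight)


def get_croped_image(image_pil, bbox):
    # Crop a PIL image to the [xmin, ymin, xmax, ymax] box.
    xmin, ymin, xmax, ymax = bbox
    return image_pil.crop((xmin, ymin, xmax, ymax))


def latex_rm_whitespace(s: str) -> str:
    # Placeholder cleanup of the recognized LaTeX string (assumed behavior:
    # collapse runs of whitespace; the real project applies more careful,
    # LaTeX-aware normalization).
    return ' '.join(s.split())


class MathDataset(Dataset):
    # Wraps the list of cropped formula images so they can be batched through
    # the MFR model with a DataLoader.
    def __init__(self, image_list, transform=None):
        self.image_list = image_list
        self.transform = transform

    def __len__(self):
        return len(self.image_list)

    def __getitem__(self, idx):
        image = self.image_list[idx]
        if self.transform:
            image = self.transform(image)
        return image
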
class CustomPEKModel:
    def __init__(self, ocr: bool = False, show_log: bool = False):
        ## ======== model init ========##
        with open('configs/model_configs.yaml') as f:
            model_configs = yaml.load(f, Loader=yaml.FullLoader)
        self.img_size = model_configs['model_args']['img_size']
        self.conf_thres = model_configs['model_args']['conf_thres']
        self.iou_thres = model_configs['model_args']['iou_thres']
        self.device = model_configs['model_args']['device']
        self.dpi = model_configs['model_args']['pdf_dpi']
        # Formula detection (MFD), formula recognition (MFR), layout and OCR models.
        self.mfd_model = mfd_model_init(model_configs['model_args']['mfd_weight'])
        self.mfr_model, mfr_vis_processors = mfr_model_init(model_configs['model_args']['mfr_weight'], device=self.device)
        self.mfr_transform = transforms.Compose([mfr_vis_processors, ])
        self.layout_model = layout_model_init(model_configs['model_args']['layout_weight'])
        self.ocr_model = ModifiedPaddleOCR(show_log=show_log)
        print(time.strftime('%Y-%m-%d %H:%M:%S'))
        print('Model init done!')
        ## ======== model init ========##
    def __call__(self, img_list, single_pdf, output_dir):
        # Process all rendered page images of one PDF and write the results to
        # <output_dir>/<pdf basename>.json.
        # Layout detection + formula detection
        doc_layout_result = []
        latex_filling_list = []
        mf_image_list = []
        for idx, image in enumerate(img_list):
            img_H, img_W = image.shape[0], image.shape[1]
            layout_res = self.layout_model(image, ignore_catids=[])
            # Formula detection
            mfd_res = self.mfd_model.predict(image, imgsz=self.img_size, conf=self.conf_thres, iou=self.iou_thres, verbose=True)[0]
            for xyxy, conf, cla in zip(mfd_res.boxes.xyxy.cpu(), mfd_res.boxes.conf.cpu(), mfd_res.boxes.cls.cpu()):
                xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy]
                new_item = {
                    'category_id': 13 + int(cla.item()),
                    'poly': [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax],
                    'score': round(float(conf.item()), 2),
                    'latex': '',
                }
                layout_res['layout_dets'].append(new_item)
                latex_filling_list.append(new_item)
                bbox_img = get_croped_image(Image.fromarray(image), [xmin, ymin, xmax, ymax])
                mf_image_list.append(bbox_img)
            layout_res['page_info'] = dict(
                page_no=idx,
                height=img_H,
                width=img_W
            )
            doc_layout_result.append(layout_res)
        # Formula recognition (MFR). Recognition is slow, so all formula crops
        # from the PDF are batched together and recognized in one pass.
        a = time.time()
        dataset = MathDataset(mf_image_list, transform=self.mfr_transform)
        dataloader = DataLoader(dataset, batch_size=128, num_workers=0)
        mfr_res = []
        gpu_total_cost = 0
        for imgs in dataloader:
            imgs = imgs.to(self.device)
            gpu_start = time.time()
            output = self.mfr_model.generate({'image': imgs})
            gpu_cost = time.time() - gpu_start
            gpu_total_cost += gpu_cost
            print(f"gpu_cost: {gpu_cost}")
            mfr_res.extend(output['pred_str'])
        print(f"gpu_total_cost: {gpu_total_cost}")
        for res, latex in zip(latex_filling_list, mfr_res):
            res['latex'] = latex_rm_whitespace(latex)
        b = time.time()
        print("formula nums:", len(mf_image_list), "mfr time:", round(b - a, 2))
        # OCR on the text-bearing layout regions.
        for idx, image in enumerate(img_list):
            pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
            single_page_res = doc_layout_result[idx]['layout_dets']
            single_page_mfdetrec_res = []
            for res in single_page_res:
                # Collect formula boxes (categories 13/14) to pass to the OCR
                # model so formula regions are not re-recognized as text.
                if int(res['category_id']) in [13, 14]:
                    xmin, ymin = int(res['poly'][0]), int(res['poly'][1])
                    xmax, ymax = int(res['poly'][4]), int(res['poly'][5])
                    single_page_mfdetrec_res.append({
                        "bbox": [xmin, ymin, xmax, ymax],
                    })
            for res in single_page_res:
                if int(res['category_id']) in [0, 1, 2, 4, 6, 7]:  # categories that need OCR
                    xmin, ymin = int(res['poly'][0]), int(res['poly'][1])
                    xmax, ymax = int(res['poly'][4]), int(res['poly'][5])
                    crop_box = [xmin, ymin, xmax, ymax]
                    cropped_img = Image.new('RGB', pil_img.size, 'white')
                    cropped_img.paste(pil_img.crop(crop_box), crop_box)
                    cropped_img = cv2.cvtColor(np.asarray(cropped_img), cv2.COLOR_RGB2BGR)
                    ocr_res = self.ocr_model.ocr(cropped_img, mfd_res=single_page_mfdetrec_res)[0]
                    if ocr_res:
                        for box_ocr_res in ocr_res:
                            p1, p2, p3, p4 = box_ocr_res[0]
                            text, score = box_ocr_res[1]
                            doc_layout_result[idx]['layout_dets'].append({
                                'category_id': 15,
                                'poly': p1 + p2 + p3 + p4,
                                'score': round(score, 2),
                                'text': text,
                            })
        # Write all per-page results for this PDF to a single JSON file.
        os.makedirs(output_dir, exist_ok=True)
        basename = os.path.basename(single_pdf)[0:-4]  # strip the '.pdf' extension
        with open(os.path.join(output_dir, f'{basename}.json'), 'w') as f:
            json.dump(doc_layout_result, f)
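
# A minimal driver sketch (assumed, not part of the original excerpt): render
# each page of a PDF to an RGB numpy array with PyMuPDF at the configured DPI
# and hand the page images to CustomPEKModel. The input path and output dir are
# hypothetical; the 'model_args' keys in configs/model_configs.yaml (img_size,
# conf_thres, iou_thres, device, pdf_dpi, *_weight) follow the usage in
# __init__ above.

if __name__ == '__main__':
    model = CustomPEKModel()
    single_pdf = 'demo/sample.pdf'  # hypothetical input path
    img_list = []
    with fitz.open(single_pdf) as doc:
        for page in doc:
            # Scale from the PDF's native 72 dpi to the configured rendering dpi.
            mat = fitz.Matrix(model.dpi / 72, model.dpi / 72)
            pix = page.get_pixmap(matrix=mat)
            img = np.frombuffer(pix.samples, dtype=np.uint8).reshape(pix.height, pix.width, pix.n)
            img_list.append(img[:, :, :3])  # keep the RGB channels
    model(img_list, single_pdf, output_dir='output')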