ocr_mkcontent.py

from loguru import logger
from magic_pdf.libs.MakeContentConfig import DropMode, MakeMode
from magic_pdf.libs.commons import join_path
from magic_pdf.libs.language import detect_lang
from magic_pdf.libs.markdown_utils import ocr_escape_special_markdown_char
from magic_pdf.libs.ocr_content_type import ContentType, BlockType
import wordninja
import re


def __is_hyphen_at_line_end(line):
    """
    Check if a line ends with one or more letters followed by a hyphen.

    Args:
        line (str): The line of text to check.

    Returns:
        bool: True if the line ends with one or more letters followed by a hyphen, False otherwise.
    """
    # Use regex to check if the line ends with one or more letters followed by a hyphen
    return bool(re.search(r'[A-Za-z]+-\s*$', line))
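
# A quick illustration (hypothetical inputs): the hyphen must directly follow letters,
# and trailing whitespace after it is tolerated.
#   __is_hyphen_at_line_end("informa-")   -> True
#   __is_hyphen_at_line_end("informa- ")  -> True
#   __is_hyphen_at_line_end("2024-")      -> False  (digits, not letters, precede the hyphen)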


def split_long_words(text):
    segments = text.split(' ')
    for i in range(len(segments)):
        words = re.findall(r'\w+|[^\w]', segments[i], re.UNICODE)
        for j in range(len(words)):
            if len(words[j]) > 10:
                words[j] = ' '.join(wordninja.split(words[j]))
        segments[i] = ''.join(words)
    return ' '.join(segments)
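
# Minimal usage sketch (made-up string): tokens longer than 10 characters are re-split
# with wordninja; shorter tokens and punctuation pass through unchanged. The exact split
# depends on wordninja's language model.
#   split_long_words("penicillinwasdiscovered in 1928")
#   -> "penicillin was discovered in 1928"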


def ocr_mk_mm_markdown_with_para(pdf_info_list: list, img_buket_path):
    markdown = []
    for page_info in pdf_info_list:
        paras_of_layout = page_info.get("para_blocks")
        page_markdown = ocr_mk_markdown_with_para_core_v2(paras_of_layout, "mm", img_buket_path)
        markdown.extend(page_markdown)
    return '\n\n'.join(markdown)


def ocr_mk_nlp_markdown_with_para(pdf_info_dict: list):
    markdown = []
    for page_info in pdf_info_dict:
        paras_of_layout = page_info.get("para_blocks")
        page_markdown = ocr_mk_markdown_with_para_core_v2(paras_of_layout, "nlp")
        markdown.extend(page_markdown)
    return '\n\n'.join(markdown)


def ocr_mk_mm_markdown_with_para_and_pagination(pdf_info_dict: list, img_buket_path):
    markdown_with_para_and_pagination = []
    page_no = 0
    for page_info in pdf_info_dict:
        paras_of_layout = page_info.get("para_blocks")
        if not paras_of_layout:
            continue
        page_markdown = ocr_mk_markdown_with_para_core_v2(paras_of_layout, "mm", img_buket_path)
        markdown_with_para_and_pagination.append({
            'page_no': page_no,
            'md_content': '\n\n'.join(page_markdown)
        })
        page_no += 1
    return markdown_with_para_and_pagination
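
# Shape of the returned list (illustrative values only):
#   [{'page_no': 0, 'md_content': '# Some Title \n\nFirst paragraph ...'},
#    {'page_no': 1, 'md_content': '...'}]
# Note that page_no only counts pages that actually produced para_blocks.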


def ocr_mk_markdown_with_para_core(paras_of_layout, mode, img_buket_path=""):
    page_markdown = []
    for paras in paras_of_layout:
        for para in paras:
            para_text = ''
            for line in para:
                for span in line['spans']:
                    span_type = span.get('type')
                    content = ''
                    language = ''
                    if span_type == ContentType.Text:
                        content = span['content']
                        language = detect_lang(content)
                        if language == 'en':  # Only split long English words; splitting Chinese text would lose characters
                            content = ocr_escape_special_markdown_char(split_long_words(content))
                        else:
                            content = ocr_escape_special_markdown_char(content)
                    elif span_type == ContentType.InlineEquation:
                        content = f"${span['content']}$"
                    elif span_type == ContentType.InterlineEquation:
                        content = f"\n$$\n{span['content']}\n$$\n"
                    elif span_type in [ContentType.Image, ContentType.Table]:
                        if mode == 'mm':
                            content = f"\n![]({join_path(img_buket_path, span['image_path'])})\n"
                        elif mode == 'nlp':
                            pass
                    if content != '':
                        if language == 'en':  # In an English context, contents are separated by spaces
                            para_text += content + ' '
                        else:  # In a Chinese context, no space is needed between contents
                            para_text += content
            if para_text.strip() == '':
                continue
            else:
                page_markdown.append(para_text.strip() + ' ')
    return page_markdown
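
# Note: this v1 routine walks a nested list structure (layout -> paragraph -> line -> span),
# while ocr_mk_markdown_with_para_core_v2 below consumes typed para_block dicts
# (text / title / interline_equation / image / table) and handles their sub-blocks.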


def ocr_mk_markdown_with_para_core_v2(paras_of_layout, mode, img_buket_path=""):
    page_markdown = []
    for para_block in paras_of_layout:
        para_text = ''
        para_type = para_block['type']
        if para_type == BlockType.Text:
            para_text = merge_para_with_text(para_block)
        elif para_type == BlockType.Title:
            para_text = f"# {merge_para_with_text(para_block)}"
        elif para_type == BlockType.InterlineEquation:
            para_text = merge_para_with_text(para_block)
        elif para_type == BlockType.Image:
            if mode == 'nlp':
                continue
            elif mode == 'mm':
                for block in para_block['blocks']:  # 1st: assemble the image_body
                    if block['type'] == BlockType.ImageBody:
                        for line in block['lines']:
                            for span in line['spans']:
                                if span['type'] == ContentType.Image:
                                    para_text += f"\n![]({join_path(img_buket_path, span['image_path'])}) \n"
                for block in para_block['blocks']:  # 2nd: append the image_caption
                    if block['type'] == BlockType.ImageCaption:
                        para_text += merge_para_with_text(block)
        elif para_type == BlockType.Table:
            if mode == 'nlp':
                continue
            elif mode == 'mm':
                table_caption = ''
                for block in para_block['blocks']:  # 1st: append the table_caption
                    if block['type'] == BlockType.TableCaption:
                        para_text += merge_para_with_text(block)
                for block in para_block['blocks']:  # 2nd: assemble the table_body
                    if block['type'] == BlockType.TableBody:
                        for line in block['lines']:
                            for span in line['spans']:
                                if span['type'] == ContentType.Table:
                                    # if processed by the table model
                                    if span.get('latex', ''):
                                        para_text += f"\n\n$\n {span['latex']}\n$\n\n"
                                    elif span.get('html', ''):
                                        para_text += f"\n\n{span['html']}\n\n"
                                    else:
                                        para_text += f"\n![]({join_path(img_buket_path, span['image_path'])}) \n"
                for block in para_block['blocks']:  # 3rd: append the table_footnote
                    if block['type'] == BlockType.TableFootnote:
                        para_text += merge_para_with_text(block)
        if para_text.strip() == '':
            continue
        else:
            page_markdown.append(para_text.strip() + ' ')
    return page_markdown
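
# Behaviour summary: in "mm" mode image and table blocks are rendered as Markdown image
# links (or as LaTeX / HTML when the table model supplied them); in "nlp" mode image and
# table blocks are skipped, so only text, titles and equations reach the output.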


def merge_para_with_text(para_block):
    def detect_language(text):
        en_pattern = r'[a-zA-Z]+'
        en_matches = re.findall(en_pattern, text)
        en_length = sum(len(match) for match in en_matches)
        if len(text) > 0:
            if en_length / len(text) >= 0.5:
                return 'en'
            else:
                return "unknown"
        else:
            return "empty"

    para_text = ''
    for line in para_block['lines']:
        line_text = ""
        line_lang = ""
        for span in line['spans']:
            span_type = span['type']
            if span_type == ContentType.Text:
                line_text += span['content'].strip()
        if line_text != "":
            line_lang = detect_lang(line_text)
        for span in line['spans']:
            span_type = span['type']
            content = ''
            if span_type == ContentType.Text:
                content = span['content']
                # language = detect_lang(content)
                language = detect_language(content)
                if language == 'en':  # Only split long English words; splitting Chinese text would lose characters
                    content = ocr_escape_special_markdown_char(split_long_words(content))
                else:
                    content = ocr_escape_special_markdown_char(content)
            elif span_type == ContentType.InlineEquation:
                content = f" ${span['content']}$ "
            elif span_type == ContentType.InterlineEquation:
                content = f"\n$$\n{span['content']}\n$$\n"
            if content != '':
                langs = ['zh', 'ja', 'ko']
                if line_lang in langs:  # Some documents put one character per span; per-span language detection is unreliable, so judge by the whole line
                    para_text += content  # In Chinese/Japanese/Korean contexts, no space is needed between contents
                elif line_lang == 'en':
                    # If the line ends with a hyphen, strip it and add no space so the word joins the next line
                    if __is_hyphen_at_line_end(content):
                        para_text += content[:-1]
                    else:
                        para_text += content + ' '
                else:
                    para_text += content + ' '  # For Western-language text, contents are separated by spaces
    return para_text
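
# Joining example (hypothetical spans): an English line ending in "informa-" followed by a
# line starting with "tion" is merged as "information", because the trailing hyphen is
# stripped and no space is inserted before the next line's content.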


def para_to_standard_format(para, img_buket_path):
    para_content = {}
    if len(para) == 1:
        para_content = line_to_standard_format(para[0], img_buket_path)
    elif len(para) > 1:
        para_text = ''
        inline_equation_num = 0
        for line in para:
            for span in line['spans']:
                language = ''
                span_type = span.get('type')
                content = ""
                if span_type == ContentType.Text:
                    content = span['content']
                    language = detect_lang(content)
                    if language == 'en':  # Only split long English words; splitting Chinese text would lose characters
                        content = ocr_escape_special_markdown_char(split_long_words(content))
                    else:
                        content = ocr_escape_special_markdown_char(content)
                elif span_type == ContentType.InlineEquation:
                    content = f"${span['content']}$"
                    inline_equation_num += 1
                if language == 'en':  # In an English context, contents are separated by spaces
                    para_text += content + ' '
                else:  # In a Chinese context, no space is needed between contents
                    para_text += content
        para_content = {
            'type': 'text',
            'text': para_text,
            'inline_equation_num': inline_equation_num
        }
    return para_content


def para_to_standard_format_v2(para_block, img_buket_path, page_idx):
    para_type = para_block['type']
    if para_type == BlockType.Text:
        para_content = {
            'type': 'text',
            'text': merge_para_with_text(para_block),
            'page_idx': page_idx
        }
    elif para_type == BlockType.Title:
        para_content = {
            'type': 'text',
            'text': merge_para_with_text(para_block),
            'text_level': 1,
            'page_idx': page_idx
        }
    elif para_type == BlockType.InterlineEquation:
        para_content = {
            'type': 'equation',
            'text': merge_para_with_text(para_block),
            'text_format': "latex",
            'page_idx': page_idx
        }
    elif para_type == BlockType.Image:
        para_content = {
            'type': 'image',
            'page_idx': page_idx
        }
        for block in para_block['blocks']:
            if block['type'] == BlockType.ImageBody:
                para_content['img_path'] = join_path(img_buket_path, block["lines"][0]["spans"][0]['image_path'])
            if block['type'] == BlockType.ImageCaption:
                para_content['img_caption'] = merge_para_with_text(block)
    elif para_type == BlockType.Table:
        para_content = {
            'type': 'table',
            'page_idx': page_idx
        }
        for block in para_block['blocks']:
            if block['type'] == BlockType.TableBody:
                if block["lines"][0]["spans"][0].get('latex', ''):
                    para_content['table_body'] = f"\n\n$\n {block['lines'][0]['spans'][0]['latex']}\n$\n\n"
                elif block["lines"][0]["spans"][0].get('html', ''):
                    para_content['table_body'] = f"\n\n{block['lines'][0]['spans'][0]['html']}\n\n"
                para_content['img_path'] = join_path(img_buket_path, block["lines"][0]["spans"][0]['image_path'])
            if block['type'] == BlockType.TableCaption:
                para_content['table_caption'] = merge_para_with_text(block)
            if block['type'] == BlockType.TableFootnote:
                para_content['table_footnote'] = merge_para_with_text(block)
    return para_content
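
# Illustrative entries in the standard format (made-up values):
#   {'type': 'text', 'text': 'A body paragraph ...', 'page_idx': 3}
#   {'type': 'image', 'page_idx': 3, 'img_path': 's3://bucket/imgs/xxx.jpg', 'img_caption': 'Figure 1 ...'}
#   {'type': 'equation', 'text': '\n$$\nE=mc^2\n$$\n', 'text_format': 'latex', 'page_idx': 3}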


def make_standard_format_with_para(pdf_info_dict: list, img_buket_path: str):
    content_list = []
    for page_info in pdf_info_dict:
        paras_of_layout = page_info.get("para_blocks")
        page_idx = page_info.get("page_idx")
        if not paras_of_layout:
            continue
        for para_block in paras_of_layout:
            para_content = para_to_standard_format_v2(para_block, img_buket_path, page_idx)
            content_list.append(para_content)
    return content_list


def line_to_standard_format(line, img_buket_path):
    line_text = ""
    inline_equation_num = 0
    for span in line['spans']:
        if not span.get('content'):
            if not span.get('image_path'):
                continue
            else:
                if span['type'] == ContentType.Image:
                    content = {
                        'type': 'image',
                        'img_path': join_path(img_buket_path, span['image_path'])
                    }
                    return content
                elif span['type'] == ContentType.Table:
                    content = {
                        'type': 'table',
                        'img_path': join_path(img_buket_path, span['image_path'])
                    }
                    return content
        else:
            if span['type'] == ContentType.InterlineEquation:
                interline_equation = span['content']
                content = {
                    'type': 'equation',
                    'latex': f"$$\n{interline_equation}\n$$"
                }
                return content
            elif span['type'] == ContentType.InlineEquation:
                inline_equation = span['content']
                line_text += f"${inline_equation}$"
                inline_equation_num += 1
            elif span['type'] == ContentType.Text:
                text_content = ocr_escape_special_markdown_char(span['content'])  # Escape special Markdown characters
                line_text += text_content
    content = {
        'type': 'text',
        'text': line_text,
        'inline_equation_num': inline_equation_num
    }
    return content
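
# Note: image, table and interline-equation spans return immediately, so a line containing
# one of them yields a single non-text entry; inline equations and text spans accumulate
# into one 'text' entry together with an inline_equation_num counter.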


def ocr_mk_mm_standard_format(pdf_info_dict: list, img_buket_path: str = ""):
    """
    content_list
        type        string      image/text/table/equation (interline equations become standalone items; inline equations are merged into the text)
        latex       string      LaTeX text content.
        text        string      Plain-text content.
        md          string      Markdown-formatted text content.
        img_path    string      s3://full/path/to/img.jpg
    """
    content_list = []
    for page_info in pdf_info_dict:
        blocks = page_info.get("preproc_blocks")
        if not blocks:
            continue
        for block in blocks:
            for line in block['lines']:
                content = line_to_standard_format(line, img_buket_path)
                content_list.append(content)
    return content_list


def union_make(pdf_info_dict: list, make_mode: str, drop_mode: str, img_buket_path: str = ""):
    output_content = []
    for page_info in pdf_info_dict:
        if page_info.get("need_drop", False):
            drop_reason = page_info.get("drop_reason")
            if drop_mode == DropMode.NONE:
                pass
            elif drop_mode == DropMode.WHOLE_PDF:
                raise Exception(f"drop_mode is {DropMode.WHOLE_PDF}, drop_reason is {drop_reason}")
            elif drop_mode == DropMode.SINGLE_PAGE:
                logger.warning(f"drop_mode is {DropMode.SINGLE_PAGE}, drop_reason is {drop_reason}")
                continue
            else:
                raise Exception("drop_mode can not be null")
        paras_of_layout = page_info.get("para_blocks")
        page_idx = page_info.get("page_idx")
        if not paras_of_layout:
            continue
        if make_mode == MakeMode.MM_MD:
            page_markdown = ocr_mk_markdown_with_para_core_v2(paras_of_layout, "mm", img_buket_path)
            output_content.extend(page_markdown)
        elif make_mode == MakeMode.NLP_MD:
            page_markdown = ocr_mk_markdown_with_para_core_v2(paras_of_layout, "nlp")
            output_content.extend(page_markdown)
        elif make_mode == MakeMode.STANDARD_FORMAT:
            for para_block in paras_of_layout:
                para_content = para_to_standard_format_v2(para_block, img_buket_path, page_idx)
                output_content.append(para_content)
    if make_mode in [MakeMode.MM_MD, MakeMode.NLP_MD]:
        return '\n\n'.join(output_content)
    elif make_mode == MakeMode.STANDARD_FORMAT:
        return output_content
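
# Minimal usage sketch: pdf_info_dict is assumed to be the per-page list produced by the
# parsing pipeline, and "s3://bucket/imgs" is just a placeholder bucket path.
#   md_text = union_make(pdf_info_dict, MakeMode.MM_MD, DropMode.NONE, "s3://bucket/imgs")
#   content_list = union_make(pdf_info_dict, MakeMode.STANDARD_FORMAT, DropMode.SINGLE_PAGE)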