import re

import wordninja
from loguru import logger

from magic_pdf.libs.commons import join_path
from magic_pdf.libs.language import detect_lang
from magic_pdf.libs.MakeContentConfig import DropMode, MakeMode
from magic_pdf.libs.markdown_utils import ocr_escape_special_markdown_char
from magic_pdf.libs.ocr_content_type import BlockType, ContentType


def __is_hyphen_at_line_end(line):
    """Check if a line ends with one or more letters followed by a hyphen.

    Args:
        line (str): The line of text to check.

    Returns:
        bool: True if the line ends with one or more letters followed by a hyphen, False otherwise.
    """
    # Use regex to check if the line ends with one or more letters followed by a hyphen
    return bool(re.search(r'[A-Za-z]+-\s*$', line))

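# Illustrative behaviour of __is_hyphen_at_line_end (these inputs are made-up
# examples, not from the original source):
#   __is_hyphen_at_line_end('trans-')          -> True  (letters directly before '-')
#   __is_hyphen_at_line_end('trans- ')         -> True  (trailing whitespace is allowed)
#   __is_hyphen_at_line_end('see figure 3 -')  -> False (no letter directly before '-')

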
def split_long_words(text):
    segments = text.split(' ')
    for i in range(len(segments)):
        words = re.findall(r'\w+|[^\w]', segments[i], re.UNICODE)
        for j in range(len(words)):
            if len(words[j]) > 10:
                words[j] = ' '.join(wordninja.split(words[j]))
        segments[i] = ''.join(words)
    return ' '.join(segments)

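# Illustrative use of split_long_words (made-up input; the exact segmentation
# depends on wordninja's built-in English language model):
#   split_long_words('Thisisarunontoken from OCR')
#   re-segments the 10+ character token with wordninja, giving roughly
#   'This is a run on token from OCR', while short tokens pass through unchanged.

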
def ocr_mk_mm_markdown_with_para(pdf_info_list: list, img_buket_path):
    markdown = []
    for page_info in pdf_info_list:
        paras_of_layout = page_info.get('para_blocks')
        page_markdown = ocr_mk_markdown_with_para_core_v2(
            paras_of_layout, 'mm', img_buket_path)
        markdown.extend(page_markdown)
    return '\n\n'.join(markdown)


def ocr_mk_nlp_markdown_with_para(pdf_info_dict: list):
    markdown = []
    for page_info in pdf_info_dict:
        paras_of_layout = page_info.get('para_blocks')
        page_markdown = ocr_mk_markdown_with_para_core_v2(
            paras_of_layout, 'nlp')
        markdown.extend(page_markdown)
    return '\n\n'.join(markdown)


def ocr_mk_mm_markdown_with_para_and_pagination(pdf_info_dict: list,
                                                img_buket_path):
    markdown_with_para_and_pagination = []
    page_no = 0
    for page_info in pdf_info_dict:
        paras_of_layout = page_info.get('para_blocks')
        if not paras_of_layout:
            continue
        page_markdown = ocr_mk_markdown_with_para_core_v2(
            paras_of_layout, 'mm', img_buket_path)
        markdown_with_para_and_pagination.append({
            'page_no': page_no,
            'md_content': '\n\n'.join(page_markdown)
        })
        page_no += 1
    return markdown_with_para_and_pagination

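# Illustrative shape of the list returned above (values are made up; the keys
# mirror the dict literal built in ocr_mk_mm_markdown_with_para_and_pagination):
#   [
#       {'page_no': 0, 'md_content': '# Title\n\nFirst paragraph ...'},
#       {'page_no': 1, 'md_content': 'Next page ...'},
#   ]

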
def ocr_mk_markdown_with_para_core(paras_of_layout, mode, img_buket_path=''):
    page_markdown = []
    for paras in paras_of_layout:
        for para in paras:
            para_text = ''
            for line in para:
                for span in line['spans']:
                    span_type = span.get('type')
                    content = ''
                    language = ''
                    if span_type == ContentType.Text:
                        content = span['content']
                        language = detect_lang(content)
                        if language == 'en':  # Only split long English words; splitting Chinese text would lose characters
                            content = ocr_escape_special_markdown_char(
                                split_long_words(content))
                        else:
                            content = ocr_escape_special_markdown_char(content)
                    elif span_type == ContentType.InlineEquation:
                        content = f"${span['content']}$"
                    elif span_type == ContentType.InterlineEquation:
                        content = f"\n$$\n{span['content']}\n$$\n"
                    elif span_type in [ContentType.Image, ContentType.Table]:
                        if mode == 'mm':
                            content = f"\n![]({join_path(img_buket_path, span['image_path'])})\n"
                        elif mode == 'nlp':
                            pass
                    if content != '':
                        if language == 'en':  # In English text, contents must be separated by spaces
                            para_text += content + ' '
                        else:  # In Chinese text, no separator is needed between contents
                            para_text += content
            if para_text.strip() == '':
                continue
            else:
                page_markdown.append(para_text.strip() + ' ')
    return page_markdown


def ocr_mk_markdown_with_para_core_v2(paras_of_layout,
                                      mode,
                                      img_buket_path='',
                                      parse_type="auto",
                                      lang=None):
    page_markdown = []
    for para_block in paras_of_layout:
        para_text = ''
        para_type = para_block['type']
        if para_type == BlockType.Text:
            para_text = merge_para_with_text(para_block, parse_type=parse_type, lang=lang)
        elif para_type == BlockType.Title:
            para_text = f'# {merge_para_with_text(para_block, parse_type=parse_type, lang=lang)}'
        elif para_type == BlockType.InterlineEquation:
            para_text = merge_para_with_text(para_block, parse_type=parse_type, lang=lang)
        elif para_type == BlockType.Image:
            if mode == 'nlp':
                continue
            elif mode == 'mm':
                for block in para_block['blocks']:  # 1st: append image_body
                    if block['type'] == BlockType.ImageBody:
                        for line in block['lines']:
                            for span in line['spans']:
                                if span['type'] == ContentType.Image:
                                    para_text += f"\n![]({join_path(img_buket_path, span['image_path'])}) \n"
                for block in para_block['blocks']:  # 2nd: append image_caption
                    if block['type'] == BlockType.ImageCaption:
                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)
                for block in para_block['blocks']:  # 3rd: append image_footnote
                    if block['type'] == BlockType.ImageFootnote:
                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)
        elif para_type == BlockType.Table:
            if mode == 'nlp':
                continue
            elif mode == 'mm':
                for block in para_block['blocks']:  # 1st: append table_caption
                    if block['type'] == BlockType.TableCaption:
                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)
                for block in para_block['blocks']:  # 2nd: append table_body
                    if block['type'] == BlockType.TableBody:
                        for line in block['lines']:
                            for span in line['spans']:
                                if span['type'] == ContentType.Table:
                                    # if processed by the table model
                                    if span.get('latex', ''):
                                        para_text += f"\n\n$\n {span['latex']}\n$\n\n"
                                    elif span.get('html', ''):
                                        para_text += f"\n\n{span['html']}\n\n"
                                    else:
                                        para_text += f"\n![]({join_path(img_buket_path, span['image_path'])}) \n"
                for block in para_block['blocks']:  # 3rd: append table_footnote
                    if block['type'] == BlockType.TableFootnote:
                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)

        if para_text.strip() == '':
            continue
        else:
            page_markdown.append(para_text.strip() + ' ')
    return page_markdown

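# Note on modes (as implemented above): 'mm' (multimodal) emits image and table
# bodies as markdown image links resolved against img_buket_path, while 'nlp'
# skips image and table blocks entirely and keeps only text, titles and equations.

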
def merge_para_with_text(para_block, parse_type="auto", lang=None):

    def detect_language(text):
        en_pattern = r'[a-zA-Z]+'
        en_matches = re.findall(en_pattern, text)
        en_length = sum(len(match) for match in en_matches)
        if len(text) > 0:
            if en_length / len(text) >= 0.5:
                return 'en'
            else:
                return 'unknown'
        else:
            return 'empty'

    para_text = ''
    for line in para_block['lines']:
        line_text = ''
        line_lang = ''
        for span in line['spans']:
            span_type = span['type']
            if span_type == ContentType.Text:
                line_text += span['content'].strip()
        if line_text != '':
            line_lang = detect_lang(line_text)
        for span in line['spans']:
            span_type = span['type']
            content = ''
            if span_type == ContentType.Text:
                content = span['content']
                # language = detect_lang(content)
                language = detect_language(content)
                # Check whether the target language is a minor (non-English) language
                if lang is not None and lang != 'en':
                    content = ocr_escape_special_markdown_char(content)
                else:  # Logic for non-minor languages
                    if language == 'en' and parse_type == 'ocr':  # Only split long English words; splitting Chinese text would lose characters
                        content = ocr_escape_special_markdown_char(
                            split_long_words(content))
                    else:
                        content = ocr_escape_special_markdown_char(content)
            elif span_type == ContentType.InlineEquation:
                content = f" ${span['content']}$ "
            elif span_type == ContentType.InterlineEquation:
                content = f"\n$$\n{span['content']}\n$$\n"

            if content != '':
                langs = ['zh', 'ja', 'ko']
                if line_lang in langs:  # Some documents put one character per span; per-span language detection is unreliable there, so judge by the whole line
                    para_text += content  # In Chinese/Japanese/Korean text, no space is needed between contents
                elif line_lang == 'en':
                    # A content ending with a hyphen was split mid-word: drop the hyphen and add no space so the next line joins directly
                    if __is_hyphen_at_line_end(content):
                        para_text += content[:-1]
                    else:
                        para_text += content + ' '
                else:
                    para_text += content + ' '  # In Western-language text, contents are separated by spaces
    return para_text

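# Minimal sketch of the block structure merge_para_with_text expects (field
# names are inferred from the accesses above; the values are made up):
#
#   para_block = {
#       'type': BlockType.Text,
#       'lines': [
#           {'spans': [
#               {'type': ContentType.Text, 'content': 'An OCR-ed sen-'},
#               {'type': ContentType.InlineEquation, 'content': 'E=mc^2'},
#           ]},
#       ],
#   }

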
def para_to_standard_format(para, img_buket_path):
    para_content = {}
    if len(para) == 1:
        para_content = line_to_standard_format(para[0], img_buket_path)
    elif len(para) > 1:
        para_text = ''
        inline_equation_num = 0
        for line in para:
            for span in line['spans']:
                language = ''
                span_type = span.get('type')
                content = ''
                if span_type == ContentType.Text:
                    content = span['content']
                    language = detect_lang(content)
                    if language == 'en':  # Only split long English words; splitting Chinese text would lose characters
                        content = ocr_escape_special_markdown_char(
                            split_long_words(content))
                    else:
                        content = ocr_escape_special_markdown_char(content)
                elif span_type == ContentType.InlineEquation:
                    content = f"${span['content']}$"
                    inline_equation_num += 1

                if language == 'en':  # In English text, contents must be separated by spaces
                    para_text += content + ' '
                else:  # In Chinese text, no separator is needed between contents
                    para_text += content
        para_content = {
            'type': 'text',
            'text': para_text,
            'inline_equation_num': inline_equation_num,
        }
    return para_content


def para_to_standard_format_v2(para_block, img_buket_path, page_idx, parse_type="auto", lang=None, drop_reason=None):
    para_type = para_block['type']
    para_content = {}
    if para_type == BlockType.Text:
        para_content = {
            'type': 'text',
            'text': merge_para_with_text(para_block, parse_type=parse_type, lang=lang),
        }
    elif para_type == BlockType.Title:
        para_content = {
            'type': 'text',
            'text': merge_para_with_text(para_block, parse_type=parse_type, lang=lang),
            'text_level': 1,
        }
    elif para_type == BlockType.InterlineEquation:
        para_content = {
            'type': 'equation',
            'text': merge_para_with_text(para_block, parse_type=parse_type, lang=lang),
            'text_format': 'latex',
        }
    elif para_type == BlockType.Image:
        para_content = {'type': 'image'}
        for block in para_block['blocks']:
            if block['type'] == BlockType.ImageBody:
                para_content['img_path'] = join_path(
                    img_buket_path,
                    block['lines'][0]['spans'][0]['image_path'])
            if block['type'] == BlockType.ImageCaption:
                para_content['img_caption'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)
            if block['type'] == BlockType.ImageFootnote:
                para_content['img_footnote'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)
    elif para_type == BlockType.Table:
        para_content = {'type': 'table'}
        for block in para_block['blocks']:
            if block['type'] == BlockType.TableBody:
                if block['lines'][0]['spans'][0].get('latex', ''):
                    para_content['table_body'] = f"\n\n$\n {block['lines'][0]['spans'][0]['latex']}\n$\n\n"
                elif block['lines'][0]['spans'][0].get('html', ''):
                    para_content['table_body'] = f"\n\n{block['lines'][0]['spans'][0]['html']}\n\n"
                para_content['img_path'] = join_path(img_buket_path, block['lines'][0]['spans'][0]['image_path'])
            if block['type'] == BlockType.TableCaption:
                para_content['table_caption'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)
            if block['type'] == BlockType.TableFootnote:
                para_content['table_footnote'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)

    para_content['page_idx'] = page_idx

    if drop_reason is not None:
        para_content['drop_reason'] = drop_reason

    return para_content

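# Minimal sketch of what para_to_standard_format_v2 returns for a plain text
# block (shape taken from the branches above; the text value is made up):
#
#   {
#       'type': 'text',
#       'text': 'Merged paragraph text ...',
#       'page_idx': 0,
#   }
#
# Image/table blocks additionally carry 'img_path', caption/footnote fields
# and, when drop_reason is passed, a 'drop_reason' field.

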
def make_standard_format_with_para(pdf_info_dict: list, img_buket_path: str):
    content_list = []
    for page_info in pdf_info_dict:
        paras_of_layout = page_info.get('para_blocks')
        if not paras_of_layout:
            continue
        for para_block in paras_of_layout:
            # page_idx is required by para_to_standard_format_v2
            para_content = para_to_standard_format_v2(
                para_block, img_buket_path, page_info.get('page_idx'))
            content_list.append(para_content)
    return content_list


def line_to_standard_format(line, img_buket_path):
    line_text = ''
    inline_equation_num = 0
    for span in line['spans']:
        if not span.get('content'):
            if not span.get('image_path'):
                continue
            else:
                if span['type'] == ContentType.Image:
                    content = {
                        'type': 'image',
                        'img_path': join_path(img_buket_path,
                                              span['image_path']),
                    }
                    return content
                elif span['type'] == ContentType.Table:
                    content = {
                        'type': 'table',
                        'img_path': join_path(img_buket_path,
                                              span['image_path']),
                    }
                    return content
        else:
            if span['type'] == ContentType.InterlineEquation:
                interline_equation = span['content']
                content = {
                    'type': 'equation',
                    'latex': f'$$\n{interline_equation}\n$$'
                }
                return content
            elif span['type'] == ContentType.InlineEquation:
                inline_equation = span['content']
                line_text += f'${inline_equation}$'
                inline_equation_num += 1
            elif span['type'] == ContentType.Text:
                text_content = ocr_escape_special_markdown_char(
                    span['content'])  # escape special markdown characters
                line_text += text_content
    content = {
        'type': 'text',
        'text': line_text,
        'inline_equation_num': inline_equation_num,
    }
    return content


def ocr_mk_mm_standard_format(pdf_info_dict: list, img_buket_path: str = ''):
    """Build a content_list in the standard format.

    Fields of each entry:
        type (str): image / text / table / equation
            (interline equations become standalone entries; inline equations are merged into text)
        latex (str): LaTeX source of the equation.
        text (str): plain-text content.
        md (str): markdown-formatted content.
        img_path (str): e.g. s3://full/path/to/img.jpg
    """
    content_list = []
    for page_info in pdf_info_dict:
        blocks = page_info.get('preproc_blocks')
        if not blocks:
            continue
        for block in blocks:
            for line in block['lines']:
                # img_buket_path is forwarded because line_to_standard_format requires it
                content = line_to_standard_format(line, img_buket_path)
                content_list.append(content)
    return content_list


def union_make(pdf_info_dict: list,
               make_mode: str,
               drop_mode: str,
               img_buket_path: str = '',
               parse_type: str = "auto",
               lang=None):
    output_content = []
    for page_info in pdf_info_dict:
        drop_reason_flag = False
        drop_reason = None
        if page_info.get('need_drop', False):
            drop_reason = page_info.get('drop_reason')
            if drop_mode == DropMode.NONE:
                pass
            elif drop_mode == DropMode.NONE_WITH_REASON:
                drop_reason_flag = True
            elif drop_mode == DropMode.WHOLE_PDF:
                raise Exception((f'drop_mode is {DropMode.WHOLE_PDF}, '
                                 f'drop_reason is {drop_reason}'))
            elif drop_mode == DropMode.SINGLE_PAGE:
                logger.warning((f'drop_mode is {DropMode.SINGLE_PAGE}, '
                                f'drop_reason is {drop_reason}'))
                continue
            else:
                raise Exception('drop_mode can not be null')

        paras_of_layout = page_info.get('para_blocks')
        page_idx = page_info.get('page_idx')
        if not paras_of_layout:
            continue
        if make_mode == MakeMode.MM_MD:
            page_markdown = ocr_mk_markdown_with_para_core_v2(
                paras_of_layout, 'mm', img_buket_path, parse_type=parse_type, lang=lang)
            output_content.extend(page_markdown)
        elif make_mode == MakeMode.NLP_MD:
            page_markdown = ocr_mk_markdown_with_para_core_v2(
                paras_of_layout, 'nlp', parse_type=parse_type, lang=lang)
            output_content.extend(page_markdown)
        elif make_mode == MakeMode.STANDARD_FORMAT:
            for para_block in paras_of_layout:
                if drop_reason_flag:
                    para_content = para_to_standard_format_v2(
                        para_block, img_buket_path, page_idx, parse_type=parse_type, lang=lang, drop_reason=drop_reason)
                else:
                    para_content = para_to_standard_format_v2(
                        para_block, img_buket_path, page_idx, parse_type=parse_type, lang=lang)
                output_content.append(para_content)
    if make_mode in [MakeMode.MM_MD, MakeMode.NLP_MD]:
        return '\n\n'.join(output_content)
    elif make_mode == MakeMode.STANDARD_FORMAT:
        return output_content

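
# Minimal, self-contained usage sketch (not part of the original module): it
# feeds union_make a hand-built, single-page pdf_info_dict containing one text
# block. Field names mirror the accesses made above; the content is made up.
if __name__ == '__main__':
    demo_pdf_info = [{
        'page_idx': 0,
        'para_blocks': [{
            'type': BlockType.Text,
            'lines': [{
                'spans': [{
                    'type': ContentType.Text,
                    'content': 'Hello OCR world.',
                }],
            }],
        }],
    }]
    # Render as NLP-style markdown (no image/table links), keeping all pages.
    md = union_make(demo_pdf_info, MakeMode.NLP_MD, DropMode.NONE)
    print(md)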