ppstructurev3_single_process.py

import json
import time
import os
import traceback
import argparse
import sys
from pathlib import Path
from typing import List, Dict, Any

import cv2
import numpy as np
from paddlex import create_pipeline
from paddlex.utils.device import constr_device, parse_device
from tqdm import tqdm
from dotenv import load_dotenv

load_dotenv(override=True)

def process_images_single_process(image_paths: List[str],
                                  pipeline_name: str = "PP-StructureV3",
                                  device: str = "gpu:0",
                                  batch_size: int = 1,
                                  output_dir: str = "./output") -> List[Dict[str, Any]]:
    """
    Single-process image processing function.

    Args:
        image_paths: List of image paths.
        pipeline_name: Pipeline name.
        device: Device string, e.g. "gpu:0" or "cpu".
        batch_size: Batch size.
        output_dir: Output directory.

    Returns:
        List of per-image processing results.
    """
    # Create the output directory
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    print(f"Initializing pipeline '{pipeline_name}' on device '{device}'...")
    try:
        # Initialize the pipeline
        pipeline = create_pipeline(pipeline_name, device=device)
        print(f"Pipeline initialized successfully on {device}")
    except Exception as e:
        print(f"Failed to initialize pipeline: {e}", file=sys.stderr)
        traceback.print_exc()
        return []

    all_results = []
    total_images = len(image_paths)
    print(f"Processing {total_images} images with batch size {batch_size}")

    # Show progress with tqdm
    with tqdm(total=total_images, desc="Processing images", unit="img") as pbar:
        # Process images batch by batch
        for i in range(0, total_images, batch_size):
            batch = image_paths[i:i + batch_size]
            batch_start_time = time.time()
            try:
                # Run prediction; the toggles control which sub-modules are applied
                results = pipeline.predict(
                    batch,
                    use_doc_orientation_classify=True,
                    use_doc_unwarping=False,
                    use_seal_recognition=True,
                    use_chart_recognition=True,
                    use_table_recognition=True,
                    use_formula_recognition=True,
                )
                batch_processing_time = time.time() - batch_start_time
                batch_results = []

                # Handle each result in the batch
                for result in results:
                    try:
                        input_path = Path(result["input_path"])
                        # Build the output file name (append the page index for multi-page inputs)
                        if result.get("page_index") is not None:
                            output_filename = f"{input_path.stem}_{result['page_index']}"
                        else:
                            output_filename = f"{input_path.stem}"

                        # Save JSON and Markdown files
                        json_output_path = str(Path(output_dir, f"{output_filename}.json"))
                        md_output_path = str(Path(output_dir, f"{output_filename}.md"))
                        result.save_to_json(json_output_path)
                        result.save_to_markdown(md_output_path)

                        # Record the processing result
                        batch_results.append({
                            "image_path": input_path.name,
                            "processing_time": batch_processing_time / len(batch),  # average time per image
                            "success": True,
                            "device": device,
                            "output_json": json_output_path,
                            "output_md": md_output_path
                        })
                    except Exception as e:
                        print(f"Error saving result for {result.get('input_path', 'unknown')}: {e}", file=sys.stderr)
                        traceback.print_exc()
                        batch_results.append({
                            "image_path": Path(result["input_path"]).name,
                            "processing_time": 0,
                            "success": False,
                            "device": device,
                            "error": str(e)
                        })

                all_results.extend(batch_results)

                # Update the progress bar
                success_count = sum(1 for r in batch_results if r.get('success', False))
                pbar.update(len(batch))
                pbar.set_postfix({
                    'batch_time': f"{batch_processing_time:.2f}s",
                    'batch_success': f"{success_count}/{len(batch)}",
                    'total_success': f"{sum(1 for r in all_results if r.get('success', False))}/{len(all_results)}"
                })
            except Exception as e:
                print(f"Error processing batch {[Path(p).name for p in batch]}: {e}", file=sys.stderr)
                traceback.print_exc()
                # Add an error entry for every image in the failed batch
                error_results = []
                for img_path in batch:
                    error_results.append({
                        "image_path": Path(img_path).name,
                        "processing_time": 0,
                        "success": False,
                        "device": device,
                        "error": str(e)
                    })
                all_results.extend(error_results)
                pbar.update(len(batch))

    return all_results
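
# A minimal sketch of calling the helper directly from Python instead of via the
# CLI below. The image paths and output directory here are hypothetical, not part
# of this repository:
#
#   results = process_images_single_process(
#       ["/data/docs/page_001.png", "/data/docs/page_002.png"],
#       pipeline_name="PP-StructureV3",
#       device="cpu",
#       batch_size=2,
#       output_dir="./demo_output",
#   )
#   print(sum(r["success"] for r in results), "of", len(results), "pages parsed")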


def main():
    """Entry point."""
    parser = argparse.ArgumentParser(description="PaddleX PP-StructureV3 Single Process Processing")
    # Argument definitions
    parser.add_argument("--input_dir", type=str, default="../../OmniDocBench/OpenDataLab___OmniDocBench/images", help="Input directory")
    parser.add_argument("--output_dir", type=str, default="./OmniDocBench_Results_Single", help="Output directory")
    parser.add_argument("--pipeline", type=str, default="PP-StructureV3", help="Pipeline name")
    parser.add_argument("--device", type=str, default="gpu:0", help="Device string (e.g., 'gpu:0', 'cpu')")
    parser.add_argument("--batch_size", type=int, default=4, help="Batch size")
    parser.add_argument("--input_pattern", type=str, default="*", help="Input file pattern")
    parser.add_argument("--test_mode", action="store_true", help="Test mode (process only 20 images)")
    args = parser.parse_args()

    try:
        # Resolve input and output directories
        input_dir = Path(args.input_dir).resolve()
        output_dir = Path(args.output_dir).resolve()
        print(f"Input dir: {input_dir}")
        print(f"Output dir: {output_dir}")
        if not input_dir.exists():
            print(f"Input directory does not exist: {input_dir}")
            return 1

        # Collect image files
        image_extensions = ['.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif']
        image_files = []
        for ext in image_extensions:
            image_files.extend(list(input_dir.glob(f"*{ext}")))
            image_files.extend(list(input_dir.glob(f"*{ext.upper()}")))
        if not image_files:
            print(f"No image files found in {input_dir}")
            return 1
        # Deduplicate (both globs can match the same file on case-insensitive filesystems) and sort
        image_files = sorted({str(f) for f in image_files})
        print(f"Found {len(image_files)} image files")

        if args.test_mode:
            image_files = image_files[:20]
            print(f"Test mode: processing only {len(image_files)} images")

        # Validate the requested device
        if args.device.startswith('gpu'):
            try:
                import paddle
                if not paddle.device.is_compiled_with_cuda():
                    print("GPU requested but CUDA not available, falling back to CPU")
                    args.device = "cpu"
                else:
                    gpu_count = paddle.device.cuda.device_count()
                    device_id = int(args.device.split(':')[1]) if ':' in args.device else 0
                    if device_id >= gpu_count:
                        print(f"GPU {device_id} not available (only {gpu_count} GPUs), falling back to GPU 0")
                        args.device = "gpu:0"
            except Exception as e:
                print(f"Error checking GPU availability: {e}, falling back to CPU")
                args.device = "cpu"

        print(f"Using device: {args.device}")
        print(f"Batch size: {args.batch_size}")

        # Start processing
        start_time = time.time()
        results = process_images_single_process(
            image_files,
            args.pipeline,
            args.device,
            args.batch_size,
            str(output_dir)
        )
        total_time = time.time() - start_time

        # Summarize the results
        success_count = sum(1 for r in results if r.get('success', False))
        error_count = len(results) - success_count
        print("\n" + "=" * 50)
        print("Processing completed!")
        print(f"Total files: {len(image_files)}")
        print(f"Successful: {success_count}")
        print(f"Failed: {error_count}")
        if len(image_files) > 0:
            print(f"Success rate: {success_count / len(image_files) * 100:.2f}%")
        print(f"Total time: {total_time:.2f} seconds")
        if total_time > 0:
            print(f"Throughput: {len(image_files) / total_time:.2f} images/second")

        # Collect run statistics
        stats = {
            "total_files": len(image_files),
            "success_count": success_count,
            "error_count": error_count,
            "success_rate": success_count / len(image_files) if len(image_files) > 0 else 0,
            "total_time": total_time,
            "throughput": len(image_files) / total_time if total_time > 0 else 0,
            "batch_size": args.batch_size,
            "device": args.device,
            "pipeline": args.pipeline
        }

        # Save the final results (per-image entries plus the run statistics)
        output_file = os.path.join(output_dir, f"OmniDocBench_Single_batch{args.batch_size}.json")
        final_results = {
            "stats": stats,
            "results": results
        }
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(final_results, f, ensure_ascii=False, indent=2)
        print(f"Results saved to: {output_file}")
        return 0
    except Exception as e:
        print(f"Processing failed: {e}", file=sys.stderr)
        traceback.print_exc()
        return 1


if __name__ == "__main__":
    print("🚀 Starting single-process OCR program...")
    print(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES', 'Not set')}")
    if len(sys.argv) == 1:
        # No command-line arguments given: run with the default configuration
        print("No command line arguments provided. Running with default configuration...")
        # Default configuration
        default_config = {
            "input_dir": "../../OmniDocBench/OpenDataLab___OmniDocBench/images",
            "output_dir": "./OmniDocBench_Results_Single",
            "pipeline": "PP-StructureV3",
            "device": "gpu:0",
            "batch_size": 4,
        }
        # Rebuild argv from the defaults
        sys.argv = [sys.argv[0]]
        for key, value in default_config.items():
            sys.argv.extend([f"--{key}", str(value)])
        # Enable test mode
        sys.argv.append("--test_mode")
    sys.exit(main())
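
# Example invocations (a sketch; the input path follows the script's defaults and
# should be adjusted to your dataset layout):
#   python ppstructurev3_single_process.py --input_dir ../../OmniDocBench/OpenDataLab___OmniDocBench/images \
#       --output_dir ./OmniDocBench_Results_Single --device gpu:0 --batch_size 4
#   python ppstructurev3_single_process.py --device cpu --batch_size 1 --test_mode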