# ppstructurev3_scheduler.py
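"""
Multi-process scheduler for the PaddleX PP-StructureV3 pipeline.

The scheduler scans an input directory for images, splits the file list into
one chunk per worker, and launches ppstructurev3_single_process.py as a
subprocess for each chunk (one device per worker). Per-worker outputs go to
<output_dir>/process_<i>, and aggregate statistics are written to
scheduler_results_<N>procs.json.

Example invocation (a minimal sketch; paths are illustrative and the flags are
the ones defined in main() below):

    python ppstructurev3_scheduler.py \
        --input_dir ./images --output_dir ./results \
        --num_processes 4 --devices gpu:0,gpu:1,gpu:2,gpu:3 \
        --batch_size 4 --pipeline PP-StructureV3
"""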

import json
import time
import os
import argparse
import sys
import subprocess
import tempfile
from pathlib import Path
from typing import List, Dict, Any, Tuple
from concurrent.futures import ProcessPoolExecutor, as_completed
import threading
from queue import Queue, Empty
from tqdm import tqdm

def split_files(file_list: List[str], num_splits: int) -> List[List[str]]:
    """
    Split a list of files into the requested number of chunks.

    Args:
        file_list: list of file paths
        num_splits: number of chunks to create

    Returns:
        The list of file chunks.
    """
    if num_splits <= 0:
        return [file_list]

    chunk_size = len(file_list) // num_splits
    remainder = len(file_list) % num_splits

    chunks = []
    start = 0
    for i in range(num_splits):
        # The first `remainder` chunks get one extra file each
        current_chunk_size = chunk_size + (1 if i < remainder else 0)
        if current_chunk_size > 0:
            chunks.append(file_list[start:start + current_chunk_size])
            start += current_chunk_size

    return [chunk for chunk in chunks if chunk]  # drop empty chunks
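
# A quick illustration of the chunking behaviour (file names are hypothetical):
#   split_files(["a.png", "b.png", "c.png", "d.png", "e.png"], 2)
#   -> [["a.png", "b.png", "c.png"], ["d.png", "e.png"]]
# Chunk sizes differ by at most one, so each worker gets a near-equal share.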

def create_temp_file_list(file_chunk: List[str]) -> str:
    """
    Write a chunk of file paths to a temporary list file.

    Args:
        file_chunk: list of file paths

    Returns:
        The path of the temporary file.
    """
    with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f:
        for file_path in file_chunk:
            f.write(f"{file_path}\n")
    return f.name
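
# The temporary list file contains one image path per line. Because it is
# created with delete=False, the caller is responsible for removing it;
# run_single_process does so in its finally block.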

def run_single_process(args: Tuple[List[str], Dict[str, Any], int]) -> Dict[str, Any]:
    """
    Run a single ppstructurev3_single_process.py worker.

    Args:
        args: (file_chunk, config, process_id)

    Returns:
        A dict describing the processing result.
    """
    file_chunk, config, process_id = args

    if not file_chunk:
        return {"process_id": process_id, "success": False, "error": "Empty file chunk"}

    # Create the temporary file-list file
    temp_file_list = create_temp_file_list(file_chunk)

    try:
        # Create a per-process output directory
        process_output_dir = Path(config["output_dir"]) / f"process_{process_id}"
        process_output_dir.mkdir(parents=True, exist_ok=True)

        # Build the command line
        cmd = [
            sys.executable,
            config["single_process_script"],
            "--input_file_list", temp_file_list,  # the single-process script must accept a file list
            "--output_dir", str(process_output_dir),
            "--pipeline", config["pipeline"],
            "--device", config["device"],
            "--batch_size", str(config["batch_size"]),
        ]

        # Optional arguments
        if config.get("test_mode", False):
            cmd.append("--test_mode")

        print(f"Process {process_id} starting with {len(file_chunk)} files on device {config['device']}")

        # Run the subprocess
        start_time = time.time()
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=config.get("timeout", 3600)  # 1-hour default timeout
        )
        processing_time = time.time() - start_time

        if result.returncode == 0:
            print(f"Process {process_id} completed successfully in {processing_time:.2f}s")
            # Collect the result files
            result_files = list(process_output_dir.glob("*.json"))
            return {
                "process_id": process_id,
                "success": True,
                "processing_time": processing_time,
                "file_count": len(file_chunk),
                "device": config["device"],
                "output_dir": str(process_output_dir),
                "result_files": [str(f) for f in result_files],
                "stdout": result.stdout,
                "stderr": result.stderr
            }
        else:
            print(f"Process {process_id} failed with return code {result.returncode}")
            return {
                "process_id": process_id,
                "success": False,
                "error": f"Process failed with return code {result.returncode}",
                "stdout": result.stdout,
                "stderr": result.stderr
            }

    except subprocess.TimeoutExpired:
        print(f"Process {process_id} timed out")
        return {
            "process_id": process_id,
            "success": False,
            "error": "Process timeout"
        }
    except Exception as e:
        print(f"Process {process_id} error: {e}")
        return {
            "process_id": process_id,
            "success": False,
            "error": str(e)
        }
    finally:
        # Clean up the temporary file list
        try:
            os.unlink(temp_file_list)
        except OSError:
            pass
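
# For reference, each worker is spawned with a command line roughly like the one
# below (the temp-file name and paths are illustrative, not literal values):
#   python ppstructurev3_single_process.py \
#       --input_file_list /tmp/tmpXXXXXX.txt \
#       --output_dir <output_dir>/process_0 \
#       --pipeline PP-StructureV3 --device gpu:0 --batch_size 4 [--test_mode]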

def monitor_progress(total_files: int, completed_queue: Queue):
    """Monitor overall processing progress with a tqdm bar."""
    with tqdm(total=total_files, desc="Total Progress", unit="files") as pbar:
        completed_count = 0
        while completed_count < total_files:
            try:
                batch_count = completed_queue.get(timeout=1)
                completed_count += batch_count
                pbar.update(batch_count)
            except Empty:
                continue
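
# The queue is fed from main(): whenever a worker process finishes successfully,
# its file_count is put on the queue, so the bar advances in chunk-sized jumps
# (one update per finished worker) rather than per individual file.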

def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(description="PaddleX PP-StructureV3 Multi-Process Scheduler")

    # Input/output arguments
    parser.add_argument("--input_dir", type=str, required=True, help="Input directory")
    parser.add_argument("--output_dir", type=str, required=True, help="Output directory")
    parser.add_argument("--single_process_script", type=str,
                        default="./ppstructurev3_single_process.py",
                        help="Path to single process script")

    # Parallelism arguments
    parser.add_argument("--num_processes", type=int, default=4, help="Number of parallel processes")
    parser.add_argument("--devices", type=str, default="gpu:0,gpu:1,gpu:2,gpu:3",
                        help="Device list (comma separated)")

    # Pipeline arguments
    parser.add_argument("--pipeline", type=str, default="PP-StructureV3", help="Pipeline name")
    parser.add_argument("--batch_size", type=int, default=4, help="Batch size per process")
    parser.add_argument("--timeout", type=int, default=3600, help="Process timeout in seconds")

    # Other arguments
    parser.add_argument("--test_mode", action="store_true", help="Test mode")
    parser.add_argument("--max_files", type=int, default=None, help="Maximum files to process")

    args = parser.parse_args()

    try:
        # Resolve input and output directories
        input_dir = Path(args.input_dir).resolve()
        output_dir = Path(args.output_dir).resolve()

        print(f"Input dir: {input_dir}")
        print(f"Output dir: {output_dir}")

        if not input_dir.exists():
            print(f"Input directory does not exist: {input_dir}")
            return 1

        # Find image files
        image_extensions = ['.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif']
        image_files = []
        for ext in image_extensions:
            image_files.extend(list(input_dir.glob(f"*{ext}")))
            image_files.extend(list(input_dir.glob(f"*{ext.upper()}")))

        if not image_files:
            print(f"No image files found in {input_dir}")
            return 1

        # Deduplicate and sort
        image_files = sorted(set(str(f) for f in image_files))

        # Optionally limit the number of files
        if args.max_files:
            image_files = image_files[:args.max_files]
        if args.test_mode:
            image_files = image_files[:20]
            print(f"Test mode: processing only {len(image_files)} images")

        print(f"Found {len(image_files)} image files")

        # Parse the device list
        devices = [d.strip() for d in args.devices.split(',')]
        if len(devices) < args.num_processes:
            # Fewer devices than processes: reuse devices round-robin
            devices = devices * ((args.num_processes // len(devices)) + 1)
        devices = devices[:args.num_processes]

        print(f"Using {args.num_processes} processes with devices: {devices}")

        # Split the file list into one chunk per process
        file_chunks = split_files(image_files, args.num_processes)
        print(f"Split into {len(file_chunks)} chunks: {[len(chunk) for chunk in file_chunks]}")

        # Create the output directory
        output_dir.mkdir(parents=True, exist_ok=True)

        # Prepare per-process arguments
        process_configs = []
        for i, (chunk, device) in enumerate(zip(file_chunks, devices)):
            config = {
                "single_process_script": str(Path(args.single_process_script).resolve()),
                "output_dir": str(output_dir),
                "pipeline": args.pipeline,
                "device": device,
                "batch_size": args.batch_size,
                "timeout": args.timeout,
                "test_mode": args.test_mode
            }
            process_configs.append((chunk, config, i))

        # Start progress monitoring in a daemon thread
        completed_queue = Queue()
        progress_thread = threading.Thread(
            target=monitor_progress,
            args=(len(image_files), completed_queue)
        )
        progress_thread.daemon = True
        progress_thread.start()

        # Run the workers in parallel
        start_time = time.time()
        results = []

        with ProcessPoolExecutor(max_workers=args.num_processes) as executor:
            # Submit all tasks
            future_to_process = {
                executor.submit(run_single_process, process_args): i
                for i, process_args in enumerate(process_configs)
            }

            # Collect results as they complete
            for future in as_completed(future_to_process):
                process_id = future_to_process[future]
                try:
                    result = future.result()
                    results.append(result)

                    # Update progress
                    if result.get("success", False):
                        completed_queue.put(result.get("file_count", 0))

                    print(f"Process {process_id} finished: {result.get('success', False)}")
                except Exception as e:
                    print(f"Process {process_id} generated an exception: {e}")
                    results.append({
                        "process_id": process_id,
                        "success": False,
                        "error": str(e)
                    })

        total_time = time.time() - start_time

        # Summarize the results
        successful_processes = sum(1 for r in results if r.get('success', False))
        total_processed_files = sum(r.get('file_count', 0) for r in results if r.get('success', False))

        print("\n" + "=" * 60)
        print("🎉 Parallel processing completed!")
        print("📊 Statistics:")
        print(f"   Total processes: {len(results)}")
        print(f"   Successful processes: {successful_processes}")
        print(f"   Total files processed: {total_processed_files}/{len(image_files)}")
        print(f"   Success rate: {total_processed_files / len(image_files) * 100:.2f}%")
        print("⏱️ Performance:")
        print(f"   Total time: {total_time:.2f} seconds")
        if total_processed_files > 0 and total_time > 0:
            print(f"   Throughput: {total_processed_files / total_time:.2f} files/second")
            print(f"   Avg time per file: {total_time / total_processed_files:.2f} seconds")

        # Save the scheduler statistics
        scheduler_stats = {
            "total_files": len(image_files),
            "total_processes": len(results),
            "successful_processes": successful_processes,
            "total_processed_files": total_processed_files,
            "success_rate": total_processed_files / len(image_files) if len(image_files) > 0 else 0,
            "total_time": total_time,
            "throughput": total_processed_files / total_time if total_time > 0 else 0,
            "avg_time_per_file": total_time / total_processed_files if total_processed_files > 0 else 0,
            "num_processes": args.num_processes,
            "devices": devices,
            "batch_size": args.batch_size,
            "pipeline": args.pipeline,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }

        final_results = {
            "scheduler_stats": scheduler_stats,
            "process_results": results
        }

        output_file = output_dir / f"scheduler_results_{args.num_processes}procs.json"
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(final_results, f, ensure_ascii=False, indent=2)

        print(f"💾 Scheduler results saved to: {output_file}")

        return 0 if successful_processes == len(results) else 1

    except Exception as e:
        print(f"❌ Scheduler failed: {e}")
        import traceback
        traceback.print_exc()
        return 1
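
# Expected on-disk layout after a run (directory names follow the code above;
# the exact contents of each process_<i> directory depend on the single-process
# script, which this scheduler only launches):
#   <output_dir>/
#       process_0/ ... process_<N-1>/      per-worker result files (*.json)
#       scheduler_results_<N>procs.json    {"scheduler_stats": {...}, "process_results": [...]}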

if __name__ == "__main__":
    print("🚀 Starting the multi-process scheduler...")

    if len(sys.argv) == 1:
        # No CLI arguments given: fall back to a default test configuration
        default_config = {
            "input_dir": "../../OmniDocBench/OpenDataLab___OmniDocBench/images",
            "output_dir": "./OmniDocBench_Results_Scheduler",
            "num_processes": 4,
            "devices": "gpu:0,gpu:1,gpu:2,gpu:3",
            "batch_size": 2,
        }
        sys.argv = [sys.argv[0]]
        for key, value in default_config.items():
            sys.argv.extend([f"--{key}", str(value)])
        sys.argv.append("--test_mode")

    sys.exit(main())