batch_runner_all.py
#!/usr/bin/env python3
"""
Batch Runner - run the Complete Agent Flow repeatedly
=====================================================

This script runs the full agent workflow several times in a row; each run
writes its logs to its own folder.

Usage:
    python batch_runner_all.py

Configuration:
    - Number of runs: RUNS = 10
    - Industry:       INDUSTRY = "农业"
    - Data file:      DATA_FILE = "data_files/交易流水样例数据.csv"
    - Query:          QUESTION = "请生成一份详细的农业经营贷流水分析报告..."

Folder layout:
    api_results_1/    # logs of run 1
    api_results_2/    # logs of run 2
    ...
    api_results_10/   # logs of run 10

Author:  Big Agent Team
Version: 1.0.0
Created: 2024-12-20
"""
import asyncio
import os
import sys
import time
from typing import Dict, Any

# Add the project root to sys.path so that config can be imported.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)

os.environ["LANGCHAIN_TRACING_V2"] = "false"
os.environ["LANGCHAIN_API_KEY"] = ""
# Disable LangGraph/LangSmith tracing.
os.environ["LANGSMITH_TRACING"] = "false"

# Pick the import style based on how the file is executed.
if __name__ == "__main__":
    # Executed directly as a script: use absolute imports.
    from llmops.complete_agent_flow_rule import run_complete_agent_flow, run_flow
    from llmops.agents.data_manager import DataManager
else:
    # Imported as a module: use relative imports (run_flow is needed by
    # run_single_flow, so it must be imported in this branch as well).
    from .complete_agent_flow_rule import run_complete_agent_flow, run_flow
    from .agents.data_manager import DataManager

import config

# ========== Configuration ==========
RUNS = 1  # number of runs
INDUSTRY = "农业"  # industry (agriculture)
ORIGINAL_DATA_FILE = "data_files/11111.png"  # raw test data file (PDF or image)
DATA_FILE = "data_files/交易流水样例数据.csv"  # standardized data file path
QUESTION = "请生成一份详细的农业经营贷流水分析报告,需要包含:1.总收入和总支出统计 2.收入笔数和支出笔数 3.各类型收入支出占比分析 4.交易对手收入支出TOP3排名 5.按月份的收入支出趋势分析 6.账户数量和交易时间范围统计 7.资金流入流出月度统计等全面指标"  # analysis query (Chinese prompt passed to the LLM)
# ====================================
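
# Note: main() below builds its input path from ORIGINAL_DATA_FILE; DATA_FILE
# (the already-standardized CSV) is not used by the batch entry point, so
# swap it in deliberately if you want to skip the standardization step.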


async def run_single_flow(run_id: str, question: str, industry: str, file_name: str) -> Dict[str, Any]:
    """
    Run a single workflow instance.

    Args:
        run_id: run ID
        question: user query
        industry: industry
        file_name: data file path

    Returns:
        The run result.
    """
    print(f"\n{'='*60}")
    print(f"🚀 Starting run #{run_id}")
    print(f"📁 Log folder: api_results_{run_id}")
    print(f"{'='*60}")

    # Set the environment variable so every agent writes to the right folder.
    os.environ['FLOW_RUN_ID'] = run_id

    from llmops.config import LLM_API_KEY, LLM_BASE_URL, LLM_MODEL_NAME
    try:
        result = await run_flow(
            question=question,
            industry=industry,
            original_file_path=file_name,
            api_key=LLM_API_KEY,
            base_url=LLM_BASE_URL,
            model_name=LLM_MODEL_NAME,
            session_id=f"batch-run-{run_id}"
        )
        if result.get('success'):
            summary = result.get('execution_summary', {})
            print(f"✅ Run #{run_id} completed successfully")
            print(f"   Planning steps: {summary.get('planning_steps', 0)}")
            print(f"   Metrics computed: {summary.get('metrics_computed', 0)}")
        else:
            print(f"❌ Run #{run_id} failed: {result.get('error', 'unknown error')}")
        return result
    except Exception as e:
        print(f"❌ Run #{run_id} raised an exception: {e}")
        return {
            "success": False,
            "error": str(e),
            "run_id": run_id
        }
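

# Minimal usage sketch for a one-off run outside the batch loop (the argument
# values are just the module-level defaults defined above):
#
#     result = asyncio.run(run_single_flow("1", QUESTION, INDUSTRY, DATA_FILE))
#     print(result.get("success"))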


async def data_standardize():
    """
    Extract the raw data and standardize it.

    Returns the path of the standardized file produced by the parser agent.
    """
    from llmops.agents.data_stardard import TransactionParserAgent
    from llmops.config import DEEPSEEK_API_KEY, multimodal_api_url

    # Data standardization agent.
    standard_agent = TransactionParserAgent(
        api_key=DEEPSEEK_API_KEY,
        multimodal_api_url=multimodal_api_url
    )
    # Run the parser on the raw input file.
    full_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ORIGINAL_DATA_FILE)
    try:
        result = await standard_agent.run_workflow_task(full_path)
        if result["status"] == "success":
            print("🎯 Data standardization completed!")
            # Path of the standardized file.
            standard_file_path = result['file_path']
            return standard_file_path
        else:
            print(f"❌ Data standardization failed: {result['message']}")
            raise ValueError("data standardization error")
    except Exception as e:
        print(f"Data standardization exception: {e}")
        raise
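

# Note: data_standardize() is defined but never called; main() below hands
# ORIGINAL_DATA_FILE straight to run_batch(). A sketch of chaining the two
# steps (an assumption about the intended flow, not the shipped behavior):
#
#     async def standardize_then_batch():
#         standard_file = await data_standardize()
#         await run_batch(RUNS, QUESTION, INDUSTRY, standard_file)
#
#     asyncio.run(standardize_then_batch())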


async def run_batch(runs: int, question: str, industry: str, data_file: str):
    """
    Run the workflow in batch.

    Args:
        runs: number of runs
        question: user query
        industry: industry
        data_file: raw data file path; pdf/img/csv are supported
    """
    from llmops.config import LLM_MODEL_NAME

    print("🚀 Batch runner started")
    print(f"📊 Planned runs: {runs}")
    print(f"🏭 Industry: {industry}")
    print(f"📁 Data file: {data_file}")
    print(f"❓ Query: {question}")
    print(f"LLM model: {LLM_MODEL_NAME}")
    print(f"{'='*80}")

    # Run statistics.
    successful_runs = 0
    failed_runs = 0
    results = []
    # Total wall-clock time, in seconds.
    total_time = 0.0

    # Run the flows one after another.
    for i in range(1, runs + 1):
        run_id = str(i)
        # Set the environment variable so every agent writes to the right folder.
        os.environ['FLOW_RUN_ID'] = run_id

        start_time = time.perf_counter()
        # Single run.
        result = await run_single_flow(run_id, question, industry, data_file)
        end_time = time.perf_counter()
        total_time += (end_time - start_time)

        results.append(result)
        if result.get('success'):
            successful_runs += 1
        else:
            failed_runs += 1

        # Short pause between runs to avoid hammering the API.
        if i < runs:  # no pause needed after the last run
            await asyncio.sleep(1)

    # Print the summary.
    print(f"\n{'='*80}")
    print("📊 Batch run summary")
    print(f"{'='*80}")
    print(f"Total runs: {runs}")
    print(f"Total time: {total_time:.2f}s, average per run: {total_time/runs:.2f}s")
    print(f"Successful: {successful_runs}")
    print(f"Failed: {failed_runs}")
    print(f"Success rate: {successful_runs/runs*100:.1f}%")

    # List the per-run log folders.
    print("\n📁 Log folders:")
    for i in range(1, runs + 1):
        folder_name = f"api_results_{i}"
        status = "✅" if results[i-1].get('success') else "❌"
        print(f"  {status} {folder_name}")

    print("\n🎉 Batch run finished!")
    print("💡 Tip: the full logs of each run are kept in the matching api_results_[n] folder")
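

# Why the loop above is sequential: each run publishes its ID through the
# shared os.environ['FLOW_RUN_ID'], so launching the flows concurrently would
# race on that variable and mix up the api_results_[n] folders. The sketch
# below is therefore unsafe as-is; it would only work if run_flow took the
# run ID as an explicit argument instead of reading the environment (an
# assumption about its API, not current behavior):
#
#     results = await asyncio.gather(
#         *(run_single_flow(str(i), QUESTION, INDUSTRY, DATA_FILE)
#           for i in range(1, RUNS + 1))
#     )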


def main():
    """Entry point: run the batch with the module-level configuration."""
    print("🚀 Running the batch with the configured parameters")
    print(f"📊 Runs: {RUNS}")
    print(f"🏭 Industry: {INDUSTRY}")
    print(f"📁 Data file: {ORIGINAL_DATA_FILE}")
    print(f"❓ Query: {QUESTION[:50]}...")
    print("-" * 80)

    curr_dir = os.path.dirname(os.path.abspath(__file__))
    file_path = os.path.join(curr_dir, "..", ORIGINAL_DATA_FILE)

    # Kick off the batch.
    asyncio.run(run_batch(
        runs=RUNS,
        question=QUESTION,
        industry=INDUSTRY,
        data_file=file_path
    ))


if __name__ == "__main__":
    main()