2 Commits f0a27d4f97 ... dd340c94d0

Author SHA1 Message Date
  jiaqiang dd340c94d0 Add LLM configuration options 3 days ago
  jiaqiang b62b95d035 Bring the data standardization node under planner management 4 days ago

+ 15 - 1
llmops/agents/data_stardard.py

@@ -365,7 +365,6 @@ JSON Array:
             api_results_dir = "data_files"
             saved_path = await self.parse_and_save_to_file(input_file_path, api_results_dir)
 
-
             return {
                 "status": "success",
                 "file_path": saved_path,
@@ -383,6 +382,21 @@ JSON Array:
             print(f"⏱️ 执行总耗时: {elapsed_time:.2f} 秒")
             print(f"END---数据标准化任务结束")
 
+
+
+async def data_standize(api_key: str, base_url: str, multimodal_api_url: str, input_file_path: str) -> dict:
+    """
+    Entry point for data standardization.
+    """
+    # Create the agent
+    agent = TransactionParserAgent(
+        api_key=api_key,
+        base_url=base_url,
+        multimodal_api_url=multimodal_api_url
+    )
+    # Run the standardization workflow
+    return await agent.run_workflow_task(input_file_path)
+
 # --- Run ---
 async def main():
     agent = TransactionParserAgent(
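
Review note: a minimal sketch of how the new entry point is expected to be called, wiring in the config values this change set adds (the input path and the asyncio wrapper are illustrative, not part of the diff):

# Sketch, not shipped code: invoke data_standize with the configured LLM endpoints.
import asyncio

from llmops.agents.data_stardard import data_standize
from llmops.config import LLM_API_KEY, LLM_BASE_URL, multimodal_api_url

async def demo() -> None:
    result = await data_standize(
        api_key=LLM_API_KEY,
        base_url=LLM_BASE_URL,
        multimodal_api_url=multimodal_api_url,
        input_file_path="data_files/11111.png",  # illustrative pdf/img/csv source
    )
    # On success the agent returns {"status": "success", "file_path": <standardized csv>}.
    print(result["status"], result.get("file_path"))

asyncio.run(demo())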

+ 14 - 10
llmops/agents/planning_agent.py

@@ -91,20 +91,23 @@ class PlanningAgent:
             ("system", """
             你是报告规划总控智能体,核心职责是精准分析当前状态并决定下一步行动。
 
-            ### 决策选项(三选一)
-            1. data_classify: 数据未分类打标或分类打标数据集数量为0
-            2. generate_outline:大纲未生成或大纲无效
-            3. compute_metrics:大纲已生成但指标未完成
+            ### 决策选项(四选一)
+            1. data_standardize: 数据未做标准化或数据标准化失败
+            2. data_classify: 数据未分类打标或分类打标数据集数量为0
+            3. generate_outline:大纲未生成或大纲无效
+            4. compute_metrics:大纲已生成但指标未完成
 
             ### 决策规则(按顺序检查)
-            1. 检查 data_set_classified 是否为空 或 数量为0时 → 选择 data_classify
-            2. 检查 outline_draft 是否为空 → 空则选择 generate_outline
-            3. 检查 metrics_requirements 是否为空 → 空则选择 generate_outline
-            4. 检查是否有待计算指标 → 有则选择 compute_metrics
-            5. 所有指标都已计算完成 → 选择 finalize_report
-            6. 如果无法理解需求 → 选择 clarify_requirements
+            1. 检查 is_standardized 是否为0,值是0时 → 选择 data_standardize
+            2. 检查 data_set_classified 是否为空 或 数量为0时 → 选择 data_classify
+            3. 检查 outline_draft 是否为空 → 空则选择 generate_outline
+            4. 检查 metrics_requirements 是否为空 → 空则选择 generate_outline
+            5. 检查是否有待计算指标 → 有则选择 compute_metrics
+            6. 所有指标都已计算完成 → 选择 finalize_report
+            7. 如果无法理解需求 → 选择 clarify_requirements
 
             ### 重要原则
+            - 数据标准化成功后,不要重复处理
             - 数据已分类打标后,不要重复分类打标
             - 大纲草稿已存在时,不要重复生成大纲
             - 决策为 compute_metrics 时,必须从状态信息中的"有效待计算指标ID列表"中选择
@@ -284,6 +287,7 @@ class PlanningAgent:
         return f"""
             当前状态评估:
             - 规划步骤: {state.get('planning_step', 0)}
+            - 数据标准化: {state.get('is_standardized', 0)}
             - 数据分类打标数量: {len(state.get('data_set_classified', 0))}
             - 大纲版本: {state.get('outline_version', 0)}
             - 大纲草稿存在: {state.get('outline_draft') is not None}
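
Review note: the prompt's ordered rules now mirror the deterministic router added in complete_agent_flow_rule.py below; a plain-Python sketch of the same precedence, where pending_metric_ids is a hypothetical stand-in for "metrics still awaiting computation":

# Sketch: the decision rules above, expressed as ordered checks.
def decide_next_action(state: dict) -> str:
    if state.get("is_standardized", 0) == 0:            # rule 1
        return "data_standardize"
    if len(state.get("data_set_classified", [])) == 0:  # rule 2
        return "data_classify"
    if not state.get("outline_draft"):                  # rule 3
        return "generate_outline"
    if not state.get("metrics_requirements"):           # rule 4
        return "generate_outline"
    if state.get("pending_metric_ids"):                 # rule 5 (hypothetical field name)
        return "compute_metrics"
    return "finalize_report"                            # rule 6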

+ 10 - 20
llmops/batch_runner_all.py

@@ -47,7 +47,7 @@ os.environ["LANGSMITH_TRACING"] = "false"
 # Choose the import style based on how the file is executed
 if __name__ == "__main__":
     # When the file is executed directly, use absolute imports
-    from llmops.complete_agent_flow_rule import run_complete_agent_flow
+    from llmops.complete_agent_flow_rule import run_complete_agent_flow, run_flow
     from llmops.agents.data_manager import DataManager
 else:
     # When imported as a module, use relative imports
@@ -57,7 +57,7 @@ else:
 import config
 
 # ========== Configuration ==========
-RUNS = 10  # number of runs
+RUNS = 1  # number of runs
 INDUSTRY = "农业"  # industry (agriculture)
 ORIGINAL_DATA_FILE="data_files/11111.png"   # original raw test data file (pdf/img)
 DATA_FILE = "data_files/交易流水样例数据.csv"  # data file path
@@ -65,7 +65,7 @@ QUESTION = "Please generate a detailed agricultural business-loan transaction-flow analysis report, which needs
 # ==============================
 
 
-async def run_single_flow(run_id: str, question: str, industry: str, data: List[Dict[str, Any]], file_name: str) -> Dict[str, Any]:
+async def run_single_flow(run_id: str, question: str, industry: str, file_name: str) -> Dict[str, Any]:
     """
     Run a single workflow instance
 
@@ -73,7 +73,6 @@ async def run_single_flow(run_id: str, question: str, industry: str, data: List[
         run_id: run ID
         question: user query
         industry: industry
-        data: data set
         file_name: file name
 
     Returns:
@@ -88,11 +87,10 @@ async def run_single_flow(run_id: str, question: str, industry: str, data: List[
     os.environ['FLOW_RUN_ID'] = run_id
 
     try:
-        result = await run_complete_agent_flow(
+        result = await run_flow(
             question=question,
             industry=industry,
-            data=data,
-            file_name=file_name,
+            original_file_path=file_name,
             api_key=config.DEEPSEEK_API_KEY,
             session_id=f"batch-run-{run_id}"
         )
@@ -185,20 +183,9 @@ async def run_batch(runs: int, question: str, industry: str, data_file: str):
         os.environ['FLOW_RUN_ID'] = run_id
 
         start_time = time.perf_counter()
-        try:
-            # Run data standardization
-            standard_file_path = await data_standardize()
-            print(f"Standardized file: {standard_file_path}")
-            data_file = standard_file_path
-            # Load the standardized data
-            data = DataManager.load_data_from_csv_file(data_file)
-            print(f"📊 Data loaded successfully: {len(data)} records")
-        except Exception as e:
-            print(f"❌ Data loading failed: {e}")
-            return
 
         # Single run
-        result = await run_single_flow(run_id, question, industry, data, os.path.basename(data_file))
+        result = await run_single_flow(run_id, question, industry, data_file)
         end_time = time.perf_counter()
         total_time += (end_time - start_time)
 
@@ -243,12 +230,15 @@ def main():
     print(f"❓ 查询: {QUESTION[:50]}...")
     print("-" * 80)
 
+    curr_dir = os.path.dirname(os.path.abspath(__file__))
+    file_path = os.path.join(curr_dir, "..", ORIGINAL_DATA_FILE)
+
     # Run the batch task
     asyncio.run(run_batch(
         runs=RUNS,
         question=QUESTION,
         industry=INDUSTRY,
-        data_file=ORIGINAL_DATA_FILE
+        data_file=file_path
     ))
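
Review note: the batch runner no longer standardizes or pre-loads data itself; it only resolves the raw file to an absolute path and hands it to run_flow. A runnable sketch of the path resolution, assuming the repo layout implied by the change (batch_runner_all.py under llmops/, raw files under a sibling data_files/):

# Sketch: resolve the raw file relative to the repository root.
import os

ORIGINAL_DATA_FILE = "data_files/11111.png"
curr_dir = os.path.dirname(os.path.abspath(__file__))         # .../repo/llmops
file_path = os.path.join(curr_dir, "..", ORIGINAL_DATA_FILE)  # .../repo/data_files/11111.png
print(os.path.normpath(file_path))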
 
 

+ 87 - 19
llmops/complete_agent_flow_rule.py

@@ -40,7 +40,7 @@ from llmops.workflow_state import (
     update_state_with_planning_decision,
     update_state_with_data_classified,
     convert_numpy_types,
-
+    update_state_with_data_standardize
 )
 from llmops.agents.outline_agent import  generate_report_outline
 from llmops.agents.planning_agent import  plan_next_action
@@ -48,7 +48,8 @@ from llmops.agents.rules_engine_metric_calculation_agent import RulesEngineMetri
 from llmops.agents.data_manager import DataManager
 import os
 from llmops.agents.data_classify_agent import data_classify
-from llmops.config import DEEPSEEK_API_KEY
+from llmops.config import DEEPSEEK_API_KEY, multimodal_api_url
+from llmops.agents.data_stardard import data_standize
 
 class CompleteAgentFlow:
     """完整的智能体工作流"""
@@ -79,6 +80,7 @@ class CompleteAgentFlow:
         workflow.add_node("outline_generator", self._outline_generator_node)
         workflow.add_node("metric_calculator", self._metric_calculator_node)
         workflow.add_node("data_classify", self._data_classify_node)
+        workflow.add_node("data_standardize", self._data_standardize_node)
 
         # Set the entry point
         workflow.set_entry_point("planning_node")
@@ -91,11 +93,13 @@ class CompleteAgentFlow:
                 "outline_generator": "outline_generator",
                 "metric_calculator": "metric_calculator",
                 "data_classify": "data_classify",
+                "data_standardize": "data_standardize",
                 END: END
             }
         )
 
         # Route back from each node to the planning node to re-decide
+        workflow.add_edge("data_standardize", "planning_node")
         workflow.add_edge("data_classify", "planning_node")
         workflow.add_edge("outline_generator", "planning_node")
         workflow.add_edge("metric_calculator", "planning_node")
@@ -122,6 +126,11 @@ class CompleteAgentFlow:
             print("⚠️ 规划步骤超过30次,强制结束流程")
             return END
 
+        # is_standardized is 0 → standardize the data
+        if state.get("is_standardized", 0) == 0:
+            print("→ Routing to data_standardize (data standardization)")
+            return "data_standardize"
+
         # Classified data count is 0 → classify/label
         if len(state.get("data_set_classified", [])) == 0:
             print("→ Routing to data_classify (classification/labeling)")
@@ -240,12 +249,21 @@ class CompleteAgentFlow:
     async def _data_classify_node(self, state: IntegratedWorkflowState) -> IntegratedWorkflowState:
         """数据分类打标节点"""
         try:
+            standardized_file_path = state["standardized_file_path"]
+            file_name = os.path.basename(standardized_file_path)
+
+            # Read the standardized data file
+            data_set = DataManager.load_data_from_csv_file(standardized_file_path)
+            # Report the record count and show a sample record
+            print(f"📊 Reading standardized data file: {file_name}, loaded {len(data_set)} records")
+            print(f"Sample record: {data_set[0:1]}")
+
             print("📝 Classifying and labeling the data...")
 
             # Classify and label the data
             data_set_classified = await data_classify(
                 industry=state["industry"],
-                data_set=state["data_set"],
+                data_set=data_set,
                 file_name=state["file_name"]
             )
 
@@ -263,6 +281,39 @@ class CompleteAgentFlow:
             return convert_numpy_types(new_state)
 
 
+    async def _data_standardize_node(self, state: IntegratedWorkflowState) -> IntegratedWorkflowState:
+        """Data standardization node"""
+        try:
+            print("📝 Standardizing the data...")
+
+            # Run data standardization
+            result = await data_standize(
+                api_key=self.api_key,
+                base_url=self.base_url,
+                multimodal_api_url=multimodal_api_url,
+                input_file_path=state["original_file_path"]
+            )
+            is_succ = 0
+            standardized_file_path = None
+            if result["status"] == "success":  # standardization succeeded
+                is_succ = 1
+                standardized_file_path = result["file_path"]
+
+            # Update the state
+            new_state = update_state_with_data_standardize(state, is_succ, standardized_file_path)
+
+            print(f"✅ Data standardization finished, status: {is_succ}, standardized file path: {standardized_file_path}")
+
+            return convert_numpy_types(new_state)
+
+        except Exception as e:
+            print(f"❌ Data standardization failed: {e}")
+            new_state = state.copy()
+            new_state["errors"].append(f"Data standardization error: {str(e)}")
+            return convert_numpy_types(new_state)
+
+
+
     def _print_ai_selection_analysis(self, outline):
         """打印AI指标选择的推理过程分析 - 完全通用版本"""
         print()
@@ -527,7 +578,7 @@ class CompleteAgentFlow:
         except:
             return "🤔 规划决策已完成"
 
-    async def run_workflow(self, question: str, industry: str, data: List[Dict[str, Any]], file_name: str, session_id: str = None, use_rules_engine_only: bool = False, use_traditional_engine_only: bool = False) -> Dict[str, Any]:
+    async def run_workflow(self, question: str, industry: str, original_file_path: str, session_id: str = None, use_rules_engine_only: bool = False, use_traditional_engine_only: bool = False) -> Dict[str, Any]:
         """
         Run the complete workflow
 
@@ -535,7 +586,7 @@ class CompleteAgentFlow:
             question: user query
             industry: industry
             data: data set
-            file_name: data file name
+            original_file_path: path to the original file
             session_id: session ID
             use_rules_engine_only: whether to compute metrics with the rules engine only
             use_traditional_engine_only: whether to compute metrics with the traditional engine only
@@ -547,8 +598,8 @@ class CompleteAgentFlow:
             print("🚀 启动完整智能体工作流...")
             print(f"问题:{question}")
             print(f"行业:{industry}")
-            print(f"数据文件:{file_name}")
-            print(f"数据条数:{len(data)}")
+            print(f"数据文件:{original_file_path}")
+            # print(f"数据条数:{len(data)}")
 
             if use_rules_engine_only:
                 print("计算模式:只使用规则引擎")
@@ -558,7 +609,7 @@ class CompleteAgentFlow:
                 print("计算模式:标准模式")
 
             # Create the initial state
-            initial_state = create_initial_integrated_state(question, industry, data, file_name, session_id)
+            initial_state = create_initial_integrated_state(question, industry, original_file_path, session_id)
 
             # Set compute-mode flags
             if use_rules_engine_only:
@@ -622,6 +673,30 @@ async def run_complete_agent_flow(question: str, industry: str, data: List[Dict[
     return await workflow.run_workflow(question, industry, data, file_name, session_id, use_rules_engine_only, use_traditional_engine_only)
 
 
+
+# Convenience function
+async def run_flow(question: str, industry: str, original_file_path: str, api_key: str, session_id: str = None, use_rules_engine_only: bool = False, use_traditional_engine_only: bool = False) -> Dict[str, Any]:
+    """
+    运行完整智能体工作流的便捷函数
+
+    Args:
+        question: 用户查询
+        data: 数据集
+        original_file_path: 原始文件路径(pdf/img/csv)
+        api_key: API密钥
+        session_id: 会话ID
+        use_rules_engine_only: 是否只使用规则引擎指标计算
+        use_traditional_engine_only: 是否只使用传统引擎指标计算
+
+    Returns:
+        工作流结果
+    """
+    workflow = CompleteAgentFlow(api_key)
+    return await workflow.run_workflow(question, industry, original_file_path, session_id, use_rules_engine_only, use_traditional_engine_only)
+
+
+
+
 # Main function for testing
 async def main():
     """主函数:执行系统测试"""
@@ -641,23 +716,16 @@ async def main():
     # Industry ("农业" = agriculture)
     industry = "农业"
 
-    # Test file
-    file_name = "交易流水样例数据.csv"
+    # Test file (pdf/img/csv)
+    file_name = "11111.png"
     curr_dir = os.path.dirname(os.path.abspath(__file__))
     file_path = os.path.join(curr_dir, "..", "data_files", file_name)
 
-    # Load the test data set and show a sample record
-    test_data = DataManager.load_data_from_csv_file(file_path)
-
-    print(f"📊 Reading test data file: {file_name}, loaded {len(test_data)} records")
-    print(f"Sample record: {test_data[0:1]}")
-
     # Run the test
-    result = await run_complete_agent_flow(
+    result = await run_flow(
         question="请生成一份详细的农业经营贷流水分析报告,需要包含:1.总收入和总支出统计 2.收入笔数和支出笔数 3.各类型收入支出占比分析 4.交易对手收入支出TOP3排名 5.按月份的收入支出趋势分析 6.账户数量和交易时间范围统计 7.资金流入流出月度统计等全面指标",
         industry = industry,
-        data=test_data,
-        file_name=file_name,
+        original_file_path=file_path,
         api_key=DEEPSEEK_API_KEY,
         session_id="direct-test"
     )
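
Review note: the new node keeps the graph's hub-and-spoke shape, with every worker node looping back to the planner. A self-contained sketch of the added loop using a toy state (simplified from IntegratedWorkflowState; the worker bodies are stand-ins):

# Sketch: the planner/standardize/classify loop added in this change set.
from typing import TypedDict, List, Dict, Any
from langgraph.graph import StateGraph, END

class ToyState(TypedDict):
    is_standardized: int
    data_set_classified: List[Dict[str, Any]]

def planning_node(state: ToyState) -> ToyState:
    # The real planner consults an LLM; here it is a pass-through.
    return state

def route(state: ToyState) -> str:
    # Same precedence as _route_from_planning above.
    if state.get("is_standardized", 0) == 0:
        return "data_standardize"
    if len(state.get("data_set_classified", [])) == 0:
        return "data_classify"
    return END

def data_standardize(state: ToyState) -> ToyState:
    return {**state, "is_standardized": 1}

def data_classify(state: ToyState) -> ToyState:
    return {**state, "data_set_classified": [{"label": "income"}]}

graph = StateGraph(ToyState)
graph.add_node("planning_node", planning_node)
graph.add_node("data_standardize", data_standardize)
graph.add_node("data_classify", data_classify)
graph.set_entry_point("planning_node")
graph.add_conditional_edges("planning_node", route, {
    "data_standardize": "data_standardize",
    "data_classify": "data_classify",
    END: END,
})
graph.add_edge("data_standardize", "planning_node")
graph.add_edge("data_classify", "planning_node")

app = graph.compile()
print(app.invoke({"is_standardized": 0, "data_set_classified": []}))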

+ 4 - 0
llmops/config.py

@@ -120,6 +120,10 @@ deepseek_v3_model = {
 }
 
 
+LLM_API_KEY = qwen3_32B_model["api_key"]
+LLM_BASE_URL = qwen3_32B_model["base_url"]
+LLM_MODEL_NAME = qwen3_32B_model["name"]
+
 # Multimodal API endpoint
 multimodal_api_url = "http://103.154.31.78:20012/api/file/read"
 #multimodal_api_url="http://10.192.72.11:6300/api/file/read"
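
Review note: the three new aliases pin the default LLM to the qwen3_32B_model block, so callers need not know which model entry is active. A sketch of a consumer (the helper itself is an assumption, not part of this diff):

# Sketch: collect the aliased defaults for whichever client an agent constructs.
from llmops.config import LLM_API_KEY, LLM_BASE_URL, LLM_MODEL_NAME

def default_llm_kwargs() -> dict:
    """Hypothetical helper returning the configured model as keyword arguments."""
    return {"api_key": LLM_API_KEY, "base_url": LLM_BASE_URL, "model": LLM_MODEL_NAME}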

+ 39 - 5
llmops/workflow_state.py

@@ -91,6 +91,9 @@ class IntegratedWorkflowState(TypedDict):
     transactions_df: Optional[Any]  # optional dataframe form
     file_name: str                  # data file name
     data_set_classified: List[Dict[str, Any]] # data set after classification/labeling
+    original_file_path: str         # absolute path of the uploaded file
+    is_standardized: int            # whether the data has been standardized 0: no 1: yes
+    standardized_file_path: str     # path of the standardized data file
 
     # === Intent recognition layer (from the original Big Agent) ===
     intent_result: Optional[Dict[str, Any]]
@@ -135,15 +138,14 @@ class IntegratedWorkflowState(TypedDict):
 
 # ============= State creation and initialization =============
 
-def create_initial_integrated_state(question: str, industry: str, data: List[Dict[str, Any]], file_name: str, session_id: str = None) -> IntegratedWorkflowState:
+def create_initial_integrated_state(question: str, industry: str, original_file_path: str, session_id: str = None) -> IntegratedWorkflowState:
     """
     Create the initial integrated state
 
     Args:
         question: user query
         industry: industry
-        data: data set
-        file_name: data file name
+        original_file_path: path to the original data file
         session_id: session ID
 
     Returns:
@@ -157,12 +159,14 @@ def create_initial_integrated_state(question: str, industry: str, data: List[Dic
         "user_input": question,
         "question": question,
         "industry": industry,
+        "original_file_path": original_file_path,
 
         # Data layer
-        "data_set": convert_numpy_types(data),
+        "is_standardized": 0,         # 未标准化
+        "data_set": [],
         "data_set_classified": [],    # 分类打标后的数据集
         "transactions_df": None,
-        "file_name": file_name,       # 文件名称
+        "file_name": "",       # 文件名称
 
         # Intent recognition layer
         "intent_result": None,
@@ -354,4 +358,34 @@ def update_state_with_data_classified(state: IntegratedWorkflowState, data_set_c
         "timestamp": datetime.now().isoformat()
     })
 
+    return new_state
+
+
+def update_state_with_data_standardize(state: IntegratedWorkflowState, is_succ: int, standardized_file_path: Optional[str]) -> IntegratedWorkflowState:
+    """
+    Update the state from the data standardization result
+
+    Args:
+        state: current state
+        is_succ: whether standardization succeeded 0: no 1: yes
+        standardized_file_path: path of the standardized file (None on failure)
+
+    Returns:
+        The updated state
+    """
+    import os
+
+    new_state = state.copy()
+    new_state["is_standardized"] = is_succ
+    new_state["standardized_file_path"] = standardized_file_path
+    # Guard the failure path, where standardized_file_path is None
+    new_state["file_name"] = os.path.basename(standardized_file_path) if standardized_file_path else ""
+
+    msg = "success" if is_succ else "failure"
+    # Append a message
+    new_state["messages"].append({
+        "role": "assistant",
+        "content": f"✅ Data standardization finished, result: {msg}",
+        "timestamp": datetime.now().isoformat()
+    })
+
     return new_state
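
Review note: a sketch exercising both outcomes the new updater must handle; the failure path passes None for the file path, which is why the basename call above is guarded:

# Sketch: success and failure paths of update_state_with_data_standardize.
state = create_initial_integrated_state(
    question="demo question",
    industry="农业",
    original_file_path="/abs/data_files/11111.png",
    session_id="demo",
)

ok = update_state_with_data_standardize(state, 1, "/abs/data_files/standardized.csv")
assert ok["is_standardized"] == 1 and ok["file_name"] == "standardized.csv"

failed = update_state_with_data_standardize(state, 0, None)
assert failed["is_standardized"] == 0 and failed["file_name"] == ""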