@@ -40,7 +40,7 @@ from llmops.workflow_state import (
     update_state_with_planning_decision,
     update_state_with_data_classified,
     convert_numpy_types,
-
+    update_state_with_data_standardize
 )
 from llmops.agents.outline_agent import generate_report_outline
 from llmops.agents.planning_agent import plan_next_action
@@ -48,7 +48,8 @@ from llmops.agents.rules_engine_metric_calculation_agent import RulesEngineMetri
 from llmops.agents.data_manager import DataManager
 import os
 from llmops.agents.data_classify_agent import data_classify
-from llmops.config import DEEPSEEK_API_KEY
+from llmops.config import DEEPSEEK_API_KEY, multimodal_api_url
+from llmops.agents.data_stardard import data_standize
 
 class CompleteAgentFlow:
     """Complete agent workflow"""
@@ -79,6 +80,7 @@ class CompleteAgentFlow:
         workflow.add_node("outline_generator", self._outline_generator_node)
         workflow.add_node("metric_calculator", self._metric_calculator_node)
         workflow.add_node("data_classify", self._data_classify_node)
+        workflow.add_node("data_standardize", self._data_standardize_node)
 
         # Set the entry point
         workflow.set_entry_point("planning_node")
@@ -91,11 +93,13 @@
                 "outline_generator": "outline_generator",
                 "metric_calculator": "metric_calculator",
                 "data_classify": "data_classify",
+                "data_standardize": "data_standardize",
                 END: END
             }
         )
 
         # Route back from each node to the planning node for the next decision
+        workflow.add_edge("data_standardize", "planning_node")
         workflow.add_edge("data_classify", "planning_node")
         workflow.add_edge("outline_generator", "planning_node")
         workflow.add_edge("metric_calculator", "planning_node")
@@ -122,6 +126,11 @@
             print("⚠️ Planning has exceeded 30 steps, forcing the workflow to end")
             return END
 
+        # Standardization flag is 0 → standardize the data first
+        if state.get("is_standardized", 0) == 0:
+            print("→ Routing to data_standardize (data standardization)")
+            return "data_standardize"
+
         # No classified/labeled records yet → classify and label
         if len(state.get("data_set_classified", [])) == 0:
             print("→ Routing to data_classify (classification and labeling)")
@@ -240,12 +249,21 @@
     async def _data_classify_node(self, state: IntegratedWorkflowState) -> IntegratedWorkflowState:
         """Data classification and labeling node"""
         try:
+            standardized_file_path = state["standardized_file_path"]
+            file_name = os.path.basename(standardized_file_path)
+
+            # Load the standardized data file
+            data_set = DataManager.load_data_from_csv_file(standardized_file_path)
+            # Report how many records were loaded and show one sample
+            print(f"📊 Read standardized data file: {file_name}, loaded {len(data_set)} records")
+            print(f"Sample record: {data_set[0:1]}")
+
             print("📝 Classifying and labeling the data...")
 
             # Classify and label the data
             data_set_classified = await data_classify(
                 industry=state["industry"],
-                data_set=state["data_set"],
+                data_set=data_set,
                 file_name=state["file_name"]
             )
 
@@ -263,6 +281,39 @@
             return convert_numpy_types(new_state)
 
 
+    async def _data_standardize_node(self, state: IntegratedWorkflowState) -> IntegratedWorkflowState:
+        """Data standardization node"""
+        try:
+            print("📝 Standardizing the data...")
+
+            # Run data standardization
+            result = await data_standize(
+                api_key=self.api_key,
+                base_url=self.base_url,
+                multimodal_api_url=multimodal_api_url,
+                input_file_path=state["original_file_path"]
+            )
+            is_succ = 0
+            standardized_file_path = None
+            if result["status"] == "success":  # standardization succeeded
+                is_succ = 1
+                standardized_file_path = result["file_path"]
+
+            # Update the workflow state
+            new_state = update_state_with_data_standardize(state, is_succ, standardized_file_path)
+
+            print(f"✅ Data standardization finished, status: {is_succ}, standardized file path: {standardized_file_path}")
+
+            return convert_numpy_types(new_state)
+
+        except Exception as e:
+            print(f"❌ Data standardization failed: {e}")
+            new_state = state.copy()
+            new_state["errors"].append(f"Data standardization error: {str(e)}")
+            return convert_numpy_types(new_state)
+
+
+
     def _print_ai_selection_analysis(self, outline):
         """Print the reasoning behind the AI's metric selection - fully generic version"""
         print()
@@ -527,7 +578,7 @@
         except:
             return "🤔 Planning decision completed"
 
-    async def run_workflow(self, question: str, industry: str, data: List[Dict[str, Any]], file_name: str, session_id: str = None, use_rules_engine_only: bool = False, use_traditional_engine_only: bool = False) -> Dict[str, Any]:
+    async def run_workflow(self, question: str, industry: str, original_file_path: str, session_id: str = None, use_rules_engine_only: bool = False, use_traditional_engine_only: bool = False) -> Dict[str, Any]:
         """
         Run the complete workflow
 
@@ -535,7 +586,7 @@
             question: User query
             industry: Industry
             data: Dataset
-            file_name: Name of the data file
+            original_file_path: Path to the original data file
             session_id: Session ID
             use_rules_engine_only: Whether to use only the rules engine for metric calculation
             use_traditional_engine_only: Whether to use only the traditional engine for metric calculation
@@ -547,8 +598,8 @@
         print("🚀 Starting the complete agent workflow...")
         print(f"Question: {question}")
         print(f"Industry: {industry}")
-        print(f"Data file: {file_name}")
-        print(f"Number of records: {len(data)}")
+        print(f"Data file: {original_file_path}")
+        # print(f"Number of records: {len(data)}")
 
         if use_rules_engine_only:
             print("Calculation mode: rules engine only")
@@ -558,7 +609,7 @@
             print("Calculation mode: standard")
 
         # Create the initial state
-        initial_state = create_initial_integrated_state(question, industry, data, file_name, session_id)
+        initial_state = create_initial_integrated_state(question, industry, original_file_path, session_id)
 
         # Set calculation mode flags
         if use_rules_engine_only:
@@ -622,6 +673,30 @@ async def run_complete_agent_flow(question: str, industry: str, data: List[Dict[
     return await workflow.run_workflow(question, industry, data, file_name, session_id, use_rules_engine_only, use_traditional_engine_only)
 
 
+
+# Convenience function
+async def run_flow(question: str, industry: str, original_file_path: str, api_key: str, session_id: str = None, use_rules_engine_only: bool = False, use_traditional_engine_only: bool = False) -> Dict[str, Any]:
+    """
+    Convenience function for running the complete agent workflow
+
+    Args:
+        question: User query
+        industry: Industry
+        original_file_path: Path to the original file (pdf/img/csv)
+        api_key: API key
+        session_id: Session ID
+        use_rules_engine_only: Whether to use only the rules engine for metric calculation
+        use_traditional_engine_only: Whether to use only the traditional engine for metric calculation
+
+    Returns:
+        The workflow result
+    """
+    workflow = CompleteAgentFlow(api_key)
+    return await workflow.run_workflow(question, industry, original_file_path, session_id, use_rules_engine_only, use_traditional_engine_only)
+
+
+
+
 # Main function for testing
 async def main():
     """Main function: run the system test"""
@@ -641,23 +716,16 @@ async def main():
     # Industry
     industry = "农业"
 
-    # Test file
-    file_name = "交易流水样例数据.csv"
+    # Test file (pdf/img/csv)
+    file_name = "11111.png"
     curr_dir = os.path.dirname(os.path.abspath(__file__))
     file_path = os.path.join(curr_dir, "..", "data_files", file_name)
 
-    # Load the test dataset and show sample records
-    test_data = DataManager.load_data_from_csv_file(file_path)
-
-    print(f"📊 Read test data file: {file_name}, loaded {len(test_data)} records")
-    print(f"Sample record: {test_data[0:1]}")
-
     # Run the test
-    result = await run_complete_agent_flow(
+    result = await run_flow(
         question="请生成一份详细的农业经营贷流水分析报告,需要包含:1.总收入和总支出统计 2.收入笔数和支出笔数 3.各类型收入支出占比分析 4.交易对手收入支出TOP3排名 5.按月份的收入支出趋势分析 6.账户数量和交易时间范围统计 7.资金流入流出月度统计等全面指标",
         industry=industry,
-        data=test_data,
-        file_name=file_name,
+        original_file_path=file_path,
         api_key=DEEPSEEK_API_KEY,
         session_id="direct-test"
     )
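
Note for context: this diff only imports `update_state_with_data_standardize` from `llmops.workflow_state`; the helper itself is not shown. Below is a minimal sketch of what such a helper could look like, inferred solely from how this diff uses it (the routing check `state.get("is_standardized", 0)` and the read of `state["standardized_file_path"]` in `_data_classify_node`). The field names and behavior here are assumptions, not code from this PR.

```python
from typing import Any, Dict, Optional


def update_state_with_data_standardize(
    state: Dict[str, Any],
    is_succ: int,
    standardized_file_path: Optional[str],
) -> Dict[str, Any]:
    """Hypothetical sketch: record the standardization outcome in a copy of the state."""
    new_state = dict(state)
    # Assumed flag read by the routing function: 0 routes back to data_standardize.
    new_state["is_standardized"] = is_succ
    # Assumed path later loaded by _data_classify_node via DataManager.
    new_state["standardized_file_path"] = standardized_file_path
    return new_state
```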