@@ -53,7 +53,7 @@ class TransactionParserAgent:
             max_retries=3,  # LangChain's built-in retry mechanism
             # Configure the httpx client to tune timeouts and connection pooling
             # (LangChain allows an http_client to be passed straight through)
             http_client=httpx.Client(
-                timeout=httpx.Timeout(300.0, read=300.0, connect=60.0),
+                timeout=httpx.Timeout(300.0, read=300.0, connect=300.0),
                 limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
             )
         )
@@ -65,6 +65,107 @@ class TransactionParserAgent:
         # Initialize API-call tracking
         self.api_calls = []
+
+    def _validate_and_reconcile(self, parsed_data: list) -> list:
+        """
+        Amount validation and balance-correction logic:
+        1. Sort rows in ascending chronological order
+        2. Find an anchor row whose balance reconciles with both neighbours
+        3. Derive corrections outward from the anchor in both directions
+        4. Circuit breaker: abort if more than 40% of the rows need correcting
+        """
+        if len(parsed_data) < 3:
+            return parsed_data
+
+        # Sort by date and time in ascending order (earliest row first).
+        # Assumes txDate is YYYY-MM-DD and txTime is HH:mm:ss.
+        def get_sort_key(x):
+            # Missing dates/times fall back to the maximum value so such rows sort last
+            date = x.get('txDate', '9999-12-31')
+            time = x.get('txTime', '23:59:59')
+            # txId: keep only the digits; on failure fall back to infinity
+            # (negated below, so unparseable ids sort first within a timestamp)
+            try:
+                txid_num = int(''.join(c for c in x.get('txId', '') if c.isdigit()))
+            except (ValueError, TypeError):
+                txid_num = float('inf')
+            return (date, time, -txid_num)
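+        # Example (hypothetical row): txId 'No.102' at '2024-01-02 09:00:00' produces the
+        # key ('2024-01-02', '09:00:00', -102), which sorts ahead of txId 'No.101' at the
+        # same timestamp because -102 < -101: higher ids come first within a timestamp.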
+
+        # Only re-sort when the statement arrives newest-first; otherwise keep the input order
+        if parsed_data[0]["txDate"] > parsed_data[-1]["txDate"]:
+            sorted_data = sorted(parsed_data, key=get_sort_key)
+        else:
+            sorted_data = list(parsed_data)
+
+        def to_float(s):
+            try:
+                return round(float(s), 2)
+            except (ValueError, TypeError):
+                return 0.0
+
+        anchor_idx = -1
+        # 1. Find a row that is certainly correct (the anchor).
+        # A row qualifies when both identities hold:
+        #   current balance = previous balance +/- current amount
+        #   current balance = next balance -/+ next amount
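+        # Worked example (hypothetical figures): previous balance 1300.00; the current row
+        # is an expense of 200.00 with balance 1100.00 -> 1300.00 - 200.00 = 1100.00; the
+        # next row is an income of 100.00 with balance 1200.00 -> 1200.00 - 100.00 = 1100.00.
+        # Both derivations match the recorded balance, so this row can serve as the anchor.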
+        for i in range(1, len(sorted_data) - 1):
+            p = sorted_data[i - 1]
+            c = sorted_data[i]
+            n = sorted_data[i + 1]
+
+            # Current row's recorded balance
+            c_bal = to_float(c['txBalance'])
+
+            # Derive the current balance from the previous row
+            p_bal = to_float(p['txBalance'])
+            c_amt = to_float(c['txAmount'])
+            calc_c_bal = round(p_bal + c_amt, 2) if c['txDirection'] == "收入" else round(p_bal - c_amt, 2)
+
+            # Derive the current balance from the next row
+            n_amt = to_float(n['txAmount'])
+            n_bal = to_float(n['txBalance'])
+            calc_n_bal = round(n_bal - n_amt, 2) if n['txDirection'] == "收入" else round(n_bal + n_amt, 2)
+
+            if c_bal == calc_c_bal and c_bal == calc_n_bal:
+                anchor_idx = i
+                break
+
+        if anchor_idx == -1:
+            print("⚠️ No anchor row with a consistent balance chain was found; treating the statement as non-contiguous and skipping correction.")
+            return parsed_data
+
+        print(f"Anchor row index = {anchor_idx}")
+        # 2. Correct outward from the anchor: each pass derives a row's balance from its
+        # already-trusted neighbour (the anchor itself, or a row the pass just corrected)
+        new_data = [item.copy() for item in sorted_data]
+        fix_count = 0
+
+        # Forward pass (towards the future): current balance = previous balance +/- current amount
+        for i in range(anchor_idx + 1, len(new_data)):
+            curr_bal = to_float(new_data[i]['txBalance'])
+            prev_bal = to_float(new_data[i - 1]['txBalance'])
+            curr_amt = to_float(new_data[i]['txAmount'])
+            expected_bal = round(prev_bal + curr_amt, 2) if new_data[i]['txDirection'] == "收入" else round(prev_bal - curr_amt, 2)
+            if abs(curr_bal - expected_bal) > 0.01:
+                print(f"[forward pass] row {new_data[i]['txId']}: recorded balance {curr_bal}, computed balance {expected_bal}")
+                new_data[i]['txBalance'] = str(expected_bal)
+                fix_count += 1
+
+        # Backward pass (towards the past): current balance = next balance -/+ next amount
+        for i in range(anchor_idx - 1, -1, -1):
+            curr_bal = to_float(new_data[i]['txBalance'])
+            next_bal = to_float(new_data[i + 1]['txBalance'])
+            next_amt = to_float(new_data[i + 1]['txAmount'])
+            expected_bal = round(next_bal - next_amt, 2) if new_data[i + 1]['txDirection'] == "收入" else round(next_bal + next_amt, 2)
+            if abs(curr_bal - expected_bal) > 0.01:
+                print(f"[backward pass] row {new_data[i]['txId']}: recorded balance {curr_bal}, computed balance {expected_bal}")
+                new_data[i]['txBalance'] = str(expected_bal)
+                fix_count += 1
+
+        # 3. Circuit breaker
+        fix_ratio = fix_count / len(new_data)
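+        # e.g. (hypothetical) 5 corrections across 10 rows -> fix_ratio = 0.5 > 0.4, which
+        # suggests the rows are not one contiguous statement, so corrections are discarded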
+        if fix_ratio > 0.4:
+            print(f"⚠️ Correction ratio {fix_ratio:.2%} exceeds 40%; the data is probably non-contiguous, abandoning corrections.")
+            return parsed_data
+
+        print(f"✅ Balance reconciliation passed; auto-corrected {fix_count} deviating row(s).")
+        return new_data
+
    async def _invoke_miner_u(self, file_path: str) -> str:
        """Call MinerU and extract the raw row data (the httpx call stays as-is, since this is not an LLM call)"""
        miner_start_time = time.perf_counter()
@@ -295,13 +396,15 @@ JSON Array:
                batch_data = []

                if batch_data:
+                    final_list = self._validate_and_reconcile(batch_data)
+                    final_list = sorted(final_list, key=lambda x: x['txId'])  # lexicographic; assumes ids compare correctly as strings
                    output = io.StringIO()
                    createdAtStr = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    writer = csv.writer(output, quoting=csv.QUOTE_MINIMAL, lineterminator='\n')

-                    print(f"✅ Batch converted successfully with {len(batch_data)} records.")
+                    print(f"✅ Batch converted successfully with {len(final_list)} records.")

-                    for item in batch_data:
+                    for item in final_list:
                        writer.writerow([
                            item.get("txId", ""),
                            item.get("txDate", ""),
@@ -317,7 +420,7 @@ JSON Array:
                    batch_csv_string = output.getvalue()
                    csv_content += batch_csv_string

-                    global_tx_counter += len(batch_data)
+                    global_tx_counter += len(final_list)

            except Exception as e:
                print(f"⚠️ Batch failed: {e}")
@@ -327,6 +430,7 @@ JSON Array:
        print(f"⏱️ Elapsed: {elapsed_time:.2f} s")
        print(f"📊 Result: {global_tx_counter - 1} rows converted in total")
        print(f"✅ [Step 2 - standardized conversion] complete")
+        print(f"⏱️ Total elapsed: {time.perf_counter() - switch_start_time:.2f} s")
        return csv_content
    async def parse_and_save_to_file(self, file_path: str, output_dir: str = "output") -> str:
@@ -408,7 +512,7 @@ async def main():
    current_script_path = os.path.abspath(__file__)
    current_dir = os.path.dirname(current_script_path)
    # Simulate the Workflow handing in a file to process
-    input_pdf = "data_files/11111.png"
+    input_pdf = "data_files/4.pdf"
    filepath = os.path.normpath(os.path.join(current_dir, "..", "..", input_pdf))

    if not os.path.exists(filepath):