import os
import time
import asyncio
import io
import csv
import datetime
import httpx
import json
import sqlite3
import re

# --- LangChain imports ---
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation


# --- Utility class (kept as-is) ---
class SafeJsonOutputParser(JsonOutputParser):
    def parse_result(self, result, *, partial: bool = False):
        if isinstance(result, list) and len(result) > 0:
            generation = result[0]
        elif isinstance(result, Generation):
            generation = result
        else:
            raise ValueError(f"Unexpected result type: {type(result)}")
        text = generation.text
        # Strip reasoning blocks emitted by thinking models (e.g. Qwen3's
        # <think>...</think> tags).
        text = re.sub(r"<think>.*?</think>", "", text, flags=re.S).strip()
        # Strip Markdown code fences.
        text = re.sub(r"^```(?:json)?|```$", "", text, flags=re.I | re.M).strip()
        match = re.search(r"(\[\s*{.*}\s*\]|\{\s*\".*\"\s*\})", text, flags=re.S)
        if not match:
            # Fallback: the LLM sometimes returns a bare SQL string instead of JSON.
            if "SELECT" in text.upper():
                return {"sql": text}
            raise ValueError(f"Invalid json output: {text[:200]}")
        json_text = match.group(1)
        return json.loads(json_text)


class TransactionParserAgent:
    def __init__(self, api_key: str, multimodal_api_url: str,
                 base_url: str = "https://api.deepseek.com",
                 model_name: str = "deepseek-chat"):
        # 1. Initialize the LangChain ChatOpenAI client. DeepSeek is fully
        #    OpenAI-compatible, so ChatOpenAI is the standard choice.
        print(f"Using model: {model_name}")
        self.llm = ChatOpenAI(
            model=model_name,
            api_key=api_key,
            base_url=base_url,
            temperature=0.0,
            max_retries=3,  # LangChain's built-in retry
            # Tune timeouts and connection pooling via custom httpx clients
            # (LangChain passes them through). The async client matters here
            # because the chain is invoked with ainvoke().
            http_client=httpx.Client(
                timeout=httpx.Timeout(300.0, read=300.0, connect=300.0),
                limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
            ),
            http_async_client=httpx.AsyncClient(
                timeout=httpx.Timeout(300.0, read=300.0, connect=300.0),
                limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
            )
        )
        self.multimodal_api_url = multimodal_api_url
        self.parser = SafeJsonOutputParser()
        # Track LLM API calls for auditing.
        self.api_calls = []

    def _validate_and_reconcile(self, parsed_data: list) -> list:
        """
        Amount validation and balance correction (defined here but not yet
        wired into parse_to_csv):
        1. Sort rows in ascending chronological order.
        2. Find an anchor row whose balance reconciles with both neighbors.
        3. Propagate corrections in both directions from the anchor.
        4. Circuit breaker: abort if more than 40% of rows need fixing.
        """
        if len(parsed_data) < 3:
            return parsed_data

        # Sort by date and time ascending (earliest first). Assumes txDate is
        # YYYY-MM-DD and txTime is HH:mm:ss, so string comparison is safe.
        def get_sort_key(x):
            # Missing dates/times sort last.
            date = x.get('txDate', '9999-12-31')
            tm = x.get('txTime', '23:59:59')
            # txId: keep digits only; ties on the same timestamp are broken by
            # txId in descending order. Unparseable ids fall back to infinity.
            try:
                txid_num = int(''.join(c for c in x.get('txId', '') if c.isdigit()))
            except (TypeError, ValueError):
                txid_num = float('inf')
            return (date, tm, -txid_num)

        if parsed_data[0]["txDate"] > parsed_data[-1]["txDate"]:
            # The statement appears newest-first; re-sort ascending.
            sorted_data = sorted(parsed_data, key=get_sort_key)
        else:
            # Already ascending; keep the original order.
            sorted_data = list(parsed_data)

        def to_float(s):
            try:
                return round(float(s), 2)
            except (TypeError, ValueError):
                return 0.0

        anchor_idx = -1
        # 1. Find a row that is almost certainly correct (the anchor). A row
        #    qualifies when both hold:
        #      current balance == previous balance +/- current amount
        #      current balance == next balance -/+ next amount
        for i in range(1, len(sorted_data) - 1):
            p = sorted_data[i - 1]
            c = sorted_data[i]
            n = sorted_data[i + 1]

            # Current row's balance.
            c_bal = to_float(c['txBalance'])

            # Derive the current balance from the previous row.
            p_bal = to_float(p['txBalance'])
            c_amt = to_float(c['txAmount'])
            calc_c_bal = round(p_bal + c_amt, 2) if c['txDirection'] == "收入" else round(p_bal - c_amt, 2)

            # Derive the current balance from the next row.
            n_amt = to_float(n['txAmount'])
            n_bal = to_float(n['txBalance'])
            calc_n_bal = round(n_bal - n_amt, 2) if n['txDirection'] == "收入" else round(n_bal + n_amt, 2)

            if c_bal == calc_c_bal and c_bal == calc_n_bal:
                anchor_idx = i
                break

        if anchor_idx == -1:
            print("⚠️ No anchor row with a consistent balance chain was found; "
                  "treating the statement as non-contiguous and skipping correction.")
            return parsed_data
        print(f"Anchor row index = {anchor_idx}")
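        # Worked example (illustrative numbers, not from real data): if the
        # anchor row carries balance 1000.00 and the next row is a '收入'
        # (income) of 250.00, that row's balance must be
        # 1000.00 + 250.00 = 1250.00. If OCR read 1280.00 instead, the forward
        # pass below overwrites it with the derived 1250.00 and counts one fix.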
        # 2. Propagate corrections outward from the anchor in both directions.
        new_data = [item.copy() for item in sorted_data]
        fix_count = 0
        print(sorted_data)  # Debug: dump the sorted rows.

        # Forward pass (later rows): balance[i] = balance[i-1] +/- amount[i],
        # so each row is derived from an already-corrected predecessor,
        # starting at the trusted anchor.
        for i in range(anchor_idx + 1, len(new_data)):
            curr_bal = to_float(new_data[i]['txBalance'])
            prev_bal = to_float(new_data[i - 1]['txBalance'])
            curr_amt = to_float(new_data[i]['txAmount'])
            expected_bal = round(prev_bal + curr_amt, 2) if new_data[i]['txDirection'] == "收入" else round(prev_bal - curr_amt, 2)
            if abs(curr_bal - expected_bal) > 0.01:
                print(f"[forward] row {new_data[i]['txId']}: balance {curr_bal}, derived {expected_bal}")
                new_data[i]['txBalance'] = str(expected_bal)
                fix_count += 1

        # Backward pass (earlier rows): balance[i] = balance[i+1] -/+ amount[i+1],
        # again leaning on already-corrected successors; runs down to row 0.
        for i in range(anchor_idx - 1, -1, -1):
            curr_bal = to_float(new_data[i]['txBalance'])
            next_bal = to_float(new_data[i + 1]['txBalance'])
            next_amt = to_float(new_data[i + 1]['txAmount'])
            expected_bal = round(next_bal - next_amt, 2) if new_data[i + 1]['txDirection'] == "收入" else round(next_bal + next_amt, 2)
            if abs(curr_bal - expected_bal) > 0.01:
                print(f"[backward] row {new_data[i]['txId']}: balance {curr_bal}, derived {expected_bal}")
                new_data[i]['txBalance'] = str(expected_bal)
                fix_count += 1

        # 3. Circuit breaker.
        fix_ratio = fix_count / len(new_data)
        if fix_ratio > 0.4:
            print(f"⚠️ Correction ratio {fix_ratio:.2%} exceeds 40%; the statement is likely non-contiguous, abandoning corrections.")
            return parsed_data

        print(f"✅ Balance reconciliation passed; auto-corrected {fix_count} rows.")
        return new_data

    async def _invoke_miner_u(self, file_path: str) -> str:
        """Call MinerU and extract the raw row data (plain httpx; this is not an LLM call)."""
        miner_start_time = time.perf_counter()
        print("\n" + "=" * 40)
        print("📌 [Step 1 - Data extraction] starting")
        dealRows = 0
        try:
            # MinerU is a standalone service; keep calling it with raw httpx.
            async with httpx.AsyncClient() as client:
                with open(file_path, 'rb') as f:
                    files = {'file': (os.path.basename(file_path), f)}
                    data = {'folderId': 'text'}
                    print("🔄 Extracting data...")
                    response = await client.post(self.multimodal_api_url, files=files, data=data, timeout=120.0)
                    if response.status_code == 200:
                        res_json = response.json()
                        full_md_list = []
                        for element in res_json.get('convert_json', []):
                            if 'md' in element:
                                full_md_list.append(element['md'])
                            if 'rows' in element:
                                dealRows += len(element['rows'])
                        # dealRows includes the header row, hence the -1.
                        print(f"📊 Extraction result: {dealRows - 1} data rows")
                        return "\n\n".join(full_md_list)
                    return ""
        except Exception as e:
            print(f"❌ MinerU call failed: {e}")
            return ""
        finally:
            print("✅ [Step 1 - Data extraction] finished")
            print(f"⏱️ Elapsed: {time.perf_counter() - miner_start_time:.2f} s")

    # --- 🆕 Core logic: SQLite transformation engine ---
    def _init_sqlite_db(self, data_rows: list, header_line: str, delimiter='|') -> tuple:
        """
        Load the Markdown row data into a generic wide table in an in-memory
        SQLite database. Returns (conn, max_cols).
        """
        # 1. Create the in-memory database.
        conn = sqlite3.connect(":memory:")
        cursor = conn.cursor()

        header_fingerprint = "".join(header_line.strip().strip('|').split())
        header_added = False  # Ensure only one header row enters the database.

        # 2. Determine the maximum column count and build a generic wide table
        #    (row_id, c0, c1, ... cN).
        max_cols = 0
        parsed_rows = []

        # Preprocess: clean up the Markdown separators.
        for row in data_rows:
            # Strip leading/trailing pipes.
            clean_row = row.strip().strip('|')

            # A. Skip pure divider lines (e.g. | --- | --- |).
            if not re.search(r'[\u4e00-\u9fa5a-zA-Z0-9]', clean_row):
                continue

            # B. Fingerprint of the current row.
            current_fingerprint = "".join(clean_row.split())
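            # Example (hypothetical header): "| 日期 | 金额 | 余额 |" strips to
            # "日期 | 金额 | 余额" and fingerprints to "日期|金额|余额", so a
            # header repeated at the top of every OCR page matches the
            # fingerprint and is kept only once.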
            # C. Core check: deduplicate repeated header rows.
            if current_fingerprint == header_fingerprint:
                if not header_added:
                    # Keep the first occurrence of the header.
                    header_added = True
                else:
                    # Skip any later identical header rows.
                    continue

            # Split the row into cells.
            parts = [p.strip() for p in clean_row.split(delimiter)]
            if len(parts) > max_cols:
                max_cols = len(parts)
            parsed_rows.append(parts)

        if max_cols == 0:
            return None, None

        # Build the generic wide table dynamically.
        cols_def = ", ".join([f"c{i} TEXT" for i in range(max_cols)])
        create_sql = f"CREATE TABLE temp_raw_data (row_id INTEGER PRIMARY KEY AUTOINCREMENT, {cols_def});"
        cursor.execute(create_sql)

        # 3. Bulk insert, padding short rows with None up to max_cols.
        insert_sql = (
            f"INSERT INTO temp_raw_data ({', '.join([f'c{i}' for i in range(max_cols)])}) "
            f"VALUES ({', '.join(['?' for _ in range(max_cols)])})"
        )
        final_data = []
        for p in parsed_rows:
            padding = [None] * (max_cols - len(p))
            final_data.append(p + padding)

        cursor.executemany(insert_sql, final_data)
        conn.commit()
        return conn, max_cols

    def _get_sql_generation_prompt(self) -> ChatPromptTemplate:
        system_template = """
# Role
You are a SQLite expert.

# Task
A table named `temp_raw_data` holds raw OCR output.
Its columns are named `c0`, `c1`, `c2` ... `cN`.
Based on the provided header and data sample, write one SQL query that maps the raw columns to the standard output fields.

# Target Schema (Output Columns)
Your SQL must SELECT the following fields, in this exact order:
1. `txId`: transaction id. If the source has none, use `row_id`.
2. `txDate`: transaction date (format YYYY-MM-DD).
3. `txTime`: transaction time (format HH:mm:ss). If absent, return '00:00:00'.
4. `txAmount`: transaction amount (absolute value; **commas must be removed**; cast to REAL/FLOAT).
5. `txDirection`: transaction direction (must be derived logically; output '收入' (income) or '支出' (expense)).
6. `txBalance`: balance (commas removed).
7. `txSummary`: summary / purpose.
8. `txCounterparty`: counterparty account / name.

# Logic Rules (Crucial!)
1. **Direction Logic**:
   - With a single debit/credit column: usually "借" (debit) = '支出', "贷" (credit) = '收入'.
   - With separate income/expense columns: whichever column holds a value sets the direction.
   - With signed amounts: a negative sign usually means expense.
   - Use SQL `CASE WHEN ... THEN ... ELSE ... END` for this.
2. **Data Cleaning**:
   - Amount fields must strip thousands separators: `CAST(REPLACE(c?, ',', '') AS REAL)`
   - Dates must be normalized.

# Output JSON Format
```json
{{
    "sql": "SELECT ... FROM temp_raw_data WHERE ..."
}}
```
"""
        user_template = """
# Table Info
Max Columns: {max_cols}
Generic Column Names: c0, c1, ... c{max_cols_minus_1}

# Data Preview (Header + First 3 Rows)
{data_preview}

# Instruction
Write the SQL statement that extracts and cleans the data.
Note: do not wrap the SQL in Markdown ```sql fences; return the JSON directly.
Skip the header row (usually row_id = 1 is the header, so use WHERE row_id > 1).
"""
        return ChatPromptTemplate.from_messages([("system", system_template), ("user", user_template)])
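    # A valid model answer is a single JSON object. Hypothetical example for a
    # layout where c1 is the date, c3 the debit amount, c4 the credit amount
    # and c5 the balance (column indices are illustrative only):
    #
    #   {"sql": "SELECT row_id AS txId, c1 AS txDate, '00:00:00' AS txTime,
    #            CAST(REPLACE(COALESCE(NULLIF(c3, ''), c4), ',', '') AS REAL) AS txAmount,
    #            CASE WHEN c3 IS NOT NULL AND c3 != '' THEN '支出' ELSE '收入' END AS txDirection,
    #            REPLACE(c5, ',', '') AS txBalance, c6 AS txSummary, c7 AS txCounterparty
    #            FROM temp_raw_data WHERE row_id > 1"}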
    async def _generate_transform_sql(self, header_row: str, sample_rows: list, max_cols: int) -> str:
        """Ask the LLM to write the transformation SQL."""
        # Build the preview with c0/c1-style column hints so the LLM can map
        # raw columns to the target fields.
        preview_text = ""
        # Header preview.
        header_parts = [p.strip() for p in header_row.strip().strip('|').split('|')]
        header_map = " | ".join([f"c{i}({val})" for i, val in enumerate(header_parts)])
        preview_text += f"Mapping Hint: {header_map}\n"
        preview_text += "-" * 50 + "\n"
        # Data preview.
        for row in sample_rows:
            preview_text += row + "\n"

        prompt_params = {
            "max_cols": max_cols,
            "max_cols_minus_1": max_cols - 1,
            "data_preview": preview_text
        }

        # Record the API call start time.
        call_start_time = datetime.datetime.now()

        chain = self._get_sql_generation_prompt() | self.llm | self.parser
        print("🧠 [LLM] Generating SQL cleaning logic...")
        result = ""
        try:
            result = await chain.ainvoke(prompt_params)
            sql = result.get("sql")
            print(f"💡 [LLM] Generated SQL:\n{sql}")
            return sql
        except Exception as e:
            print(f"❌ SQL generation failed: {e}")
            return ""
        finally:
            # Record the API call end time.
            call_end_time = datetime.datetime.now()
            # Record the call result (simplified: prompt and result data only).
            call_id = f"api_llm_data_transform_{(call_end_time - call_start_time).total_seconds():.2f}"

            # Extract the prompt from the chain if possible.
            prompt_content = ""
            try:
                if hasattr(chain, 'get_prompts'):
                    prompts = chain.get_prompts()
                    if prompts:
                        prompt_content = str(prompts[-1])
                else:
                    # Fall back to a basic description of the inputs.
                    prompt_content = f"Inputs: max_cols={max_cols}, preview_text={preview_text}..."
            except Exception:
                prompt_content = f"Inputs: max_cols={max_cols}, preview_text={preview_text}..."

            api_call_info = {
                "call_id": call_id,
                "start_time": call_start_time.isoformat(),
                "end_time": call_end_time.isoformat(),
                "duration": (call_end_time - call_start_time).total_seconds(),
                "prompt": prompt_content,
                "input_params": {
                    "max_cols": max_cols,
                    "max_cols_minus_1": max_cols - 1,
                    "data_preview": preview_text
                },
                "llm_result": result
            }
            self.api_calls.append(api_call_info)

            # Persist the API result to a file (Markdown, easier to read),
            # under a folder dedicated to this run ID.
            run_id = os.environ.get('FLOW_RUN_ID', 'default')
            api_results_dir = f"api_results_{run_id}"
            os.makedirs(api_results_dir, exist_ok=True)
            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"{timestamp}_{call_id}.md"
            filepath = os.path.join(api_results_dir, filename)
            try:
                with open(filepath, 'w', encoding='utf-8') as f:
                    f.write("# Data Transformation Result\n\n")
                    f.write("## Call Info\n\n")
                    f.write(f"- Call ID: {call_id}\n")
                    f.write(f"- Start: {call_start_time.isoformat()}\n")
                    f.write(f"- End: {call_end_time.isoformat()}\n")
                    f.write(f"- Duration: {(call_end_time - call_start_time).total_seconds():.2f} s\n")
                    f.write("\n## Prompt\n\n")
                    f.write("```\n")
                    f.write(api_call_info["prompt"])
                    f.write("\n```\n\n")
                    f.write("## Input Params\n\n")
                    f.write("```json\n")
                    f.write(json.dumps(api_call_info["input_params"], ensure_ascii=False, indent=2))
                    f.write("\n```\n\n")
                    f.write("## LLM Result\n\n")
                    f.write("```json\n")
                    f.write(json.dumps(api_call_info["llm_result"], ensure_ascii=False, indent=2))
                    f.write("\n```\n")
                print(f"[API_RESULT] Saved API result file: {filepath}")
            except Exception as e:
                print(f"[ERROR] Failed to save API result file: {filepath}, error: {e}")
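    # Shape of the data at each stage of parse_to_csv (illustrative one-row
    # statement with invented values):
    #   MinerU markdown:  | 日期 | 金额 | 余额 |
    #                     | 2024-01-02 | 1,000.00 | 5,000.00 |
    #   temp_raw_data:    row_id=1 -> ('日期', '金额', '余额')
    #                     row_id=2 -> ('2024-01-02', '1,000.00', '5,000.00')
    #   CSV output:       txId,txDate,txTime,txAmount,txDirection,txBalance,...
    #                     1,2024-01-02,00:00:00,1000.0,收入,5000.00,...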
    async def parse_to_csv(self, file_path: str) -> str:
        # 1. Get the Markdown.
        md_text = await self._invoke_miner_u(file_path)
        if not md_text:
            return ""

        # High-resolution timer for this step.
        start_time = time.perf_counter()
        print("\n" + "=" * 40)
        print("📌 [Step 2 - Standardization] starting")

        # 2. Preprocess the data rows.
        raw_lines = md_text.splitlines()
        clean_lines = [l.strip() for l in raw_lines if l.strip() and "|" in l]

        # Simple header detection: a line containing 2+ keywords.
        header_line = ""
        header_idx = 0
        keywords = ["日期", "金额", "余额", "摘要", "用途", "借", "贷"]
        for idx, line in enumerate(clean_lines):
            if sum(1 for k in keywords if k in line) >= 2:
                header_line = line
                header_idx = idx
                break
        if not header_line:
            header_line = clean_lines[0]

        # Data rows (keep the raw lines; the header goes into the DB too and
        # is filtered later via row_id > header_idx + 1).
        data_rows = clean_lines

        # 3. Load into SQLite.
        conn, max_cols = self._init_sqlite_db(data_rows, header_line)
        if not conn:
            return ""

        try:
            # 4. Ask the LLM for the SQL, sampling the header plus the first
            #    three data rows.
            sample_data = clean_lines[header_idx:header_idx + 4]
            sql_query = await self._generate_transform_sql(header_line, sample_data, max_cols)
            if not sql_query:
                return ""

            # 5. Execute the SQL.
            cursor = conn.cursor()
            # Safety: make sure the SQL is read-only.
            if "DROP" in sql_query.upper() or "DELETE" in sql_query.upper():
                raise ValueError("Unsafe SQL detected")
            # The LLM occasionally forgets to filter the header row; for now we
            # trust the generated SQL (a retry loop could re-prompt with the error).
            print("🚀 [SQLite] Executing query...")
            cursor.execute(sql_query)
            results = cursor.fetchall()
            print(f"✅ Extraction succeeded: {len(results)} rows")

            # 6. Export as a CSV string.
            output = io.StringIO()
            writer = csv.writer(output, quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
            # Standard header row.
            csv_header = ["txId", "txDate", "txTime", "txAmount", "txDirection",
                          "txBalance", "txSummary", "txCounterparty", "createdAt"]
            writer.writerow(csv_header)

            created_at = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            for row in results:
                # row is a tuple (id, date, time, amt, dir, bal, summary, counterparty);
                # convert to a list and append createdAt.
                row_list = list(row)

                # --- 🆕 Take the absolute value of txAmount ---
                try:
                    raw_amount = str(row_list[3]).replace(',', '')  # strip commas again, just in case
                    if raw_amount:
                        # Parse to float, take abs (kept as float).
                        row_list[3] = abs(float(raw_amount))
                except (ValueError, TypeError):
                    # Conversion failed (e.g. OCR produced text); fall back to 0.0.
                    print(f"⚠️ Amount conversion failed: {row_list[3]}")
                    row_list[3] = 0.0

                # Safety: replace None with "".
                row_list = [str(x) if x is not None else "" for x in row_list]
                # Keep only the first 8 fields (in case the LLM selected extras).
                final_row = row_list[:8] + [created_at]
                writer.writerow(final_row)

            return output.getvalue()
        except sqlite3.Error as e:
            print(f"❌ SQLite execution error: {e}")
            # A retry loop could feed the error back to the LLM to fix the SQL.
            return ""
        finally:
            conn.close()
            print("✅ [Step 2 - Standardization] finished")
            print(f"⏱️ Total elapsed: {time.perf_counter() - start_time:.2f} s")

    # --- Workflow entry points ---
    async def parse_and_save_to_file(self, file_path: str, output_dir: str = "output") -> str:
        current_script_path = os.path.abspath(__file__)
        current_dir = os.path.dirname(current_script_path)

        file_full_name = os.path.basename(file_path)
        file_name = os.path.splitext(file_full_name)[0]

        output_dir = os.path.normpath(os.path.join(current_dir, "..", "..", output_dir))
        os.makedirs(output_dir, exist_ok=True)
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        file_name = f"{file_name}_data_standard_{timestamp}.csv"
        full_path = os.path.join(output_dir, file_name)

        csv_result = await self.parse_to_csv(file_path)
        if csv_result:
            with open(full_path, "w", encoding="utf-8") as f:
                f.write(csv_result)
            return full_path
        else:
            raise Exception("Data parsing failed; no valid content was generated")
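    # run_workflow_task returns one of two dict shapes (example values):
    #   {"status": "success", "file_path": ".../xxx_data_standard_<ts>.csv",
    #    "file_name": "xxx_data_standard_<ts>.csv", "timestamp": "2024-01-02T10:30:00"}
    #   {"status": "error", "message": "<exception text>"}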
    async def run_workflow_task(self, input_file_path: str) -> dict:
        # 1. High-resolution timer for the whole task.
        start_time = time.perf_counter()
        print("BEGIN---Data standardization task started---")
        try:
            print(f"File to standardize: {input_file_path}")
            saved_path = await self.parse_and_save_to_file(input_file_path, "data_files")
            print(f"Result file saved to: {saved_path}")
            return {
                "status": "success",
                "file_path": saved_path,
                "file_name": os.path.basename(saved_path),
                "timestamp": datetime.datetime.now().isoformat()
            }
        except Exception as e:
            return {"status": "error", "message": str(e)}
        finally:
            elapsed_time = time.perf_counter() - start_time
            print(f"⏱️ Total elapsed: {elapsed_time:.2f} s")
            print("END---Data standardization task finished")


async def data_standardize(api_key: str, base_url: str, model_name: str,
                           multimodal_api_url: str, input_file_path: str) -> dict:
    """Entry point for data standardization."""
    # Create the agent.
    agent = TransactionParserAgent(
        api_key=api_key,
        base_url=base_url,
        model_name=model_name,
        multimodal_api_url=multimodal_api_url
    )
    # Run the standardization workflow.
    return await agent.run_workflow_task(input_file_path)


# --- Run ---
async def main():
    agent = TransactionParserAgent(
        api_key="",
        multimodal_api_url="http://103.154.31.78:20012/api/file/read",
        model_name="Qwen3-32B",
        base_url="http://10.192.72.12:9996/v1",
    )
    current_script_path = os.path.abspath(__file__)
    current_dir = os.path.dirname(current_script_path)
    # Simulate the workflow handing over a file to process.
    input_pdf = "data_files/11111.png"
    filepath = os.path.normpath(os.path.join(current_dir, "..", "..", input_pdf))
    if not os.path.exists(filepath):
        print(f"{filepath} does not exist")
        return
    result = await agent.run_workflow_task(filepath)
    if result["status"] == "success":
        print("🎯 [Data standardization] task complete!")
    else:
        print(f"❌ Task failed: {result['message']}")


if __name__ == "__main__":
    asyncio.run(main())
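# Usage sketch from another module (illustrative values; the key, endpoints
# and paths below are placeholders, not real configuration):
#
#   import asyncio
#   from transaction_parser_agent import data_standardize  # hypothetical module name
#
#   result = asyncio.run(data_standardize(
#       api_key="sk-...",
#       base_url="https://api.deepseek.com",
#       model_name="deepseek-chat",
#       multimodal_api_url="http://mineru.internal/api/file/read",
#       input_file_path="data_files/statement.pdf",
#   ))
#   print(result["status"], result.get("file_path"))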