data_stardard.py

import asyncio
import csv
import datetime
import io
import json
import os
import re
import time
import uuid

import httpx

# --- LangChain Imports ---
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation
class SafeJsonOutputParser(JsonOutputParser):
    """JSON parser that tolerates <think> blocks, code fences, and surrounding chatter."""

    def parse_result(self, result, *, partial: bool = False):
        if isinstance(result, list) and len(result) > 0:
            generation = result[0]
        elif isinstance(result, Generation):
            generation = result
        else:
            raise ValueError(f"Unexpected result type: {type(result)}")
        text = generation.text
        # 1. Strip <think>...</think> reasoning blocks
        text = re.sub(r"<think>.*?</think>", "", text, flags=re.S).strip()
        # 2. Strip ```json ... ``` code-fence wrappers
        text = re.sub(r"^```(?:json)?|```$", "", text, flags=re.I | re.M).strip()
        # 3. Extract only the JSON body (an array of objects, or a single object)
        match = re.search(r"(\[\s*{.*}\s*\]|\{\s*\".*\"\s*\})", text, flags=re.S)
        if not match:
            raise ValueError(f"Invalid json output after clean: {text[:200]}")
        json_text = match.group(1)
        return json.loads(json_text)
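
# Illustrative example (assumed raw completion, not part of the pipeline):
# the three cleanup steps above turn a noisy model output such as
#   '<think>reasoning...</think>\n```json\n[{"txId": "T0001"}]\n```'
# into the parsed Python value [{"txId": "T0001"}].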
# --- Core Parser ---
class TransactionParserAgent:
    def __init__(self, api_key: str, multimodal_api_url: str, base_url: str = "https://api.deepseek.com"):
        # 1. Initialize the LangChain ChatOpenAI client.
        # DeepSeek is fully OpenAI-compatible, so ChatOpenAI is the standard choice.
        self.llm = ChatOpenAI(
            model="deepseek-chat",
            api_key=api_key,
            base_url=base_url,
            temperature=0.1,
            max_retries=3,  # LangChain's built-in retry mechanism
            # Configure httpx clients to tune timeouts and connection pooling
            # (LangChain allows passing http clients through). The chain below is
            # called with ainvoke, so the async client is also configured; otherwise
            # these settings would only apply to sync calls.
            http_client=httpx.Client(
                timeout=httpx.Timeout(300.0, read=300.0, connect=60.0),
                limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
            ),
            http_async_client=httpx.AsyncClient(
                timeout=httpx.Timeout(300.0, read=300.0, connect=60.0),
                limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
            )
        )
        self.multimodal_api_url = multimodal_api_url
        # JSON output parser
        self.parser = SafeJsonOutputParser()
        # Track API calls made during a run
        self.api_calls = []
    async def _invoke_miner_u(self, file_path: str) -> str:
        """Call MinerU and extract the raw row data (kept as a plain httpx call, since this is not an LLM)."""
        miner_start_time = time.perf_counter()
        print("\n" + "=" * 40)
        print("📌 [Step 1 - Data Extraction] starting")
        deal_rows = 0
        try:
            # MinerU is a standalone service, so we keep using native httpx here
            async with httpx.AsyncClient() as client:
                with open(file_path, 'rb') as f:
                    files = {'file': (os.path.basename(file_path), f)}
                    data = {'folderId': 'text'}
                    print("🔄 Extracting data...")
                    response = await client.post(self.multimodal_api_url, files=files, data=data, timeout=120.0)
                if response.status_code == 200:
                    res_json = response.json()
                    full_md_list = []
                    for element in res_json.get('convert_json', []):
                        if 'md' in element:
                            full_md_list.append(element['md'])
                        if 'rows' in element:
                            deal_rows += len(element['rows'])
                    # Subtract 1 to exclude the table header row from the count
                    print(f"📊 Extraction result: {deal_rows - 1} rows extracted")
                    return "\n\n".join(full_md_list)
                return ""
        except Exception as e:
            print(f"❌ MinerU call failed: {e}")
            return ""
        finally:
            print("✅ [Step 1 - Data Extraction] finished")
            print(f"⏱️ Elapsed: {time.perf_counter() - miner_start_time:.2f} s")
    def _get_csv_prompt_template(self) -> ChatPromptTemplate:
        """
        Build the LangChain prompt template.
        """
        system_template = """
# Role
You are a high-precision bank-statement conversion tool.
# Task
Convert the input Markdown table rows into a JSON array.
# Field Rules
1. txId: use the transaction serial number if present in the input; otherwise generate incrementing IDs starting from T{start_id:04d}.
2. txDate: transaction date, formatted as YYYY-MM-DD.
3. txTime: transaction time, formatted as HH:mm:ss (use 00:00:00 if unknown).
4. txAmount: transaction amount, as an absolute number.
5. txBalance: balance after the transaction. Float; remove thousands separators.
6. txDirection: transaction direction. Output exactly "收入" (income) or "支出" (expense), decided as follows:
   - If there is a "借/贷" (debit/credit) column: "借" is usually an expense, "贷" usually income (unless it is a credit card; check the header).
   - If income and expense are separate columns: classify by column.
   - If the amount carries a sign: "+" is income, "-" is expense.
   - If unsigned, decide from the table header.
7. txSummary: summary, purpose, business type, or other remarks.
8. txCounterparty: the counterparty (name and account number, if available).
# Constraints
- **Mandatory output format**:
  1. Return strictly a JSON array of objects.
  2. Every object must contain all 8 field names above as keys.
  3. Do not output any explanatory text or Markdown code-fence tags.
# Anti-Hallucination Rules
- Do not infer any field that does not explicitly appear in the source data.
- Do not compute or guess balances.
- Do not complete counterparty names from common knowledge.
- If a field is missing, return an empty string "".
"""
        user_template = """# Input Data
{chunk_data}
# Output
JSON Array:
"""
        return ChatPromptTemplate.from_messages([
            ("system", system_template),
            ("user", user_template)
        ])
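
    # Illustrative example (assumed sample data): under the rules above, an input
    # row such as
    #   | 2024-01-05 | 09:30:15 | 工资 | 贷 | 1,500.00 | 20,350.75 | XX公司 |
    # should come back as
    #   {"txId": "T0001", "txDate": "2024-01-05", "txTime": "09:30:15",
    #    "txAmount": 1500.00, "txDirection": "收入", "txBalance": 20350.75,
    #    "txSummary": "工资", "txCounterparty": "XX公司"}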
    async def parse_to_csv(self, file_path: str) -> str:
        # 1. Fetch the full Markdown text and split it into lines
        md_text = await self._invoke_miner_u(file_path)
        if not md_text:
            return ""
        # Record the start time (time.perf_counter gives a high-resolution clock)
        switch_start_time = time.perf_counter()
        print("\n" + "=" * 40)
        print("📌 [Step 2 - Standardization] starting")
        # Initial split
        raw_lines = md_text.splitlines()
        # Find the real first row to use as the reference table header
        clean_lines = [l.strip() for l in raw_lines if l.strip()]
        if len(clean_lines) < 2:
            return ""
        # --- Key improvement: locate the table header dynamically ---
        table_header = ""
        header_index = 0
        header_keywords = ["余额", "金额", "账号", "日期", "借/贷", "摘要"]
        for idx, line in enumerate(clean_lines):
            # A line containing 2+ keywords plus the Markdown table separator '|'
            # is treated as the header row; see the example below
            hit_count = sum(1 for kw in header_keywords if kw in line)
            if hit_count >= 2 and "|" in line:
                table_header = line
                header_index = idx
                break
        if not table_header:
            # Fall back to the first non-empty line
            table_header = clean_lines[0]
            header_index = 0
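        # Illustrative example (assumed input shape): a header row such as
        #   | 交易日期 | 摘要 | 借/贷 | 交易金额 | 账户余额 |
        # hits several keywords and contains '|', so it is picked as table_header.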
        data_rows = []
        for line in clean_lines[header_index + 1:]:
            # Skip Markdown alignment rows like |---|---|
            if all(c in '|- ' for c in line):
                continue
            # Skip repeated header rows
            if line == table_header:
                continue
            # Drop page numbers or stray text MinerU may emit after the table
            if "|" not in line:
                continue
            data_rows.append(line)
        csv_header = "txId,txDate,txTime,txAmount,txDirection,txBalance,txSummary,txCounterparty,createdAt\n"
        csv_content = csv_header
        batch_size = 15
        global_tx_counter = 1
        # Build the LCEL chain: Prompt -> LLM -> Parser
        chain = self._get_csv_prompt_template() | self.llm | self.parser
        # 2. Process in chunks
        for i in range(0, len(data_rows), batch_size):
            chunk = data_rows[i: i + batch_size]
            # Prepend the table header so every chunk is self-describing
            context_chunk = [table_header] + chunk
            chunk_str = "\n".join(context_chunk)
            # Record the batch start time (high-resolution clock)
            start_time = time.perf_counter()
            print(f"🔄 Converting batch {i // batch_size + 1} via LLM, {len(chunk)} rows...")
            # print(f"Chunk to convert:\n{chunk_str}")
            try:
                # --- LangChain call ---
                # Invoke the chain asynchronously with ainvoke;
                # record the wall-clock start and end of the API call
                call_start_time = datetime.datetime.now()
                data_data = await chain.ainvoke({
                    "start_id": global_tx_counter,
                    "chunk_data": chunk_str
                })
                call_end_time = datetime.datetime.now()
                # Record the API call - simplified: keep only the prompt and result data
                call_id = f"api_llm_data_conversion_{(call_end_time - call_start_time).total_seconds():.2f}"
                # Extract the prompt from the chain (best effort)
                prompt_content = ""
                try:
                    # Try to read the final prompt from the chain
                    if hasattr(chain, 'get_prompts'):
                        prompts = chain.get_prompts()
                        if prompts:
                            prompt_content = str(prompts[-1])
                    if not prompt_content:
                        # Fall back to a basic description of the prompt
                        prompt_content = f"Convert batch, start_id: {global_tx_counter}, chunk_data: {chunk_str[:200]}..."
                except Exception:
                    prompt_content = f"Convert batch, start_id: {global_tx_counter}, chunk_data: {chunk_str[:200]}..."
                api_call_info = {
                    "call_id": call_id,
                    "start_time": call_start_time.isoformat(),
                    "end_time": call_end_time.isoformat(),
                    "duration": (call_end_time - call_start_time).total_seconds(),
                    "prompt": prompt_content,
                    "input_params": {
                        "start_id": global_tx_counter,
                        "chunk_data": chunk_str
                    },
                    "llm_result": data_data
                }
                self.api_calls.append(api_call_info)
                # Persist the API result to a file (Markdown, easier to read).
                # Each run gets its own folder, keyed by the run ID.
                run_id = os.environ.get('FLOW_RUN_ID', 'default')
                api_results_dir = f"api_results_{run_id}"
                os.makedirs(api_results_dir, exist_ok=True)
                timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
                filename = f"{timestamp}_{call_id}.md"
                filepath = os.path.join(api_results_dir, filename)
                try:
                    with open(filepath, 'w', encoding='utf-8') as f:
                        f.write("# Data Conversion Result\n\n")
                        f.write("## Call Info\n\n")
                        f.write(f"- Call ID: {call_id}\n")
                        f.write(f"- Start time: {call_start_time.isoformat()}\n")
                        f.write(f"- End time: {call_end_time.isoformat()}\n")
                        f.write(f"- Duration: {(call_end_time - call_start_time).total_seconds():.2f} s\n")
                        f.write("\n## Prompt\n\n")
                        f.write("```\n")
                        f.write(api_call_info["prompt"])
                        f.write("\n```\n\n")
                        f.write("## Input Params\n\n")
                        f.write("```json\n")
                        f.write(json.dumps(api_call_info["input_params"], ensure_ascii=False, indent=2))
                        f.write("\n```\n\n")
                        f.write("## LLM Result\n\n")
                        f.write("```json\n")
                        f.write(json.dumps(api_call_info["llm_result"], ensure_ascii=False, indent=2))
                        f.write("\n```\n")
                    print(f"[API_RESULT] Saved API result file: {filepath}")
                except Exception as e:
                    print(f"[ERROR] Failed to save API result file: {filepath}, error: {e}")
                # print(f"💡 LLM returned: {data_data}")
                # Compatibility: the LangChain parser normally returns a list or dict directly
                if isinstance(data_data, dict):
                    # Look for a "transactions" key; otherwise assume the dict itself
                    # is the record we want (rare, but possible)
                    batch_data = data_data.get("transactions", [data_data])
                    # If it is still a dict (e.g. a single record), wrap it in a list
                    if isinstance(batch_data, dict):
                        batch_data = [batch_data]
                elif isinstance(data_data, list):
                    batch_data = data_data
                else:
                    batch_data = []
                if batch_data:
                    output = io.StringIO()
                    createdAtStr = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    writer = csv.writer(output, quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
                    print(f"✅ Batch converted successfully, {len(batch_data)} records.")
                    for item in batch_data:
                        writer.writerow([
                            item.get("txId", ""),
                            item.get("txDate", ""),
                            item.get("txTime", ""),
                            item.get("txAmount", ""),
                            item.get("txDirection", ""),
                            item.get("txBalance", ""),
                            item.get("txSummary", ""),
                            item.get("txCounterparty", ""),
                            createdAtStr
                        ])
                    batch_csv_string = output.getvalue()
                    csv_content += batch_csv_string
                    global_tx_counter += len(batch_data)
            except Exception as e:
                print(f"⚠️ Batch failed: {e}")
            finally:
                end_time = time.perf_counter()
                elapsed_time = end_time - start_time
                print(f"⏱️ Batch elapsed: {elapsed_time:.2f} s")
        print(f"📊 Conversion result: {global_tx_counter - 1} rows converted")
        print("✅ [Step 2 - Standardization] finished")
        # Report the total elapsed time for step 2, mirroring step 1
        print(f"⏱️ Elapsed: {time.perf_counter() - switch_start_time:.2f} s")
        return csv_content
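
    # Illustrative output (assumed sample data): on success, parse_to_csv returns
    # CSV text such as
    #   txId,txDate,txTime,txAmount,txDirection,txBalance,txSummary,txCounterparty,createdAt
    #   T0001,2024-01-05,09:30:15,1500.0,收入,20350.75,工资,XX公司,2024-06-01 12:00:00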
    async def parse_and_save_to_file(self, file_path: str, output_dir: str = "output") -> str:
        """
        Called by the workflow: parse the file, save the CSV, and return its full path.
        """
        current_script_path = os.path.abspath(__file__)
        current_dir = os.path.dirname(current_script_path)
        file_full_name = os.path.basename(file_path)
        file_name = os.path.splitext(file_full_name)[0]  # without extension
        output_dir = os.path.normpath(os.path.join(current_dir, "..", "..", output_dir))
        os.makedirs(output_dir, exist_ok=True)
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        file_name = f"{file_name}_data_standard_{timestamp}.csv"
        full_path = os.path.join(output_dir, file_name)
        csv_result = await self.parse_to_csv(file_path)
        if csv_result:
            with open(full_path, "w", encoding="utf-8") as f:
                f.write(csv_result)
            return full_path
        else:
            raise Exception("Data parsing failed; no valid content was generated")
    async def run_workflow_task(self, input_file_path: str) -> dict:
        """
        Standard workflow entry point.
        """
        # 1. Record the start time (high-resolution clock)
        start_time = time.perf_counter()
        print("BEGIN---data standardization task started---")
        try:
            print(f"File to standardize: {input_file_path}")
            output_dir = "data_files"
            saved_path = await self.parse_and_save_to_file(input_file_path, output_dir)
            return {
                "status": "success",
                "file_path": saved_path,
                "file_name": os.path.basename(saved_path),
                "timestamp": datetime.datetime.now().isoformat()
            }
        except Exception as e:
            return {
                "status": "error",
                "message": str(e)
            }
        finally:
            end_time = time.perf_counter()
            elapsed_time = end_time - start_time
            print(f"⏱️ Total elapsed: {elapsed_time:.2f} s")
            print("END---data standardization task finished")
async def data_standize(api_key: str, base_url: str, multimodal_api_url: str, input_file_path: str) -> dict:
    """
    Data standardization entry point.
    """
    # Create the agent, passing the caller-supplied key through
    # (the original hardcoded a credential here and ignored api_key)
    agent = TransactionParserAgent(
        api_key=api_key,
        base_url=base_url,
        multimodal_api_url=multimodal_api_url
    )
    # Run the standardization task
    return await agent.run_workflow_task(input_file_path)
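
# Minimal usage sketch (illustrative; the MinerU endpoint and env var name are placeholders):
#   result = await data_standize(
#       api_key=os.environ.get("DEEPSEEK_API_KEY", ""),
#       base_url="https://api.deepseek.com",
#       multimodal_api_url="http://<mineru-host>/api/file/read",
#       input_file_path="data_files/statement.pdf",
#   )
#   # -> {"status": "success", "file_path": "...", "file_name": "...", "timestamp": "..."}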
# --- Run ---
async def main():
    agent = TransactionParserAgent(
        # Read the key from the environment instead of hardcoding it in source;
        # DEEPSEEK_API_KEY is an assumed variable name
        api_key=os.environ.get("DEEPSEEK_API_KEY", ""),
        multimodal_api_url="http://103.154.31.78:20012/api/file/read"
    )
    current_script_path = os.path.abspath(__file__)
    current_dir = os.path.dirname(current_script_path)
    # Simulate the workflow handing in a file to process
    input_pdf = "data_files/11111.png"
    filepath = os.path.normpath(os.path.join(current_dir, "..", "..", input_pdf))
    if not os.path.exists(filepath):
        print(f"File does not exist: {filepath}")
        return
    result = await agent.run_workflow_task(filepath)
    if result["status"] == "success":
        print("🎯 [Data Standardization] task complete!")
    else:
        print(f"❌ Task failed: {result['message']}")

if __name__ == "__main__":
    asyncio.run(main())