# data_stardard.py

import os
import time
import asyncio
import io
import csv
import datetime
import httpx
import json
import sqlite3
import re

# --- LangChain Imports ---
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation


# --- Utility class (kept as-is) ---
class SafeJsonOutputParser(JsonOutputParser):
    def parse_result(self, result, *, partial: bool = False):
        if isinstance(result, list) and len(result) > 0:
            generation = result[0]
        elif isinstance(result, Generation):
            generation = result
        else:
            raise ValueError(f"Unexpected result type: {type(result)}")
        text = generation.text
        text = re.sub(r"<think>.*?</think>", "", text, flags=re.S).strip()
        text = re.sub(r"^```(?:json)?|```$", "", text, flags=re.I | re.M).strip()
        match = re.search(r"(\[\s*{.*}\s*\]|\{\s*\".*\"\s*\})", text, flags=re.S)
        if not match:
            # Fallback: the LLM sometimes returns a bare SQL string instead of JSON,
            # so tolerate that case here
            if "SELECT" in text.upper():
                return {"sql": text}
            raise ValueError(f"Invalid json output: {text[:200]}")
        json_text = match.group(1)
        return json.loads(json_text)
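

# Illustrative example: given a raw completion such as
#     '<think>reasoning...</think>```json\n{"sql": "SELECT 1"}\n```'
# parse_result() strips the <think> block and the code fences, regex-extracts the
# first JSON object/array, and returns {"sql": "SELECT 1"}. A bare 'SELECT ...'
# answer with no JSON at all is tolerated and wrapped as {"sql": <text>}.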


class TransactionParserAgent:
    def __init__(self, api_key: str, multimodal_api_url: str, base_url: str = "https://api.deepseek.com", model_name: str = "deepseek-chat"):
        # 1. Initialize the LangChain ChatOpenAI client.
        # DeepSeek is fully OpenAI-compatible, so ChatOpenAI is the standard choice.
        print(f"Current model: {model_name}")
        self.llm = ChatOpenAI(
            model=model_name,
            api_key=api_key,
            base_url=base_url,
            temperature=0.0,
            max_retries=3,  # LangChain's built-in retry mechanism
            # Configure the httpx client to tune timeouts and connection pooling
            # (LangChain allows passing a custom http_client through)
            http_client=httpx.Client(
                timeout=httpx.Timeout(300.0, read=300.0, connect=300.0),
                limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
            )
        )
        self.multimodal_api_url = multimodal_api_url
        self.parser = SafeJsonOutputParser()
        # Track LLM API calls
        self.api_calls = []

    def _validate_and_reconcile(self, parsed_data: list) -> list:
        """
        Amount validation and balance reconciliation:
        1. Sort rows in ascending order.
        2. Find an anchor row whose balance reconciles with both neighbours.
        3. Propagate corrections in both directions from the anchor.
        4. Circuit breaker: abort if more than 40% of rows need fixing.
        """
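        # Worked example (illustrative numbers, not from real data):
        #   r0: balance 100.00
        #   r1: income 50.00, balance 150.00
        #   r2: expense 30.00, balance 120.00
        # r1 is an anchor because 100.00 + 50.00 == 150.00 (previous-row relation)
        # and 150.00 - 30.00 == 120.00 (next-row relation); corrections then fan
        # out from r1 toward both ends of the list.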
        if len(parsed_data) < 3:
            return parsed_data
        # Sort by date and time in ascending order (earliest row first).
        # Assumes txDate is YYYY-MM-DD and txTime is HH:mm:ss.
        def get_sort_key(x):
            # Missing dates/times fall back to max values so such rows sort last
            tx_date = x.get('txDate', '9999-12-31')
            tx_time = x.get('txTime', '23:59:59')
            # txId: keep the digits only; fall back to +inf on parse failure
            try:
                txid_num = int(''.join([c for c in x.get('txId', '') if c.isdigit()]))
            except (ValueError, TypeError):
                txid_num = float('inf')
            return (tx_date, tx_time, -txid_num)
        if parsed_data[0]["txDate"] > parsed_data[-1]["txDate"]:
            # Descending input: re-sort ascending
            sorted_data = sorted(parsed_data, key=get_sort_key)
        else:
            # Already in ascending order: use as-is
            sorted_data = list(parsed_data)

        def to_float(s):
            try:
                return round(float(s), 2)
            except (ValueError, TypeError):
                return 0.0
        anchor_idx = -1
        # 1. Find a row that is definitely correct (the anchor). A row qualifies
        # when both relations hold:
        #   current balance == previous balance +/- current amount
        #   current balance == next balance -/+ next amount
        for i in range(1, len(sorted_data) - 1):
            p = sorted_data[i - 1]
            c = sorted_data[i]
            n = sorted_data[i + 1]
            # Current row's balance
            c_bal = to_float(c['txBalance'])
            # Derive the current balance from the previous row
            # ("收入" = income, "支出" = expense; values fixed by the SQL prompt)
            p_bal = to_float(p['txBalance'])
            c_amt = to_float(c['txAmount'])
            calc_c_bal = round(p_bal + c_amt, 2) if c['txDirection'] == "收入" else round(p_bal - c_amt, 2)
            # Derive the current balance from the next row
            n_amt = to_float(n['txAmount'])
            n_bal = to_float(n['txBalance'])
            calc_n_bal = round(n_bal - n_amt, 2) if n['txDirection'] == "收入" else round(n_bal + n_amt, 2)
            if c_bal == calc_c_bal and c_bal == calc_n_bal:
                anchor_idx = i
                break
        if anchor_idx == -1:
            print("⚠️ No anchor row satisfies the reconciliation relations; treating the statement as non-contiguous and skipping correction.")
            return parsed_data
        print(f"Anchor row index={anchor_idx}")
        # 2. Propagate corrections upward and downward from the anchor
        new_data = [item.copy() for item in sorted_data]
        fix_count = 0
        # Downward pass (toward the future): current balance = next balance -/+ next amount
        for i in range(anchor_idx + 1, len(new_data) - 1):
            curr_bal = to_float(new_data[i]['txBalance'])
            next_bal = to_float(new_data[i + 1]['txBalance'])
            next_amt = to_float(new_data[i + 1]['txAmount'])
            expected_bal = round(next_bal - next_amt, 2) if new_data[i + 1]['txDirection'] == "收入" else round(next_bal + next_amt, 2)
            if abs(curr_bal - expected_bal) > 0.01:
                print(f"[Downward] row {new_data[i]['txId']}: balance {curr_bal}, expected {expected_bal}")
                new_data[i]['txBalance'] = str(expected_bal)
                fix_count += 1
        # Upward pass (toward the past): current balance = previous balance +/- current amount
        for i in range(anchor_idx - 1, 0, -1):  # stop at index 1 so i-1 never underflows
            curr_bal = to_float(new_data[i]['txBalance'])
            pre_bal = to_float(new_data[i - 1]['txBalance'])
            curr_amt = to_float(new_data[i]['txAmount'])
            expected_bal = round(pre_bal + curr_amt, 2) if new_data[i]['txDirection'] == "收入" else round(pre_bal - curr_amt, 2)
            if abs(curr_bal - expected_bal) > 0.01:
                print(f"[Upward] row {new_data[i]['txId']}: balance {curr_bal}, expected {expected_bal}")
                new_data[i]['txBalance'] = str(expected_bal)
                fix_count += 1
        # 3. Circuit breaker
        fix_ratio = fix_count / len(new_data)
        if fix_ratio > 0.4:
            print(f"⚠️ Correction ratio {fix_ratio:.2%} exceeds 40%; data is likely non-contiguous, abandoning corrections.")
            return parsed_data
        print(f"✅ Balance reconciliation passed; auto-corrected {fix_count} rows.")
        return new_data

    async def _invoke_miner_u(self, file_path: str) -> str:
        """Call MinerU and extract the raw row data (plain httpx here, since this is not an LLM call)."""
        miner_start_time = time.perf_counter()
        print("\n" + "=" * 40)
        print("📌 [Step 1 - Data extraction] starting")
        deal_rows = 0
        try:
            # MinerU is a standalone service, so keep using plain httpx
            async with httpx.AsyncClient() as client:
                with open(file_path, 'rb') as f:
                    files = {'file': (os.path.basename(file_path), f)}
                    data = {'folderId': 'text'}
                    print("🔄 Extracting data...")
                    response = await client.post(self.multimodal_api_url, files=files, data=data, timeout=120.0)
                if response.status_code == 200:
                    res_json = response.json()
                    full_md_list = []
                    for element in res_json.get('convert_json', []):
                        if 'md' in element:
                            full_md_list.append(element['md'])
                        if 'rows' in element:
                            deal_rows += len(element['rows'])
                    print(f"📊 Extraction result: {deal_rows - 1} data rows")
                    return "\n\n".join(full_md_list)
                return ""
        except Exception as e:
            print(f"❌ MinerU call failed: {e}")
            return ""
        finally:
            print("✅ [Step 1 - Data extraction] finished")
            print(f"⏱️ Elapsed: {time.perf_counter() - miner_start_time:.2f} s")
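
    # Assumed MinerU response shape, inferred from the parsing above (not an
    # official schema):
    #   {"convert_json": [{"md": "| col | ... |", "rows": [...]}, ...]}
    # Only the 'md' fragments are joined and returned; 'rows' is used solely
    # for the extracted-row count.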

    # --- 🆕 Core logic: SQLite transformation engine ---
    def _init_sqlite_db(self, data_rows: list, header_line: str, delimiter='|') -> tuple:
        """
        Load the Markdown row data into a generic wide table in an in-memory SQLite database.
        Returns: (conn, max_cols)
        """
        # 1. Create the in-memory database
        conn = sqlite3.connect(":memory:")
        cursor = conn.cursor()
        header_fingerprint = "".join(header_line.strip().strip('|').split())
        header_added = False  # ensure only one header row enters the database
        # 2. Find the maximum column count and build a generic wide table (row_id, c0, c1, ... cN)
        max_cols = 0
        parsed_rows = []
        # Preprocess: clean the Markdown separators
        for row in data_rows:
            # Strip leading/trailing '|'
            clean_row = row.strip().strip('|')
            # A. Skip pure divider rows (e.g. | --- | --- |)
            if not re.search(r'[\u4e00-\u9fa5a-zA-Z0-9]', clean_row):
                continue
            # B. Fingerprint the current row
            current_fingerprint = "".join(clean_row.split())
            # C. Core check: keep the first header occurrence, drop repeats
            if current_fingerprint == header_fingerprint:
                if not header_added:
                    # First sighting of the header fingerprint: let it into the database
                    header_added = True
                else:
                    # Any later identical header row is skipped
                    continue
            # Split into cells
            parts = [p.strip() for p in clean_row.split(delimiter)]
            if len(parts) > max_cols:
                max_cols = len(parts)
            parsed_rows.append(parts)
        if max_cols == 0:
            conn.close()
            return None, None
        # Dynamic CREATE TABLE statement
        cols_def = ", ".join([f"c{i} TEXT" for i in range(max_cols)])
        create_sql = f"CREATE TABLE temp_raw_data (row_id INTEGER PRIMARY KEY AUTOINCREMENT, {cols_def});"
        cursor.execute(create_sql)
        # 3. Bulk-insert the data
        insert_sql = f"INSERT INTO temp_raw_data ({', '.join([f'c{i}' for i in range(max_cols)])}) VALUES ({', '.join(['?' for _ in range(max_cols)])})"
        # Pad rows shorter than the longest row with None
        final_data = []
        for p in parsed_rows:
            padding = [None] * (max_cols - len(p))
            final_data.append(p + padding)
        cursor.executemany(insert_sql, final_data)
        conn.commit()
        return conn, max_cols
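
    # Illustrative result: a statement whose widest row has 6 cells produces
    #   CREATE TABLE temp_raw_data (row_id INTEGER PRIMARY KEY AUTOINCREMENT,
    #                               c0 TEXT, c1 TEXT, c2 TEXT, c3 TEXT, c4 TEXT, c5 TEXT);
    # and each cleaned Markdown row becomes one INSERT, padded to 6 cells.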

    def _get_sql_generation_prompt(self) -> ChatPromptTemplate:
        system_template = """
# Role
You are a SQLite expert.
# Task
There is a table named `temp_raw_data` that stores raw OCR output.
Its columns are named `c0`, `c1`, `c2`... `cN`.
Based on the provided header and data sample, write one SQL query that maps the raw columns onto the standard output fields.
# Target Schema (Output Columns)
Your SQL must `SELECT` the following fields (order is fixed):
1. `txId`: transaction id. If the raw data has none, use `row_id`.
2. `txDate`: transaction date (format YYYY-MM-DD).
3. `txTime`: transaction time (format HH:mm:ss). Return '00:00:00' if absent.
4. `txAmount`: transaction amount (absolute numeric value, **commas must be removed**, cast to REAL/FLOAT).
5. `txDirection`: direction (must be derived by logic; output '收入' (income) or '支出' (expense)).
6. `txBalance`: balance (commas removed).
7. `txSummary`: summary / purpose.
8. `txCounterparty`: counterparty account / name.
# Logic Rules (Crucial!)
1. **Direction Logic**:
- With a dedicated debit/credit column: usually "借" (debit) = '支出', "贷" (credit) = '收入'.
- With separate income/expense columns: whichever column holds a value decides the direction.
- With signed amounts: a negative sign usually means expense.
- Use SQL `CASE WHEN ... THEN ... ELSE ... END` for this.
2. **Data Cleaning**:
- Amount fields must handle thousands separators: `CAST(REPLACE(c?, ',', '') AS REAL)`
- Dates must be cleaned.
# Output JSON Format
```json
{{
"sql": "SELECT ... FROM temp_raw_data WHERE ..."
}}
```
"""
        user_template = """
# Table Info
Max Columns: {max_cols} Generic Column Names: c0, c1, ... c{max_cols_minus_1}
# Data Preview (Header + First 3 Rows)
{data_preview}
# Instruction
Write the SQL statement that extracts and cleans the data. Note: do not wrap it in Markdown sql fences; return the JSON directly. Skip the header row (usually row_id = 1 is the header, so use WHERE row_id > 1). """
        return ChatPromptTemplate.from_messages([("system", system_template), ("user", user_template)])
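
    # A hedged sketch of the kind of SQL the prompt steers the model toward
    # (hypothetical column positions; the real mapping depends on the OCR'd header):
    #   SELECT c0 AS txId, c1 AS txDate, '00:00:00' AS txTime,
    #          ABS(CAST(REPLACE(c3, ',', '') AS REAL)) AS txAmount,
    #          CASE WHEN c2 = '借' THEN '支出' ELSE '收入' END AS txDirection,
    #          REPLACE(c4, ',', '') AS txBalance,
    #          c5 AS txSummary, c6 AS txCounterparty
    #   FROM temp_raw_data WHERE row_id > 1;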

    async def _generate_transform_sql(self, header_row: str, sample_rows: list, max_cols: int) -> str:
        """Have the LLM write the transformation SQL."""
        # Build the preview with c0/c1-style column hints so the LLM can map columns
        preview_text = ""
        # Header preview
        header_parts = [p.strip() for p in header_row.strip().strip('|').split('|')]
        header_map = " | ".join([f"c{i}({val})" for i, val in enumerate(header_parts)])
        preview_text += f"Mapping Hint: {header_map}\n"
        preview_text += "-" * 50 + "\n"
        # Data preview
        for row in sample_rows:
            preview_text += row + "\n"
        prompt_params = {
            "max_cols": max_cols,
            "max_cols_minus_1": max_cols - 1,
            "data_preview": preview_text
        }
        # Record the API call start time
        call_start_time = datetime.datetime.now()
        chain = self._get_sql_generation_prompt() | self.llm | self.parser
        print("🧠 [LLM] generating SQL cleaning logic...")
        result = ""
        try:
            result = await chain.ainvoke(prompt_params)
            sql = result.get("sql")
            print(f"💡 [LLM] generated SQL:\n{sql}")
            return sql
        except Exception as e:
            print(f"❌ SQL generation failed: {e}")
            return ""
        finally:
            # Record the API call end time
            call_end_time = datetime.datetime.now()
            # Log the call - simplified: keep only the prompt and the result data
            call_id = f"api_llm_data_transform_{(call_end_time - call_start_time).total_seconds():.2f}"
            # Extract the prompt from the chain (when possible)
            prompt_content = ""
            try:
                # Try to read the final prompt back from the chain
                if hasattr(chain, 'get_prompts'):
                    prompts = chain.get_prompts()
                    if prompts:
                        prompt_content = str(prompts[-1])
                else:
                    # Fall back to a basic description of the inputs
                    prompt_content = f"Input data, max_cols: {max_cols}, preview_text: {preview_text}..."
            except Exception:
                prompt_content = f"Input data, max_cols: {max_cols}, preview_text: {preview_text}..."
            api_call_info = {
                "call_id": call_id,
                "start_time": call_start_time.isoformat(),
                "end_time": call_end_time.isoformat(),
                "duration": (call_end_time - call_start_time).total_seconds(),
                "prompt": prompt_content,
                "input_params": {
                    "max_cols": max_cols,
                    "max_cols_minus_1": max_cols - 1,
                    "data_preview": preview_text
                },
                "llm_result": result
            }
            self.api_calls.append(api_call_info)
            # Persist the API result to a file (Markdown, easier to read),
            # inside a folder dedicated to this run ID
            run_id = os.environ.get('FLOW_RUN_ID', 'default')
            api_results_dir = f"api_results_{run_id}"
            os.makedirs(api_results_dir, exist_ok=True)
            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"{timestamp}_{call_id}.md"
            filepath = os.path.join(api_results_dir, filename)
            try:
                with open(filepath, 'w', encoding='utf-8') as f:
                    f.write("# Data transformation result\n\n")
                    f.write("## Call info\n\n")
                    f.write(f"- Call ID: {call_id}\n")
                    f.write(f"- Start time: {call_start_time.isoformat()}\n")
                    f.write(f"- End time: {call_end_time.isoformat()}\n")
                    f.write(f"- Duration: {(call_end_time - call_start_time).total_seconds():.2f} s\n")
                    f.write("\n## Prompt\n\n")
                    f.write("```\n")
                    f.write(api_call_info["prompt"])
                    f.write("\n```\n\n")
                    f.write("## Input parameters\n\n")
                    f.write("```json\n")
                    f.write(json.dumps(api_call_info["input_params"], ensure_ascii=False, indent=2))
                    f.write("\n```\n\n")
                    f.write("## LLM result\n\n")
                    f.write("```json\n")
                    f.write(json.dumps(api_call_info["llm_result"], ensure_ascii=False, indent=2))
                    f.write("\n```\n")
                print(f"[API_RESULT] Saved API result file: {filepath}")
            except Exception as e:
                print(f"[ERROR] Failed to save API result file: {filepath}, error: {str(e)}")

    async def parse_to_csv(self, file_path: str) -> str:
        # 1. Get the Markdown
        md_text = await self._invoke_miner_u(file_path)
        if not md_text:
            return ""
        # Record the start time (time.perf_counter for high resolution)
        start_time = time.perf_counter()
        print("\n" + "=" * 40)
        print("📌 [Step 2 - Standardization] starting")
        # 2. Preprocess the data rows
        raw_lines = md_text.splitlines()
        clean_lines = [l.strip() for l in raw_lines if l.strip() and "|" in l]
        # Simple header detection: a line containing at least 2 keywords
        header_line = ""
        header_idx = 0
        # Keywords: date, amount, balance, summary, purpose, debit, credit
        keywords = ["日期", "金额", "余额", "摘要", "用途", "借", "贷"]
        for idx, line in enumerate(clean_lines):
            if sum(1 for k in keywords if k in line) >= 2:
                header_line = line
                header_idx = idx
                break
        if not header_line:
            header_line = clean_lines[0]
        # Data rows (keep the raw data; the header goes into the DB too and is
        # filtered later via row_id > header_idx + 1)
        data_rows = clean_lines
        # 3. Load into SQLite
        conn, max_cols = self._init_sqlite_db(data_rows, header_line)
        if not conn:
            return ""
        try:
            # 4. Have the LLM generate the SQL
            # Use the header plus the first 3 data rows as the sample
            sample_data = clean_lines[header_idx:header_idx + 4]
            sql_query = await self._generate_transform_sql(header_line, sample_data, max_cols)
            if not sql_query:
                return ""
            # 5. Execute the SQL
            cursor = conn.cursor()
            # Safety check: only a plain SELECT is allowed
            if any(kw in sql_query.upper() for kw in ("DROP", "DELETE", "UPDATE", "INSERT", "ALTER")):
                raise ValueError("Unsafe SQL detected")
            # The LLM sometimes forgets to filter out the header row; the prompt
            # asks for WHERE row_id > 1. The simple approach here assumes the SQL
            # is correct; a LIMIT could be appended for testing.
            print("🚀 [SQLite] executing query...")
            cursor.execute(sql_query)
            results = cursor.fetchall()
            print(f"✅ Extraction succeeded: {len(results)} rows")
            # 6. Export as a CSV string
            output = io.StringIO()
            writer = csv.writer(output, quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
            # Write the standard header
            csv_header = ["txId", "txDate", "txTime", "txAmount", "txDirection", "txBalance", "txSummary",
                          "txCounterparty", "createdAt"]
            writer.writerow(csv_header)
            created_at = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            for row in results:
                # row is a tuple (id, date, time, amt, dir, bal, sum, counter);
                # convert it to a list and append createdAt
                row_list = list(row)
                # --- 🆕 New: take the absolute value of txAmount ---
                try:
                    raw_amount = str(row_list[3]).replace(',', '')  # strip commas once more, just in case
                    if raw_amount:
                        # Parse as float and take the absolute value
                        row_list[3] = abs(float(raw_amount))
                except (ValueError, TypeError):
                    # Conversion failed (e.g. OCR produced text): fall back to 0.0
                    print(f"⚠️ Amount conversion failed: {row_list[3]}")
                    row_list[3] = 0.0
                # Safety cleanup: handle possible None values
                row_list = [str(x) if x is not None else "" for x in row_list]
                # Keep only the first 8 fields (in case the LLM selected extras)
                final_row = row_list[:8] + [created_at]
                writer.writerow(final_row)
            return output.getvalue()
        except sqlite3.Error as e:
            print(f"❌ SQLite execution error: {e}")
            # A retry loop could feed the error back to the LLM to repair the SQL
            return ""
        finally:
            conn.close()
            print("✅ [Step 2 - Standardization] finished")
            print(f"⏱️ Total elapsed: {time.perf_counter() - start_time:.2f} s")
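
    # Illustrative output (hypothetical values):
    #   txId,txDate,txTime,txAmount,txDirection,txBalance,txSummary,txCounterparty,createdAt
    #   1,2024-01-01,09:30:00,50.0,收入,150.0,转账,622202******1234,2024-01-02 12:00:00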

    # --- Workflow entry points ---
    async def parse_and_save_to_file(self, file_path: str, output_dir: str = "output") -> str:
        current_script_path = os.path.abspath(__file__)
        current_dir = os.path.dirname(current_script_path)
        file_full_name = os.path.basename(file_path)
        file_name = os.path.splitext(file_full_name)[0]
        output_dir = os.path.normpath(os.path.join(current_dir, "..", "..", output_dir))
        os.makedirs(output_dir, exist_ok=True)
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        file_name = f"{file_name}_data_standard_{timestamp}.csv"
        full_path = os.path.join(output_dir, file_name)
        csv_result = await self.parse_to_csv(file_path)
        if csv_result:
            with open(full_path, "w", encoding="utf-8") as f:
                f.write(csv_result)
            return full_path
        else:
            raise Exception("Data parsing failed: no valid content was produced")
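
    # The result file lands two directories above this script, e.g. for a
    # hypothetical input "statement.pdf":
    #   <project_root>/data_files/statement_data_standard_20240101_120000.csv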

    async def run_workflow_task(self, input_file_path: str) -> dict:
        # 1. Record the start time (time.perf_counter for high resolution)
        start_time = time.perf_counter()
        print("BEGIN---data standardization task started---")
        try:
            print(f"File to standardize: {input_file_path}")
            saved_path = await self.parse_and_save_to_file(input_file_path, "data_files")
            print(f"Result file saved to: {saved_path}")
            return {
                "status": "success",
                "file_path": saved_path,
                "file_name": os.path.basename(saved_path),
                "timestamp": datetime.datetime.now().isoformat()
            }
        except Exception as e:
            return {"status": "error", "message": str(e)}
        finally:
            end_time = time.perf_counter()
            elapsed_time = end_time - start_time
            print(f"⏱️ Total elapsed: {elapsed_time:.2f} s")
            print("END---data standardization task finished")


async def data_standardize(api_key: str, base_url: str, model_name: str, multimodal_api_url: str, input_file_path: str) -> dict:
    """
    Entry point for data standardization.
    """
    # Create the agent
    agent = TransactionParserAgent(
        api_key=api_key,
        base_url=base_url,
        model_name=model_name,
        multimodal_api_url=multimodal_api_url
    )
    # Run the standardization workflow
    return await agent.run_workflow_task(input_file_path)


# --- Run ---
async def main():
    agent = TransactionParserAgent(
        api_key="",
        multimodal_api_url="http://103.154.31.78:20012/api/file/read",
        model_name="Qwen3-32B",
        base_url="http://10.192.72.12:9996/v1",
    )
    current_script_path = os.path.abspath(__file__)
    current_dir = os.path.dirname(current_script_path)
    # Simulate the workflow handing in a file to process
    input_pdf = "data_files/11111.png"
    filepath = os.path.normpath(os.path.join(current_dir, "..", "..", input_pdf))
    if not os.path.exists(filepath):
        print(f"{filepath} does not exist")
        return
    result = await agent.run_workflow_task(filepath)
    if result["status"] == "success":
        print("🎯 [Data standardization] task complete!")
    else:
        print(f"❌ Task failed: {result['message']}")


if __name__ == "__main__":
    asyncio.run(main())