@@ -1,5 +1,5 @@
 import os
-import json
+import time
 import asyncio
 import io
 import csv
@@ -23,9 +23,6 @@ class TransactionParserAgent:
             base_url=base_url,
             temperature=0.1,
             max_retries=3,  # LangChain built-in retry mechanism
-            model_kwargs={
-                "response_format": {"type": "json_object"}  # force JSON mode
-            },
             # Configure the httpx client to tune timeouts and connections (LangChain allows passing an http_client through)
             http_client=httpx.Client(
                 timeout=httpx.Timeout(300.0, read=300.0, connect=60.0),
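
# --- Note on the removed model_kwargs above: strict JSON mode can still be
# requested per call via Runnable.bind(), assuming an OpenAI-compatible
# backend that accepts response_format. A sketch only, not part of this
# change (the model name is a placeholder):

from langchain_openai import ChatOpenAI  # assumed import; not shown in this diff

llm = ChatOpenAI(model="placeholder-model", temperature=0.1)
json_llm = llm.bind(response_format={"type": "json_object"})
# json_llm.invoke(...) then asks the backend for a JSON object,
# instead of relying solely on the prompt rules added below.
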
@@ -39,27 +36,38 @@

     async def _invoke_miner_u(self, file_path: str) -> str:
         """Call MinerU and extract the raw row data (the httpx call stays as-is, since this is not an LLM)."""
-        print(f"🚀 MinerU parsing: {os.path.basename(file_path)}")
+        miner_start_time = time.perf_counter()
+        print("\n" + "=" * 40)
+        print("📌 [Step 1 - Data Extraction] started")
+        deal_rows = 0
         try:
             # MinerU is a standalone service, so keep using raw httpx
             async with httpx.AsyncClient() as client:
                 with open(file_path, 'rb') as f:
                     files = {'file': (os.path.basename(file_path), f)}
                     data = {'folderId': 'text'}
+                    print("🔄 Extracting data...")
                     response = await client.post(self.multimodal_api_url, files=files, data=data, timeout=120.0)
-
             if response.status_code == 200:
                 res_json = response.json()
                 full_md_list = []
                 for element in res_json.get('convert_json', []):
                     if 'md' in element:
                         full_md_list.append(element['md'])
+                    if 'rows' in element:
+                        deal_rows += len(element['rows'])
+                print(f"📊 Extraction result: {deal_rows - 1} rows extracted")  # -1 assumes the first row is the table header
                 return "\n\n".join(full_md_list)
             return ""
         except Exception as e:
             print(f"❌ MinerU call failed: {e}")
             return ""
-
+        finally:
+            miner_end_time = time.perf_counter()
+            miner_elapsed_time = miner_end_time - miner_start_time
+            print("✅ [Step 1 - Data Extraction] finished")
+            print(f"⏱️ Elapsed: {miner_elapsed_time:.2f} s")
+            print("\n" + "=" * 40)
     def _get_csv_prompt_template(self) -> ChatPromptTemplate:
         """
         Build the LangChain prompt template
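
# --- Note: the same perf_counter/try-finally timing pattern now appears in
# step 1, step 2, and the workflow entry. A small context manager could factor
# it out; a sketch only (step_timer is a hypothetical helper, not in this diff):

import time
from contextlib import contextmanager

@contextmanager
def step_timer(label: str):
    # Print elapsed wall-clock time for a labelled step, even if it raises.
    start = time.perf_counter()
    try:
        yield
    finally:
        print(f"⏱️ {label} elapsed: {time.perf_counter() - start:.2f} s")

# Usage: with step_timer("[Step 1 - Data Extraction]"): ...
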
@@ -90,6 +98,12 @@ class TransactionParserAgent:
 1. Strictly return a JSON array of objects.
 2. Each object must contain the 8 field names above as keys.
 3. Do not output any explanatory text or Markdown code-fence tags.
+
+# Anti-Hallucination Rules
+- Do not infer any field that does not explicitly appear in the raw data
+- Do not compute or guess balances
+- Do not fill in counterparty names from general knowledge
+- If a field is missing, return the empty string ""
        """
        user_template = """# Input Data
{chunk_data}
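
# --- Note: rule 3 forbids Markdown fences, but models occasionally emit them
# anyway. A defensive parse of the model reply could look like this; a sketch
# only (parse_json_array is a hypothetical helper, not in this diff):

import json
import re

def parse_json_array(text: str) -> list:
    # Strip a leading ``` or ```json fence and a trailing ``` if present, then parse strictly.
    cleaned = re.sub(r"^```(?:json)?\s*|\s*```$", "", text.strip())
    return json.loads(cleaned)
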
@@ -107,10 +121,11 @@ JSON Array:
         md_text = await self._invoke_miner_u(file_path)
         if not md_text:
             return ""
-
+        # Record the start time (time.perf_counter gives a high-resolution clock)
+        switch_start_time = time.perf_counter()
+        print("-------[Step 2 - Normalization] started-------")
         # Initial split
         raw_lines = md_text.splitlines()
-
         # Take the true first line as the reference table header
         clean_lines = [l.strip() for l in raw_lines if l.strip()]
         if len(clean_lines) < 2: return ""
@@ -154,8 +169,9 @@ JSON Array:
             chunk = data_rows[i: i + batch_size]
             context_chunk = [table_header] + chunk
             chunk_str = "\n".join(context_chunk)
-
-            print(f"🔄 Converting batch {i // batch_size + 1}, {len(chunk)} rows...")
+            # 1. Record the start time (time.perf_counter gives a high-resolution clock)
+            start_time = time.perf_counter()
+            print(f"🔄 Converting batch {i // batch_size + 1} via LLM, {len(chunk)} rows...")
             # print(f"Data chunk to convert:\n{chunk_str}")
             try:
                 # --- LangChain invocation ---
@@ -206,7 +222,13 @@ JSON Array:

             except Exception as e:
                 print(f"⚠️ Batch failed: {e}")
-
+            finally:
+                end_time = time.perf_counter()
+                elapsed_time = end_time - start_time
+                print(f"Elapsed: {elapsed_time:.2f} s")
+        switch_end_time = time.perf_counter()
+        switch_elapsed_time = switch_end_time - switch_start_time
+        print(f"-------[Step 2 - Normalization] finished------- {global_tx_counter - 1} rows converted, total elapsed: {switch_elapsed_time:.2f} s")
         return csv_content

     async def parse_and_save_to_file(self, file_path: str, output_dir: str = "output") -> str:
@@ -237,11 +259,15 @@ JSON Array:
         """
         Standard workflow entry point
         """
+        # 1. Record the start time (time.perf_counter gives a high-resolution clock)
+        start_time = time.perf_counter()
+        print("BEGIN---Data normalization task started---")
         try:
             print(f"File to normalize: {input_file_path}")
             api_results_dir = "data_files"
             saved_path = await self.parse_and_save_to_file(input_file_path, api_results_dir)

+
             return {
                 "status": "success",
                 "file_path": saved_path,
@@ -253,7 +279,10 @@ JSON Array:
                 "status": "error",
                 "message": str(e)
             }
-
+        finally:
+            end_time = time.perf_counter()
+            elapsed_time = end_time - start_time
+            print(f"END---Data normalization task total elapsed: {elapsed_time:.2f} s")

 # --- Run ---
 async def main():
@@ -265,7 +294,7 @@ async def main():
     current_script_path = os.path.abspath(__file__)
     current_dir = os.path.dirname(current_script_path)
     # Simulate the workflow passing in a file to process
-    input_pdf = "data_files/11111.png"
+    input_pdf = "data_files/1.pdf"
     filepath = os.path.normpath(os.path.join(current_dir, "..", "..", input_pdf))

     if not os.path.exists(filepath):
@@ -276,7 +305,6 @@

     if result["status"] == "success":
         print("🎯 [Data Normalization] task complete!")
-        print(f"📂 Normalized file output path: {result['file_path']}")
     else:
         print(f"❌ Task failed: {result['message']}")
