
Adjust the output file path of the data standardization module

chaixuhong 6 days ago
parent commit f9a63c0185
1 changed file with 29 additions and 8 deletions
    llmops/agents/data_stardard.py

+ 29 - 8
llmops/agents/data_stardard.py

@@ -115,12 +115,29 @@ JSON Array:
         clean_lines = [l.strip() for l in raw_lines if l.strip()]
         if len(clean_lines) < 2: return ""
 
-        table_header = clean_lines[0]
+        # --- [Core improvement: dynamically locate the table header] ---
+        table_header = ""
+        header_index = 0
+
+        header_keywords = ["余额", "金额", "账号", "日期", "借/贷", "摘要"]
+
+        for idx, line in enumerate(clean_lines):
+            # Treat a line as the header if it contains 2 or more keywords plus the Markdown table separator '|'
+            hit_count = sum(1 for kw in header_keywords if kw in line)
+            if hit_count >= 2 and "|" in line:
+                table_header = line
+                header_index = idx
+                break
+
+        if not table_header:
+            table_header = clean_lines[0]
+            header_index = 0
         data_rows = []
-
-        for line in clean_lines[1:]:
+        for line in clean_lines[header_index + 1:]:
             if all(c in '|- ' for c in line): continue
             if line == table_header: continue
+            # Filter out page numbers or stray text that MinerU may append after the table
+            if "|" not in line: continue
             data_rows.append(line)
 
         csv_header = "txId,txDate,txTime,txAmount,txDirection,txBalance,txSummary,txCounterparty,createdAt\n"
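
The keyword list above matches common Chinese bank-statement column names (balance, amount, account number, date, debit/credit, summary). A minimal standalone sketch of the new header-detection and row-filtering heuristic, run against a made-up MinerU-style Markdown table (the sample data is illustrative, not from the repo):

```python
# Sketch of the dynamic header-detection heuristic from the hunk above.
HEADER_KEYWORDS = ["余额", "金额", "账号", "日期", "借/贷", "摘要"]

raw = """银行流水明细
| 日期 | 摘要 | 借/贷 | 金额 | 余额 |
|------|------|-------|------|------|
| 2024-01-02 | 转账 | 贷 | 100.00 | 1100.00 |
第 1 页
"""

clean_lines = [l.strip() for l in raw.splitlines() if l.strip()]
table_header, header_index = "", 0
for idx, line in enumerate(clean_lines):
    # A line with >= 2 keywords and a '|' separator is taken as the header.
    if sum(kw in line for kw in HEADER_KEYWORDS) >= 2 and "|" in line:
        table_header, header_index = line, idx
        break

data_rows = [
    line for line in clean_lines[header_index + 1:]
    if line != table_header                 # drop repeated headers
    and not all(c in "|- " for c in line)   # drop the |---| separator row
    and "|" in line                         # drop page numbers / stray text
]
print(table_header)
print(data_rows)  # one data row; the title line and '第 1 页' are filtered out
```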
@@ -139,7 +156,7 @@ JSON Array:
             chunk_str = "\n".join(context_chunk)
 
             print(f"🔄 Converting batch {i // batch_size + 1}, containing {len(chunk)} records...")
-            print(f"Data chunk to convert:\n{chunk_str}")
+            # print(f"Data chunk to convert:\n{chunk_str}")
             try:
                 # --- LangChain invocation ---
                 # Call the chain asynchronously via ainvoke
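
For context, a self-contained sketch of the batching pattern around `ainvoke` seen here. The chain is stubbed out (`FakeChain` and the payload shape are assumptions; the real module builds `context_chunk` from the header plus the batch):

```python
import asyncio

class FakeChain:
    """Stand-in for the LangChain runnable; only mimics the ainvoke signature."""
    async def ainvoke(self, payload: dict) -> str:
        rows = payload["chunk"].splitlines()[1:]  # everything after the header line
        return f"<JSON for {len(rows)} rows>"

async def convert(data_rows: list[str], table_header: str, batch_size: int = 20) -> None:
    chain = FakeChain()
    for i in range(0, len(data_rows), batch_size):
        chunk = data_rows[i:i + batch_size]
        # Prepend the header so each batch carries the column names for the LLM.
        chunk_str = "\n".join([table_header] + chunk)
        print(f"🔄 Converting batch {i // batch_size + 1}, containing {len(chunk)} records...")
        print(await chain.ainvoke({"chunk": chunk_str}))

asyncio.run(convert([f"| row {n} | 1.00 |" for n in range(45)], "| 摘要 | 金额 |"))
```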
@@ -198,10 +215,14 @@ JSON Array:
         """
         current_script_path = os.path.abspath(__file__)
         current_dir = os.path.dirname(current_script_path)
-        output_dir = os.path.normpath(os.path.join(current_dir, "..", output_dir))
+
+        file_name = os.path.splitext(file_path)[0]  # input path without its extension (keeps any directory prefix)
+
+        output_dir = os.path.normpath(os.path.join(current_dir, "..", "..", output_dir))
+
         os.makedirs(output_dir, exist_ok=True)
         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
-        file_name = f"statement_{timestamp}.csv"
+        file_name = f"{file_name}_data_standard_{timestamp}.csv"
         full_path = os.path.join(output_dir, file_name)
 
         csv_result = await self.parse_to_csv(file_path)
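
To see what the changed path logic produces, a dry-run trace with illustrative values (the repo layout is assumed from the `"..", ".."` climb; note that `os.path.splitext` keeps the input's directory prefix, so it carries into the output file name):

```python
import datetime
import os

current_dir = "/repo/llmops/agents"         # stand-in for os.path.dirname(os.path.abspath(__file__))
file_path = "data_files/11111.png"          # example input, as in main() below
output_dir = "data_files"                   # the new api_results_dir

file_name = os.path.splitext(file_path)[0]  # -> "data_files/11111" (directory prefix kept)
output_dir = os.path.normpath(os.path.join(current_dir, "..", "..", output_dir))
# ".." twice climbs from llmops/agents to the repo root -> /repo/data_files

timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
full_path = os.path.join(output_dir, f"{file_name}_data_standard_{timestamp}.csv")
print(full_path)  # e.g. /repo/data_files/data_files/11111_data_standard_20250101_120000.csv
```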
@@ -219,7 +240,7 @@ JSON Array:
         """
         try:
             print(f"Input file path: {input_file_path}")
-            api_results_dir = "api_results"
+            api_results_dir = "data_files"
             saved_path = await self.parse_and_save_to_file(input_file_path, api_results_dir)
 
             return {
@@ -245,7 +266,7 @@ async def main():
     current_script_path = os.path.abspath(__file__)
     current_dir = os.path.dirname(current_script_path)
     # Simulate the Workflow passing in a file to process
-    input_pdf = "data_files/1.pdf"
+    input_pdf = "data_files/11111.png"
     filepath = os.path.normpath(os.path.join(current_dir, "..", "..", input_pdf))
 
     if not os.path.exists(filepath):