#!/bin/bash
# filepath: ocr_platform/ocr_tools/daemons/mineru_local_daemon.sh
# Purpose: run MinerU2.5-Pro as a local llama-server service (macOS) using the GGUF model format
# Target: Mac M4 Pro 48 GB, with Metal GPU acceleration
# Model download: https://huggingface.co/mradermacher/MinerU2.5-Pro-2604-1.2B-GGUF
#
# First-time download (fetches the Q8_0 quantization automatically):
#   llama-server -hf mradermacher/MinerU2.5-Pro-2604-1.2B-GGUF:Q8_0
# The model is cached under ~/Library/Caches/llama.cpp/ and can be moved manually to ~/models/mineru_vl/.
# Quick API test:
#   curl -X POST http://localhost:8103/v1/chat/completions -d @payload.json
# ----!!!!! NOTE: the currently available GGUF is not the official final release and cannot parse table structure ----!!!!!
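#
# A minimal payload.json sketch for the curl test above (illustrative only:
# the prompt text and image path are placeholders; it mirrors the
# chat/completions body printed by test_client below):
# {
#   "model": "MinerU2.5-Pro-2604-1.2B",
#   "messages": [
#     {"role": "user", "content": [
#       {"type": "text", "text": "Table Recognition:"},
#       {"type": "image_url", "image_url": {"url": "file:///path/to/image.png"}}
#     ]}
#   ],
#   "max_tokens": 8192
# }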

LOGDIR="$HOME/workspace/logs"
mkdir -p "$LOGDIR"
PIDFILE="$LOGDIR/mineru_llamaserver.pid"
LOGFILE="$LOGDIR/mineru_llamaserver.log"

# Configuration
CONDA_ENV="mineru2"
PORT="8103"
HOST="0.0.0.0"

# Local GGUF model paths (the actual location after downloading via `llama-server -hf`)
HF_CACHE="$HOME/models/hf_home/hub/models--mradermacher--MinerU2.5-Pro-2604-1.2B-GGUF/snapshots/70429e9c728b6a5e904f358a9936c17bd3f5f4b8"
MODEL_PATH="$HF_CACHE/MinerU2.5-Pro-2604-1.2B.Q8_0.gguf"
MMPROJ_PATH="$HF_CACHE/MinerU2.5-Pro-2604-1.2B.mmproj-Q8_0.gguf"

# Model alias (the model ID exposed by the API; matches the `model` field in the yaml config)
MODEL_NAME="MinerU2.5-Pro-2604-1.2B"

# llama-server parameters
# Note: MinerU2.5-Pro has n_ctx_train=8192, so 8192 is sufficient
CONTEXT_SIZE="8192"   # context length (matches the model's n_ctx_train=8192)
GPU_LAYERS="99"       # Metal GPU layers (99 = offload all)
THREADS="8"           # CPU threads (recommended for M4 Pro)
BATCH_SIZE="512"      # batch size
UBATCH_SIZE="128"     # micro-batch size
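# Rough sizing: Q8_0 stores about one byte per weight, so the 1.2B model is
# roughly 1.3 GB on disk, plus the mmproj file and the KV cache at runtime;
# this fits comfortably in 48 GB of unified memory with all layers offloaded.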

# Activate the conda environment
if [ -f "$HOME/anaconda3/etc/profile.d/conda.sh" ]; then
    source "$HOME/anaconda3/etc/profile.d/conda.sh"
    conda activate "$CONDA_ENV"
elif [ -f "$HOME/miniconda3/etc/profile.d/conda.sh" ]; then
    source "$HOME/miniconda3/etc/profile.d/conda.sh"
    conda activate "$CONDA_ENV"
elif [ -f "/opt/miniconda3/etc/profile.d/conda.sh" ]; then
    source "/opt/miniconda3/etc/profile.d/conda.sh"
    conda activate "$CONDA_ENV"
else
    echo "Warning: conda initialization file not found, trying direct path"
    export PATH="/opt/miniconda3/envs/$CONDA_ENV/bin:$PATH"
fi

start() {
    if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "MinerU2.5-Pro llama-server is already running"
        return 1
    fi

    echo "Starting the MinerU2.5-Pro llama-server daemon..."
    echo "Host: $HOST, Port: $PORT"
    echo "Main model: $MODEL_PATH"
    echo "Multimodal projector: $MMPROJ_PATH"
    echo "Context length: $CONTEXT_SIZE"
    echo "GPU layers: $GPU_LAYERS (Metal)"
    echo "Threads: $THREADS"

    # Verify that the model files exist
    if [ ! -f "$MODEL_PATH" ]; then
        echo "❌ Main model file not found: $MODEL_PATH"
        echo "Download the model first with:"
        echo "  llama-server -hf mradermacher/MinerU2.5-Pro-2604-1.2B-GGUF:Q8_0"
        echo "After the download finishes, update HF_CACHE in this script (the snapshot hash may differ)"
        return 1
    fi
    if [ ! -f "$MMPROJ_PATH" ]; then
        echo "❌ Multimodal projector file not found: $MMPROJ_PATH"
        echo "Make sure the mmproj file has been downloaded"
        return 1
    fi

    # Verify the llama-server binary
    if ! command -v llama-server >/dev/null 2>&1; then
        echo "❌ llama-server not found"
        echo "Install it with: brew install llama.cpp"
        return 1
    fi

    echo "🔧 Using llama-server: $(which llama-server)"
    echo "🔧 llama.cpp version: $(llama-server --version 2>&1 | head -1 || echo 'Unknown')"
    echo "💻 System info:"
    echo "  Architecture: $(uname -m)"
    echo "  OS: $(uname -s)"
    echo "  Memory: $(sysctl -n hw.memsize | awk '{printf "%.1f GB", $1/1024/1024/1024}')"

    # Launch llama-server.
    # Note: the MinerU2.5-Pro GGUF embeds recommended sampling parameters
    # (top_k=1, top_p=0.001, temp=0.01), which llama-server applies
    # automatically; --temp 0 is set here only to ensure deterministic decoding.
    nohup llama-server \
        -m "$MODEL_PATH" \
        --mmproj "$MMPROJ_PATH" \
        --alias "$MODEL_NAME" \
        --host "$HOST" \
        --port "$PORT" \
        --media-path "$HOME/workspace" \
        -c "$CONTEXT_SIZE" \
        -ngl "$GPU_LAYERS" \
        -t "$THREADS" \
        -b "$BATCH_SIZE" \
        -ub "$UBATCH_SIZE" \
        --temp 0 \
        > "$LOGFILE" 2>&1 &

    echo $! > "$PIDFILE"
    echo "✅ MinerU2.5-Pro llama-server started, PID: $(cat "$PIDFILE")"
    echo "📋 Log file: $LOGFILE"
    echo "🌐 Service URL: http://$HOST:$PORT"
    echo "📖 OpenAI-compatible API: http://localhost:$PORT/v1 (chat/completions, models)"
    echo ""
    echo "Waiting for the service to start..."
    sleep 5  # heuristic wait; see the wait_for_ready sketch after this function
    status
}
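
# Optional: a readiness poll that could replace the fixed `sleep 5` in start()
# above (a sketch, left commented out). llama-server's /health endpoint returns
# HTTP 200 once the model has finished loading and an error status while it is
# still loading, so `curl -f` distinguishes the two:
#
# wait_for_ready() {
#     for i in {1..60}; do
#         if curl -sf --connect-timeout 2 "http://127.0.0.1:$PORT/health" >/dev/null 2>&1; then
#             return 0
#         fi
#         sleep 1
#     done
#     echo "⚠️ Timed out waiting for the service to become ready"
#     return 1
# }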

stop() {
    if [ ! -f "$PIDFILE" ]; then
        echo "MinerU2.5-Pro llama-server is not running"
        return 1
    fi
    PID=$(cat "$PIDFILE")
    echo "Stopping MinerU2.5-Pro llama-server (PID: $PID)..."
    kill "$PID"
    for i in {1..30}; do
        if ! kill -0 "$PID" 2>/dev/null; then
            break
        fi
        echo "Waiting for the process to stop... ($i/30)"
        sleep 1
    done
    if kill -0 "$PID" 2>/dev/null; then
        echo "Force-killing the process..."
        kill -9 "$PID"
    fi
    rm -f "$PIDFILE"
    echo "✅ MinerU2.5-Pro llama-server stopped"
}

status() {
    if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        PID=$(cat "$PIDFILE")
        echo "✅ MinerU2.5-Pro llama-server is running (PID: $PID)"
        echo "🌐 Service URL: http://$HOST:$PORT"
        echo "📋 Log file: $LOGFILE"

        # Check whether the port is listening
        if lsof -nP -iTCP:"$PORT" -sTCP:LISTEN >/dev/null 2>&1; then
            echo "🔗 Port $PORT is listening"
        else
            echo "⚠️ Port $PORT is not listening (the service may still be starting)"
        fi

        # Check the API response
        if command -v curl >/dev/null 2>&1; then
            if curl -s --connect-timeout 2 "http://127.0.0.1:$PORT/v1/models" >/dev/null 2>&1; then
                echo "🎯 API is responding"
            else
                echo "⚠️ API is not responding (the service may still be starting)"
            fi
        fi

        # Show process memory usage
        if command -v ps >/dev/null 2>&1; then
            MEM=$(ps -o rss= -p "$PID" 2>/dev/null | awk '{printf "%.2f GB", $1/1024/1024}')
            if [ -n "$MEM" ]; then
                echo "💾 Memory usage: $MEM"
            fi
        fi

        if [ -f "$LOGFILE" ]; then
            echo "📄 Recent log (last 3 lines):"
            tail -3 "$LOGFILE" | sed 's/^/  /'
        fi
    else
        echo "❌ MinerU2.5-Pro llama-server is not running"
        if [ -f "$PIDFILE" ]; then
            echo "Removing stale PID file..."
            rm -f "$PIDFILE"
        fi
    fi
}

logs() {
    if [ -f "$LOGFILE" ]; then
        echo "📄 MinerU2.5-Pro llama-server log:"
        echo "====================="
        tail -f "$LOGFILE"
    else
        echo "❌ Log file not found: $LOGFILE"
    fi
}

config() {
    echo "📋 Current configuration:"
    echo "  Conda env: $CONDA_ENV"
    echo "  Host: $HOST"
    echo "  Port: $PORT"
    echo "  Model alias: $MODEL_NAME"
    echo "  Main model path: $MODEL_PATH"
    echo "  Multimodal projector: $MMPROJ_PATH"
    echo "  Context length: $CONTEXT_SIZE"
    echo "  GPU layers: $GPU_LAYERS"
    echo "  Threads: $THREADS"
    echo "  Batch size: $BATCH_SIZE"
    echo "  Micro-batch size: $UBATCH_SIZE"
    echo "  PID file: $PIDFILE"
    echo "  Log file: $LOGFILE"
    echo ""
    echo "📦 Model file check:"
    if [ -f "$MODEL_PATH" ]; then
        SIZE=$(du -h "$MODEL_PATH" | cut -f1)
        echo "  ✅ Main model present ($SIZE)"
    else
        echo "  ❌ Main model missing: $MODEL_PATH"
    fi
    if [ -f "$MMPROJ_PATH" ]; then
        SIZE=$(du -h "$MMPROJ_PATH" | cut -f1)
        echo "  ✅ Multimodal projector present ($SIZE)"
    else
        echo "  ❌ Multimodal projector missing: $MMPROJ_PATH"
    fi
    echo ""
    echo "🔧 Environment check:"
    echo "  llama-server: $(which llama-server 2>/dev/null || echo 'not installed')"
    if command -v llama-server >/dev/null 2>&1; then
        LLAMA_VERSION=$(llama-server --version 2>&1 | head -1 || echo 'Unknown')
        echo "  Version: $LLAMA_VERSION"
    fi
    echo "  Conda: $(which conda 2>/dev/null || echo 'not found')"
    echo "  Current Python: $(which python 2>/dev/null || echo 'not found')"
    echo ""
    echo "💻 System info:"
    echo "  Architecture: $(uname -m)"
    echo "  OS version: $(sw_vers -productVersion 2>/dev/null || echo 'Unknown')"
    echo "  Total memory: $(sysctl -n hw.memsize 2>/dev/null | awk '{printf "%.1f GB", $1/1024/1024/1024}' || echo 'Unknown')"
    echo "  CPU cores: $(sysctl -n hw.ncpu 2>/dev/null || echo 'Unknown')"
}

test_api() {
    echo "🧪 Testing the MinerU2.5-Pro llama-server API..."
    if [ ! -f "$PIDFILE" ] || ! kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "❌ MinerU2.5-Pro llama-server is not running"
        return 1
    fi
    if ! command -v curl >/dev/null 2>&1; then
        echo "❌ curl command not found"
        return 1
    fi

    echo "📡 Testing the /v1/models endpoint..."
    response=$(curl -s --connect-timeout 10 "http://127.0.0.1:$PORT/v1/models")
    if [ $? -eq 0 ]; then
        echo "✅ Models endpoint reachable"
        echo "$response" | python3 -m json.tool 2>/dev/null || echo "$response"
    else
        echo "❌ Models endpoint unreachable"
    fi

    echo ""
    echo "📡 Testing the /health endpoint..."
    health=$(curl -s --connect-timeout 5 "http://127.0.0.1:$PORT/health")
    if [ $? -eq 0 ]; then
        echo "✅ Health endpoint: $health"
    else
        echo "⚠️ Health endpoint unreachable"
    fi
}
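
# For reference, a successful /v1/models reply follows the standard OpenAI
# list shape; with the alias set in start(), it should look roughly like:
#   {"object": "list", "data": [{"id": "MinerU2.5-Pro-2604-1.2B", "object": "model", ...}]}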

test_client() {
    echo "🧪 Testing MinerU2.5-Pro integration with llama-server..."
    if [ ! -f "$PIDFILE" ] || ! kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "❌ MinerU2.5-Pro llama-server is not running; start it first: $0 start"
        return 1
    fi

    CONFIG_FILE="/Users/zhch158/workspace/repository.git/ocr_platform/ocr_tools/universal_doc_parser/config/bank_statement_mineru_vl_local.yaml"
    echo "📄 Config file: $CONFIG_FILE"
    echo ""
    echo "Make sure vl_recognition.server_url in the config file points to: http://localhost:$PORT"
    echo ""
    echo "Example test command:"
    echo "  cd /Users/zhch158/workspace/repository.git/ocr_platform/ocr_tools/universal_doc_parser"
    echo "  conda activate mineru2"
    echo "  python main_v2.py -i /path/to/test.pdf -c $CONFIG_FILE -o /tmp/test_output -s bank_statement --pages 1 --streaming"
    echo ""
    echo "Or test the API directly with curl:"
    echo "  curl -X POST http://localhost:$PORT/v1/chat/completions \\"
    echo "    -H 'Content-Type: application/json' \\"
    echo "    -d '{"
    echo "      \"model\": \"$MODEL_NAME\","
    echo "      \"messages\": ["
    echo "        {"
    echo "          \"role\": \"user\","
    echo "          \"content\": ["
    echo "            {\"type\": \"text\", \"text\": \"Table Recognition:\"},"
    echo "            {\"type\": \"image_url\", \"image_url\": {\"url\": \"file:///path/to/image.png\"}}"
    echo "          ]"
    echo "        }"
    echo "      ],"
    echo "      \"max_tokens\": 8192"
    echo "    }'"
}
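
# Note: the file:// URL form above requires the image to be readable by the
# server (hence --media-path in start()). A client-side alternative is a
# base64 data URL, which OpenAI-compatible servers generally accept; a sketch
# (the image path is a placeholder):
#   IMG_B64=$(base64 < /path/to/image.png | tr -d '\n')
#   ... {"type": "image_url", "image_url": {"url": "data:image/png;base64,'"$IMG_B64"'"}} ...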

usage() {
    echo "MinerU2.5-Pro llama-server daemon (macOS)"
    echo "==========================================="
    echo "Usage: $0 {start|stop|restart|status|logs|config|test|test-client}"
    echo ""
    echo "Commands:"
    echo "  start       - start the MinerU2.5-Pro llama-server service"
    echo "  stop        - stop the MinerU2.5-Pro llama-server service"
    echo "  restart     - restart the MinerU2.5-Pro llama-server service"
    echo "  status      - show service status and resource usage"
    echo "  logs        - show the service log (follow mode)"
    echo "  config      - show the current configuration"
    echo "  test        - test the /v1/models API endpoint"
    echo "  test-client - show how to test integration with the config file"
    echo ""
    echo "Configuration (edit this script to change):"
    echo "  Host: $HOST"
    echo "  Port: $PORT"
    echo "  Main model: $MODEL_PATH"
    echo "  Multimodal projector: $MMPROJ_PATH"
    echo "  Context length: $CONTEXT_SIZE"
    echo "  GPU layers: $GPU_LAYERS (Metal)"
    echo ""
    echo "Examples:"
    echo "  ./mineru_local_daemon.sh start"
    echo "  ./mineru_local_daemon.sh status"
    echo "  ./mineru_local_daemon.sh logs"
    echo "  ./mineru_local_daemon.sh test"
    echo ""
    echo "Prerequisites:"
    echo "  1. Install llama.cpp: brew install llama.cpp"
    echo "  2. Download the model once: llama-server -hf mradermacher/MinerU2.5-Pro-2604-1.2B-GGUF:Q8_0"
    echo "  3. A configured conda environment named mineru2"
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        stop
        sleep 3
        start
        ;;
    status)
        status
        ;;
    logs)
        logs
        ;;
    config)
        config
        ;;
    test)
        test_api
        ;;
    test-client)
        test_client
        ;;
    *)
        usage
        exit 1
        ;;
esac