#!/bin/bash
# filepath: ocr_platform/ocr_tools/daemons/mineru_local_daemon.sh
# Purpose: local llama-server daemon for MinerU2.5-Pro (macOS), serving a GGUF-format model
# Targets a Mac M4 Pro with 48 GB RAM, using Metal GPU acceleration
# Model download page: https://huggingface.co/mradermacher/MinerU2.5-Pro-2604-1.2B-GGUF
#
# First-time download (fetches the Q8_0 quantization automatically):
#   llama-server -hf mradermacher/MinerU2.5-Pro-2604-1.2B-GGUF:Q8_0
# The model is cached under ~/Library/Caches/llama.cpp/ and can be moved to ~/models/mineru_vl/
#
# Example API call:
#   curl -X POST http://localhost:8103/v1/chat/completions -H 'Content-Type: application/json' -d @payload.json
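#
# A minimal example payload.json for the curl call above (a sketch; the prompt text and
# image path are placeholders, mirroring the curl example in test_client below):
# {
#   "model": "MinerU2.5-Pro-2604-1.2B",
#   "messages": [
#     {"role": "user", "content": [
#       {"type": "text", "text": "Table Recognition:"},
#       {"type": "image_url", "image_url": {"url": "file:///path/to/image.png"}}
#     ]}
#   ],
#   "max_tokens": 8192
# }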
LOGDIR="$HOME/workspace/logs"
mkdir -p "$LOGDIR"
PIDFILE="$LOGDIR/mineru_llamaserver.pid"
LOGFILE="$LOGDIR/mineru_llamaserver.log"
# Configuration
CONDA_ENV="mineru2"
PORT="8103"
HOST="0.0.0.0"
# Local GGUF model paths (the actual cache layout produced by `llama-server -hf`)
HF_CACHE="$HOME/models/hf_home/hub/models--mradermacher--MinerU2.5-Pro-2604-1.2B-GGUF/snapshots/70429e9c728b6a5e904f358a9936c17bd3f5f4b8"
MODEL_PATH="$HF_CACHE/MinerU2.5-Pro-2604-1.2B.Q8_0.gguf"
MMPROJ_PATH="$HF_CACHE/MinerU2.5-Pro-2604-1.2B.mmproj-Q8_0.gguf"
# Model alias (the model ID exposed to clients; matches the `model` field in the yaml config)
MODEL_NAME="MinerU2.5-Pro-2604-1.2B"
# llama-server parameters
# Note: MinerU2.5-Pro has n_ctx_train=8192, so 8192 is sufficient
CONTEXT_SIZE="8192"  # context length (matches the model's n_ctx_train=8192)
GPU_LAYERS="99"      # layers offloaded to the Metal GPU (99 = all)
THREADS="8"          # CPU threads (recommended for the M4 Pro)
BATCH_SIZE="512"     # batch size
UBATCH_SIZE="128"    # micro-batch size
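# Rough sizing check (back-of-envelope, not a measured figure): at Q8_0 a 1.2B-parameter
# model is about 8 bits per weight, i.e. roughly 1.2 GB, plus the mmproj and the KV cache
# for an 8192-token context, so it fits comfortably in the M4 Pro's 48 GB of unified memory.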
# Activate the conda environment
if [ -f "$HOME/anaconda3/etc/profile.d/conda.sh" ]; then
    source "$HOME/anaconda3/etc/profile.d/conda.sh"
    conda activate "$CONDA_ENV"
elif [ -f "$HOME/miniconda3/etc/profile.d/conda.sh" ]; then
    source "$HOME/miniconda3/etc/profile.d/conda.sh"
    conda activate "$CONDA_ENV"
elif [ -f "/opt/miniconda3/etc/profile.d/conda.sh" ]; then
    source "/opt/miniconda3/etc/profile.d/conda.sh"
    conda activate "$CONDA_ENV"
else
    echo "Warning: conda initialization file not found, trying direct path"
    export PATH="/opt/miniconda3/envs/$CONDA_ENV/bin:$PATH"
fi
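# Optional sanity check after activation (a debugging aid, commented out by default):
#   echo "llama-server: $(which llama-server)"; echo "python: $(which python)"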
start() {
    if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "MinerU2.5-Pro llama-server is already running"
        return 1
    fi
    echo "Starting the MinerU2.5-Pro llama-server daemon..."
    echo "Host: $HOST, Port: $PORT"
    echo "Main model: $MODEL_PATH"
    echo "Multimodal projector: $MMPROJ_PATH"
    echo "Context length: $CONTEXT_SIZE"
    echo "GPU layers: $GPU_LAYERS (Metal)"
    echo "Threads: $THREADS"
    # Check that the model files exist
    if [ ! -f "$MODEL_PATH" ]; then
        echo "❌ Main model file not found: $MODEL_PATH"
        echo "Download it first by running:"
        echo "  llama-server -hf mradermacher/MinerU2.5-Pro-2604-1.2B-GGUF:Q8_0"
        echo "Then update the HF_CACHE path in this script (the snapshot hash may differ)"
        return 1
    fi
    if [ ! -f "$MMPROJ_PATH" ]; then
        echo "❌ Multimodal projector file not found: $MMPROJ_PATH"
        echo "Make sure the mmproj file has been downloaded"
        return 1
    fi
    # Check for the llama-server binary
    if ! command -v llama-server >/dev/null 2>&1; then
        echo "❌ llama-server not found"
        echo "Install it with: brew install llama.cpp"
        return 1
    fi
    echo "🔧 Using llama-server: $(which llama-server)"
    echo "🔧 llama.cpp version: $(llama-server --version 2>&1 | head -1 || echo 'Unknown')"
    echo "💻 System info:"
    echo "  Architecture: $(uname -m)"
    echo "  OS: $(uname -s)"
    echo "  Memory: $(sysctl -n hw.memsize | awk '{printf "%.1f GB", $1/1024/1024/1024}')"
    # Launch llama-server
    # Note: the MinerU2.5-Pro GGUF embeds its recommended sampling parameters
    # (top_k=1, top_p=0.001, temp=0.01), which llama-server applies automatically;
    # --temp 0 is set here only to guarantee deterministic decoding.
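    # (The OpenAI-compatible API also accepts per-request sampling overrides, so a
    # client could still send e.g. "temperature": 0 in the request body; the flag
    # below only sets the server-side default.)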
    nohup llama-server \
        -m "$MODEL_PATH" \
        --mmproj "$MMPROJ_PATH" \
        --alias "$MODEL_NAME" \
        --host "$HOST" \
        --port "$PORT" \
        --media-path "$HOME/workspace" \
        -c "$CONTEXT_SIZE" \
        -ngl "$GPU_LAYERS" \
        -t "$THREADS" \
        -b "$BATCH_SIZE" \
        -ub "$UBATCH_SIZE" \
        --temp 0 \
        > "$LOGFILE" 2>&1 &
    echo $! > "$PIDFILE"
    echo "✅ MinerU2.5-Pro llama-server started, PID: $(cat "$PIDFILE")"
    echo "📋 Log file: $LOGFILE"
    echo "🌐 Server URL: http://$HOST:$PORT"
    echo "📖 OpenAI-compatible API: http://localhost:$PORT/v1 (chat/completions, models)"
    echo ""
    echo "Waiting for the server to start..."
    sleep 5
    status
}
stop() {
    if [ ! -f "$PIDFILE" ]; then
        echo "MinerU2.5-Pro llama-server is not running"
        return 1
    fi
    PID=$(cat "$PIDFILE")
    echo "Stopping MinerU2.5-Pro llama-server (PID: $PID)..."
    kill "$PID"
    for i in {1..30}; do
        if ! kill -0 "$PID" 2>/dev/null; then
            break
        fi
        echo "Waiting for the process to stop... ($i/30)"
        sleep 1
    done
    if kill -0 "$PID" 2>/dev/null; then
        echo "Force-killing the process..."
        kill -9 "$PID"
    fi
    rm -f "$PIDFILE"
    echo "✅ MinerU2.5-Pro llama-server stopped"
}
status() {
    if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        PID=$(cat "$PIDFILE")
        echo "✅ MinerU2.5-Pro llama-server is running (PID: $PID)"
        echo "🌐 Server URL: http://$HOST:$PORT"
        echo "📋 Log file: $LOGFILE"
        # Check whether the port is listening
        if lsof -nP -iTCP:"$PORT" -sTCP:LISTEN >/dev/null 2>&1; then
            echo "🔗 Port $PORT is listening"
        else
            echo "⚠️ Port $PORT is not listening (the server may still be starting)"
        fi
        # Check the API response
        if command -v curl >/dev/null 2>&1; then
            if curl -s --connect-timeout 2 http://127.0.0.1:$PORT/v1/models > /dev/null 2>&1; then
                echo "🎯 API is responding"
            else
                echo "⚠️ API is not responding (the server may still be starting)"
            fi
        fi
        # Show the process's memory usage
        if command -v ps >/dev/null 2>&1; then
            MEM=$(ps -o rss= -p "$PID" 2>/dev/null | awk '{printf "%.2f GB", $1/1024/1024}')
            if [ -n "$MEM" ]; then
                echo "💾 Memory usage: $MEM"
            fi
        fi
        if [ -f "$LOGFILE" ]; then
            echo "📄 Recent log (last 3 lines):"
            tail -3 "$LOGFILE" | sed 's/^/  /'
        fi
    else
        echo "❌ MinerU2.5-Pro llama-server is not running"
        if [ -f "$PIDFILE" ]; then
            echo "Removing stale PID file..."
            rm -f "$PIDFILE"
        fi
    fi
}
logs() {
    if [ -f "$LOGFILE" ]; then
        echo "📄 MinerU2.5-Pro llama-server log:"
        echo "====================="
        tail -f "$LOGFILE"
    else
        echo "❌ Log file not found: $LOGFILE"
    fi
}
config() {
    echo "📋 Current configuration:"
    echo "  Conda env: $CONDA_ENV"
    echo "  Host: $HOST"
    echo "  Port: $PORT"
    echo "  Model alias: $MODEL_NAME"
    echo "  Main model path: $MODEL_PATH"
    echo "  Multimodal projector: $MMPROJ_PATH"
    echo "  Context length: $CONTEXT_SIZE"
    echo "  GPU layers: $GPU_LAYERS"
    echo "  Threads: $THREADS"
    echo "  Batch size: $BATCH_SIZE"
    echo "  Micro-batch size: $UBATCH_SIZE"
    echo "  PID file: $PIDFILE"
    echo "  Log file: $LOGFILE"
    echo ""
    echo "📦 Model file check:"
    if [ -f "$MODEL_PATH" ]; then
        SIZE=$(du -h "$MODEL_PATH" | cut -f1)
        echo "  ✅ Main model present ($SIZE)"
    else
        echo "  ❌ Main model missing: $MODEL_PATH"
    fi
    if [ -f "$MMPROJ_PATH" ]; then
        SIZE=$(du -h "$MMPROJ_PATH" | cut -f1)
        echo "  ✅ Multimodal projector present ($SIZE)"
    else
        echo "  ❌ Multimodal projector missing: $MMPROJ_PATH"
    fi
    echo ""
    echo "🔧 Environment check:"
    echo "  llama-server: $(which llama-server 2>/dev/null || echo 'not installed')"
    if command -v llama-server >/dev/null 2>&1; then
        LLAMA_VERSION=$(llama-server --version 2>&1 | head -1 || echo 'Unknown')
        echo "  Version: $LLAMA_VERSION"
    fi
    echo "  Conda: $(which conda 2>/dev/null || echo 'not found')"
    echo "  Current Python: $(which python 2>/dev/null || echo 'not found')"
    echo ""
    echo "💻 System info:"
    echo "  Architecture: $(uname -m)"
    echo "  OS version: $(sw_vers -productVersion 2>/dev/null || echo 'Unknown')"
    echo "  Total memory: $(sysctl -n hw.memsize 2>/dev/null | awk '{printf "%.1f GB", $1/1024/1024/1024}' || echo 'Unknown')"
    echo "  CPU cores: $(sysctl -n hw.ncpu 2>/dev/null || echo 'Unknown')"
}
test_api() {
    echo "🧪 Testing the MinerU2.5-Pro llama-server API..."
    if [ ! -f "$PIDFILE" ] || ! kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "❌ MinerU2.5-Pro llama-server is not running"
        return 1
    fi
    if ! command -v curl >/dev/null 2>&1; then
        echo "❌ curl command not found"
        return 1
    fi
    echo "📡 Testing the /v1/models endpoint..."
    if response=$(curl -s --connect-timeout 10 http://127.0.0.1:$PORT/v1/models); then
        echo "✅ Models endpoint reachable"
        echo "$response" | python3 -m json.tool 2>/dev/null || echo "$response"
    else
        echo "❌ Models endpoint unreachable"
    fi
    echo ""
    echo "📡 Testing the /health endpoint..."
    if health=$(curl -s --connect-timeout 5 http://127.0.0.1:$PORT/health); then
        echo "✅ Health endpoint: $health"
    else
        echo "⚠️ Health endpoint unreachable"
    fi
}
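# For reference, /v1/models returns an OpenAI-style model list; with the alias set above,
# the reply looks roughly like this (illustrative; extra fields vary by llama.cpp version):
#   {"object": "list", "data": [{"id": "MinerU2.5-Pro-2604-1.2B", "object": "model", ...}]}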
test_client() {
    echo "🧪 Testing MinerU2.5-Pro integration with llama-server..."
    if [ ! -f "$PIDFILE" ] || ! kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "❌ MinerU2.5-Pro llama-server is not running; start it first: $0 start"
        return 1
    fi
    CONFIG_FILE="/Users/zhch158/workspace/repository.git/ocr_platform/ocr_tools/universal_doc_parser/config/bank_statement_mineru_vl_local.yaml"
    echo "📄 Config file: $CONFIG_FILE"
    echo ""
    echo "Make sure vl_recognition.server_url in the config file points to: http://localhost:$PORT"
    echo ""
    echo "Example test commands:"
    echo "  cd /Users/zhch158/workspace/repository.git/ocr_platform/ocr_tools/universal_doc_parser"
    echo "  conda activate mineru2"
    echo "  python main_v2.py -i /path/to/test.pdf -c $CONFIG_FILE -o /tmp/test_output -s bank_statement --pages 1 --streaming"
    echo ""
    echo "Or test the API directly with curl:"
    echo "  curl -X POST http://localhost:$PORT/v1/chat/completions \\"
    echo "    -H 'Content-Type: application/json' \\"
    echo "    -d '{"
    echo "      \"model\": \"$MODEL_NAME\","
    echo "      \"messages\": ["
    echo "        {"
    echo "          \"role\": \"user\","
    echo "          \"content\": ["
    echo "            {\"type\": \"text\", \"text\": \"Table Recognition:\"},"
    echo "            {\"type\": \"image_url\", \"image_url\": {\"url\": \"file:///path/to/image.png\"}}"
    echo "          ]"
    echo "        }"
    echo "      ],"
    echo "      \"max_tokens\": 8192"
    echo "    }'"
}
usage() {
    echo "MinerU2.5-Pro llama-server daemon (macOS)"
    echo "==========================================="
    echo "Usage: $0 {start|stop|restart|status|logs|config|test|test-client}"
    echo ""
    echo "Commands:"
    echo "  start       - start the MinerU2.5-Pro llama-server"
    echo "  stop        - stop the MinerU2.5-Pro llama-server"
    echo "  restart     - restart the MinerU2.5-Pro llama-server"
    echo "  status      - show service status and resource usage"
    echo "  logs        - show the service log (follow mode)"
    echo "  config      - show the current configuration"
    echo "  test        - test the /v1/models API endpoint"
    echo "  test-client - show how to test the config-file integration"
    echo ""
    echo "Configuration (edit this script to change):"
    echo "  Host: $HOST"
    echo "  Port: $PORT"
    echo "  Main model: $MODEL_PATH"
    echo "  Multimodal projector: $MMPROJ_PATH"
    echo "  Context length: $CONTEXT_SIZE"
    echo "  GPU layers: $GPU_LAYERS (Metal)"
    echo ""
    echo "Examples:"
    echo "  ./mineru_local_daemon.sh start"
    echo "  ./mineru_local_daemon.sh status"
    echo "  ./mineru_local_daemon.sh logs"
    echo "  ./mineru_local_daemon.sh test"
    echo ""
    echo "Prerequisites:"
    echo "  1. Install llama.cpp: brew install llama.cpp"
    echo "  2. Download the model once: llama-server -hf mradermacher/MinerU2.5-Pro-2604-1.2B-GGUF:Q8_0"
    echo "  3. The mineru2 conda environment is set up"
}
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        stop
        sleep 3
        start
        ;;
    status)
        status
        ;;
    logs)
        logs
        ;;
    config)
        config
        ;;
    test)
        test_api
        ;;
    test-client)
        test_client
        ;;
    *)
        usage
        exit 1
        ;;
esac