docker-compose.yml

version: '3.8'
services:
  dots-ocr-server:
    image: dots-ocr:latest
    container_name: dots-ocr-container
    ports:
      - "8000:8000"
    volumes:
      # Download the model locally first; model URL: https://www.modelscope.cn/models/rednote-hilab/dots.ocr
      - ./model/dots.ocr:/workspace/weights/DotsOCR
    environment:
      - CUDA_VISIBLE_DEVICES=0
      # Put the mounted weights directory on the Python path so `from DotsOCR import ...` resolves.
      - PYTHONPATH=/workspace/weights:$PYTHONPATH
    deploy:
      resources:
        reservations:
          devices:
            # Reserve GPU 0 for this container.
            - capabilities: [gpu]
              device_ids: ['0']
    entrypoint: /bin/bash
    command:
      - -c
      - |
        set -ex;
        echo '--- Starting setup and server ---';
        echo 'Modifying vllm entrypoint...';
        # This sed command patches the vllm entrypoint script to import the custom modeling code.
        sed -i '/^from vllm\.entrypoints\.cli\.main import main/a from DotsOCR import modeling_dots_ocr_vllm' $(which vllm) && \
        echo 'vllm script after patch:';
        # Show the patched part of the vllm script for verification.
        grep -A 1 'from vllm.entrypoints.cli.main import main' $(which vllm) && \
        echo 'Starting server...';
        # Use 'exec' to replace the current shell process with the vllm server,
        # ensuring logs are properly forwarded to Docker's standard output.
        exec vllm serve /workspace/weights/DotsOCR \
          --tensor-parallel-size 1 \
          --gpu-memory-utilization 0.8 \
          --chat-template-content-format string \
          --served-model-name dotsocr-model \
          --trust-remote-code
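
With the model weights downloaded into ./model/dots.ocr, the stack can be started and smoke-tested from the same directory, roughly as sketched below. The request follows vLLM's OpenAI-compatible chat completions API; the model name comes from --served-model-name above, while the prompt text and the base64 image placeholder are illustrative stand-ins, not part of the original file.

  # Bring the service up and watch the logs until vllm reports the server is ready.
  docker compose up -d
  docker compose logs -f dots-ocr-server

  # Minimal smoke test against the OpenAI-compatible endpoint on the mapped port.
  curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
          "model": "dotsocr-model",
          "messages": [{
            "role": "user",
            "content": [
              {"type": "image_url", "image_url": {"url": "data:image/png;base64,<BASE64_IMAGE>"}},
              {"type": "text", "text": "Extract the text in this image."}
            ]
          }]
        }'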