x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

x-mineru-vllm: &mineru-vllm
  <<: *defaults
  image: ${GLOBAL_REGISTRY:-}alexsuntop/mineru:${BUILD_VERSION:-2.7.0}
  build:
    context: .
    dockerfile: Dockerfile
  environment:
    TZ: ${TZ:-UTC}
    MINERU_MODEL_SOURCE: local
  ulimits:
    memlock: -1
    stack: 67108864
  ipc: host
  deploy:
    resources:
      limits:
        cpus: "16.0"
        memory: 32G
      reservations:
        cpus: "8.0"
        memory: 16G
        devices:
          - driver: nvidia
            device_ids: ["0"]
            capabilities: [gpu]

services:
  mineru-openai-server:
    <<: *mineru-vllm
    ports:
      - "${MINERU_PORT_OVERRIDE_VLLM:-30000}:30000"
    entrypoint: mineru-openai-server
    # Compose passes each list item below as a single argument (exec form),
    # so every flag and its value must be separate items.
    command:
      # ==================== Engine Selection ====================
      # WARNING: Only ONE engine can be enabled at a time!
      # Choose 'vllm' OR 'lmdeploy' (uncomment one pair below)
      - --engine
      - vllm
      # - --engine
      # - lmdeploy
      # ==================== vLLM Engine Parameters ====================
      # Only valid together with "--engine vllm"
      - --host
      - 0.0.0.0
      - --port
      - "30000"
      # Multi-GPU configuration (increases throughput)
      # - --data-parallel-size
      # - "2"
      # Single-GPU memory optimization (reduce if VRAM is insufficient; try 0.4 or lower if issues persist)
      # - --gpu-memory-utilization
      # - "0.5"
      # ==================== LMDeploy Engine Parameters ====================
      # Only valid together with "--engine lmdeploy"
      # - --server-name
      # - 0.0.0.0
      # - --server-port
      # - "30000"
      # Multi-GPU configuration (increases throughput)
      # - --dp
      # - "2"
      # Single-GPU memory optimization (reduce if VRAM is insufficient; try 0.4 or lower if issues persist)
      # - --cache-max-entry-count
      # - "0.5"
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
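
# Usage sketch (an assumption, not part of the upstream documentation): with
# Docker Compose v2 and the NVIDIA Container Toolkit installed, the service can
# be started and probed roughly like this:
#   docker compose up -d mineru-openai-server
#   curl -f http://localhost:${MINERU_PORT_OVERRIDE_VLLM:-30000}/health
# The curl call mirrors the healthcheck defined above; the host port defaults
# to 30000 unless MINERU_PORT_OVERRIDE_VLLM is set in the environment.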