x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

x-mineru-sglang: &mineru-sglang
  <<: *defaults
  image: ${MINERU_DOCKER_IMAGE:-alexsuntop/mineru-sglang:2.2.2}
  environment:
    TZ: ${TZ:-UTC}
    MINERU_MODEL_SOURCE: local
  ulimits:
    memlock: -1
    stack: 67108864
  ipc: host
  deploy:
    resources:
      limits:
        cpus: ${MINERU_SGLANG_CPU_LIMIT:-8.0}
        memory: ${MINERU_SGLANG_MEMORY_LIMIT:-4G}
      reservations:
        cpus: ${MINERU_SGLANG_CPU_RESERVATION:-1.0}
        memory: ${MINERU_SGLANG_MEMORY_RESERVATION:-2G}
        devices:
          - driver: nvidia
            device_ids: ['0']
            capabilities: [gpu]

services:
  mineru-sglang-server:
    <<: *mineru-sglang
    profiles: ["sglang-server"]
    ports:
      - ${MINERU_PORT_OVERRIDE_SGLANG:-30000}:30000
    entrypoint: mineru-sglang-server
    command:
      - --host=0.0.0.0
      - --port=30000
      # If using multiple GPUs, enable vllm's multi-GPU data-parallel mode to increase throughput.
      # - --data-parallel-size=2
      # If running on a single GPU and running short of VRAM, reduce the KV cache size with this parameter;
      # if VRAM issues persist, lower it further to 0.4 or below.
      # - --gpu-memory-utilization=0.5
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s

  mineru-api:
    <<: *mineru-sglang
    profiles: ["api"]
    ports:
      - ${MINERU_PORT_OVERRIDE_API:-8000}:8000
    entrypoint: mineru-api
    command:
      - --host=0.0.0.0
      - --port=8000
      # If using multiple GPUs, enable vllm's multi-GPU data-parallel mode to increase throughput.
      # - --data-parallel-size=2
      # If running on a single GPU and running short of VRAM, reduce the KV cache size with this parameter;
      # if VRAM issues persist, lower it further to 0.4 or below.
      # - --gpu-memory-utilization=0.5
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s

  mineru-gradio:
    <<: *mineru-sglang
    profiles: ["gradio"]
    ports:
      - ${MINERU_PORT_OVERRIDE_GRADIO:-7860}:7860
    entrypoint: mineru-gradio
    command:
      - --server-name=0.0.0.0
      - --server-port=7860
      # Enable the vllm engine for Gradio.
      - --enable-vllm-engine=true
      # To disable the API, set this to false.
      # - --enable-api=false
      # To limit the number of pages converted per document, set a specific number.
      # - --max-convert-pages=20
      # If using multiple GPUs, enable vllm's multi-GPU data-parallel mode to increase throughput.
      # - --data-parallel-size=2
      # If running on a single GPU and running short of VRAM, reduce the KV cache size with this parameter;
      # if VRAM issues persist, lower it further to 0.4 or below.
      # - --gpu-memory-utilization=0.5
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:7860/"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
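
# ---------------------------------------------------------------------------
# Usage sketch (assumptions: this file is saved as compose.yaml, Docker
# Compose v2 is installed, and the NVIDIA Container Toolkit is configured on
# the host so the `driver: nvidia` device reservation above can be honored).
# Each service sits behind a compose profile, so start exactly one of them:
#
#   docker compose --profile sglang-server up -d   # sglang inference server on port 30000
#   docker compose --profile api up -d             # HTTP API on port 8000
#   docker compose --profile gradio up -d          # Gradio web UI on port 7860
#
# The ${VAR:-default} variables above can be overridden via an .env file
# placed next to this compose file; the values below are only examples:
#
#   MINERU_PORT_OVERRIDE_API=8080
#   MINERU_SGLANG_MEMORY_LIMIT=8G
#   MINERU_SGLANG_CPU_LIMIT=12.0
# ---------------------------------------------------------------------------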