# Files
# compose-anything/builds/mineru-vllm/docker-compose.yaml
# 2025-11-08 21:57:17 +08:00
#
# 110 lines
# 3.2 KiB
# YAML
#
# Shared service defaults: restart policy plus bounded json-file logging.
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      # Cap log volume so long-running GPU services cannot fill the disk.
      # Quoted so both option values stay strings for the Docker daemon.
      max-size: "100m"
      max-file: "3"
# Base definition shared by all three MinerU services via the YAML merge key.
# NOTE: `<<:` is a SHALLOW merge — a key set explicitly in a service replaces
# the merged-in value wholesale (it is not deep-merged).
x-mineru-vllm: &mineru-vllm
  <<: *defaults
  image: ${MINERU_DOCKER_IMAGE:-alexsuntop/mineru:2.6.2}
  build:
    context: .
    dockerfile: Dockerfile
  environment:
    TZ: ${TZ:-UTC}
    # Use model weights shipped inside the image instead of downloading at runtime.
    MINERU_MODEL_SOURCE: local
  ulimits:
    # Unlimited locked memory and a 64 MiB stack — assumed to follow vLLM/CUDA
    # container recommendations; TODO confirm against the image's docs.
    memlock: -1
    stack: 67108864
  # Host IPC namespace; presumably needed for vLLM shared-memory transport — verify.
  ipc: host
  deploy:
    resources:
      limits:
        # CPU counts quoted: bare 16.0 would parse as a YAML float.
        cpus: "16.0"
        memory: 32G
      reservations:
        cpus: "8.0"
        memory: 16G
        # Reserve GPU 0 through the NVIDIA container runtime.
        devices:
          - driver: nvidia
            device_ids: ["0"]
            capabilities: [gpu]
services:
  # vLLM inference server (enable with `--profile vllm-server`).
  mineru-vllm-server:
    <<: *mineru-vllm
    profiles: ["vllm-server"]
    ports:
      # Quoted so the HOST:CONTAINER mapping is always parsed as a string.
      - "${MINERU_PORT_OVERRIDE_VLLM:-30000}:30000"
    entrypoint: mineru-vllm-server
    command:
      # Exec form: each list item is ONE argv element, so a single item
      # "--host 0.0.0.0" would reach the program as one space-containing
      # token and fail option parsing. Use --flag=value instead.
      - "--host=0.0.0.0"
      - "--port=30000"
      # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode.
      # - "--data-parallel-size=2"
      # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter,
      # if VRAM issues persist, try lowering it further to `0.4` or below.
      # - "--gpu-memory-utilization=0.5"
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
mineru-api:
<<: *mineru-vllm
profiles: ["api"]
ports:
- ${MINERU_PORT_OVERRIDE_API:-8000}:8000
entrypoint: mineru-api
command:
- --host 0.0.0.0
- --port 8000
# If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode.
# - --data-parallel-size 2
# If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter,
# if VRAM issues persist, try lowering it further to `0.4` or below.
# - --gpu-memory-utilization 0.5
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8000/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
mineru-gradio:
<<: *mineru-vllm
profiles: ["gradio"]
ports:
- ${MINERU_PORT_OVERRIDE_GRADIO:-7860}:7860
entrypoint: mineru-gradio
command:
- --server-name 0.0.0.0
- --server-port 7860
# Enable the vllm engine for Gradio
- --enable-vllm-engine true
# If you want to disable the API, set this to false
# - --enable-api false
# If you want to limit the number of pages for conversion, set this to a specific number
# - --max-convert-pages 20
# If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode.
# - --data-parallel-size 2
# If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter,
# if VRAM issues persist, try lowering it further to `0.4` or below.
# - --gpu-memory-utilization 0.5
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:7860/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s