---
# Docker Compose stack for the lama-cleaner (IOPaint) image-inpainting service.
# Requires the NVIDIA container runtime: the service reserves GPU 0 and runs
# the model with --device=cuda.

# Shared defaults, merged into each service via `<<: *defaults` below.
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      # Rotate container logs at 100 MiB, keeping at most 3 files.
      max-size: 100m
      max-file: "3"

services:
  lama-cleaner:
    <<: *defaults
    # Registry and tag are overridable via environment; defaults pull
    # local/lama-cleaner:1.6.0 from docker.io.
    image: ${DOCKER_REGISTRY:-docker.io}/local/lama-cleaner:${BUILD_VERSION:-1.6.0}
    ports:
      # Quoted: digit-and-colon scalars are number-like in YAML 1.1, and
      # Compose convention is to always quote port mappings.
      - "8080:8080"
    build:
      context: .
      dockerfile: Dockerfile
    environment:
      TZ: ${TZ:-UTC}
      # Optional Hugging Face endpoint/mirror; empty by default.
      HF_ENDPOINT: ${HF_ENDPOINT:-}
    volumes:
      # Persist downloaded model weights across container recreation.
      - ./models:/root/.cache
    command:
      - iopaint
      - start
      - --model=lama
      - --device=cuda
      # Port/host must match the published port and the healthcheck URL.
      - --port=8080
      - --host=0.0.0.0
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 4G
        reservations:
          cpus: '1.0'
          memory: 2G
          devices:
            # Reserve GPU 0 through the NVIDIA runtime (needed for --device=cuda).
            - driver: nvidia
              device_ids: ['0']
              capabilities: [compute, utility]
    healthcheck:
      # wget --spider probes the web UI without downloading the body.
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/"]
      interval: 30s
      timeout: 10s
      retries: 3
      # Allow 60s for model load before failures count against retries.
      start_period: 60s