style: lint code

This commit is contained in:
Sun-ZhenXing
2026-02-23 17:41:05 +08:00
parent ced072de66
commit 7e14b01b78
240 changed files with 4301 additions and 1539 deletions

View File

@@ -5,17 +5,17 @@ binds:
- policies:
cors:
allowOrigins:
- "*"
- '*'
allowHeaders:
- mcp-protocol-version
- content-type
- cache-control
exposeHeaders:
- "Mcp-Session-Id"
- Mcp-Session-Id
backends:
- mcp:
targets:
- name: everything
stdio:
cmd: npx
args: ["@modelcontextprotocol/server-everything"]
args: ['@modelcontextprotocol/server-everything']

View File

@@ -4,23 +4,23 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
agentgateway:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}cr.agentgateway.dev/agentgateway:${AGENTGATEWAY_VERSION:-0.11.2}
ports:
- "${AGENTGATEWAY_PORT_OVERRIDE:-3000}:3000"
- "127.0.0.1:${AGENTGATEWAY_ADMIN_PORT_OVERRIDE:-15000}:15000"
- '${AGENTGATEWAY_PORT_OVERRIDE:-3000}:3000'
- '127.0.0.1:${AGENTGATEWAY_ADMIN_PORT_OVERRIDE:-15000}:15000'
volumes:
- ./config.yaml:/config.yaml:ro
environment:
- TZ=${TZ:-UTC}
- ADMIN_ADDR=${AGENTGATEWAY_ADMIN_ADDR:-0.0.0.0:15000}
command: ["-f", "/config.yaml"]
command: [-f, /config.yaml]
healthcheck:
test: ["CMD", "agentgateway", "--version"]
test: [CMD, agentgateway, --version]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,15 +4,15 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
apache:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}httpd:${APACHE_VERSION:-2.4.62-alpine3.20}
ports:
- "${APACHE_HTTP_PORT_OVERRIDE:-80}:80"
- "${APACHE_HTTPS_PORT_OVERRIDE:-443}:443"
- '${APACHE_HTTP_PORT_OVERRIDE:-80}:80'
- '${APACHE_HTTPS_PORT_OVERRIDE:-443}:443'
volumes:
- apache_logs:/usr/local/apache2/logs
- ./htdocs:/usr/local/apache2/htdocs:ro
@@ -33,7 +33,7 @@ services:
cpus: ${APACHE_CPU_RESERVATION:-0.25}
memory: ${APACHE_MEMORY_RESERVATION:-128M}
healthcheck:
test: ["CMD", "httpd", "-t"]
test: [CMD, httpd, -t]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -129,14 +129,14 @@ apisix:
etcd:
host:
- "http://etcd:2379"
prefix: "/apisix"
- 'http://etcd:2379'
prefix: /apisix
timeout: 30
plugin_attr:
prometheus:
export_addr:
ip: "0.0.0.0"
ip: 0.0.0.0
port: 9091
```

View File

@@ -129,14 +129,14 @@ apisix:
etcd:
host:
- "http://etcd:2379"
prefix: "/apisix"
- 'http://etcd:2379'
prefix: /apisix
timeout: 30
plugin_attr:
prometheus:
export_addr:
ip: "0.0.0.0"
ip: 0.0.0.0
port: 9091
```

View File

@@ -4,16 +4,16 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
apisix:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}apache/apisix:${APISIX_VERSION:-3.13.0-debian}
ports:
- "${APISIX_HTTP_PORT_OVERRIDE:-9080}:9080"
- "${APISIX_HTTPS_PORT_OVERRIDE:-9443}:9443"
- "${APISIX_ADMIN_PORT_OVERRIDE:-9180}:9180"
- '${APISIX_HTTP_PORT_OVERRIDE:-9080}:9080'
- '${APISIX_HTTPS_PORT_OVERRIDE:-9443}:9443'
- '${APISIX_ADMIN_PORT_OVERRIDE:-9180}:9180'
volumes:
- apisix_logs:/usr/local/apisix/logs
@@ -36,7 +36,7 @@ services:
memory: ${APISIX_MEMORY_RESERVATION:-256M}
healthcheck:
test:
["CMD-SHELL", "curl -f http://localhost:9080/apisix/status || exit 1"]
[CMD-SHELL, 'curl -f http://localhost:9080/apisix/status || exit 1']
interval: 30s
timeout: 10s
retries: 3
@@ -46,7 +46,7 @@ services:
<<: *defaults
image: quay.io/coreos/etcd:${ETCD_VERSION:-v3.6.0}
ports:
- "${ETCD_CLIENT_PORT_OVERRIDE:-2379}:2379"
- '${ETCD_CLIENT_PORT_OVERRIDE:-2379}:2379'
volumes:
- etcd_data:/etcd-data
environment:
@@ -90,7 +90,7 @@ services:
cpus: ${ETCD_CPU_RESERVATION:-0.1}
memory: ${ETCD_MEMORY_RESERVATION:-128M}
healthcheck:
test: ["CMD", "etcdctl", "endpoint", "health"]
test: [CMD, etcdctl, endpoint, health]
interval: 30s
timeout: 10s
retries: 3
@@ -101,7 +101,7 @@ services:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}apache/apisix-dashboard:${APISIX_DASHBOARD_VERSION:-3.0.1-alpine}
ports:
- "${APISIX_DASHBOARD_PORT_OVERRIDE:-9000}:9000"
- '${APISIX_DASHBOARD_PORT_OVERRIDE:-9000}:9000'
volumes:
- dashboard_conf:/usr/local/apisix-dashboard/conf
environment:

View File

@@ -2,7 +2,7 @@ global:
scrape_interval: 15s
scrape_configs:
- job_name: 'bifrost'
metrics_path: '/metrics'
- job_name: bifrost
metrics_path: /metrics
static_configs:
- targets: ['bifrost:8080']

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
bifrost:
@@ -13,7 +13,7 @@ services:
volumes:
- bifrost_data:/app/data
ports:
- "${BIFROST_PORT:-28080}:8080"
- '${BIFROST_PORT:-28080}:8080'
environment:
- TZ=${TZ:-UTC}
deploy:
@@ -26,14 +26,13 @@ services:
memory: ${BIFROST_MEMORY_RESERVATION:-128M}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:8080/health",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:8080/health'
interval: 30s
timeout: 10s
retries: 3
@@ -42,27 +41,26 @@ services:
prometheus:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}prom/prometheus:${PROMETHEUS_VERSION:-v3.8.1}
profiles: ["telemetry"]
profiles: [telemetry]
volumes:
- ./config/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--web.console.libraries=/usr/share/prometheus/console_libraries"
- "--web.console.templates=/usr/share/prometheus/consoles"
- --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.path=/prometheus
- --web.console.libraries=/usr/share/prometheus/console_libraries
- --web.console.templates=/usr/share/prometheus/consoles
ports:
- "${PROMETHEUS_PORT:-29090}:9090"
- '${PROMETHEUS_PORT:-29090}:9090'
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:9090/-/healthy",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:9090/-/healthy'
interval: 30s
timeout: 10s
retries: 3
@@ -79,7 +77,7 @@ services:
grafana:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}grafana/grafana:${GRAFANA_VERSION:-12.3.1}
profiles: ["telemetry"]
profiles: [telemetry]
volumes:
- ./config/grafana/datasources.yml:/etc/grafana/provisioning/datasources/datasources.yml
- grafana_data:/var/lib/grafana
@@ -88,17 +86,16 @@ services:
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin}
- GF_USERS_ALLOW_SIGN_UP=false
ports:
- "${GRAFANA_PORT:-23000}:3000"
- '${GRAFANA_PORT:-23000}:3000'
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:3000/api/health",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:3000/api/health'
interval: 30s
timeout: 10s
retries: 3

View File

@@ -31,7 +31,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
budibase:
@@ -39,7 +39,7 @@ services:
image: ${GLOBAL_REGISTRY:-}budibase/budibase:${BUDIBASE_VERSION:-3.23.0}
container_name: budibase
ports:
- "${BUDIBASE_PORT_OVERRIDE:-10000}:80"
- '${BUDIBASE_PORT_OVERRIDE:-10000}:80'
environment:
# Core settings
- APP_PORT=${BUDIBASE_APP_PORT:-4002}
@@ -75,14 +75,13 @@ services:
condition: service_healthy
healthcheck:
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost/health",
]
- CMD
- wget
- --quiet
- --tries=1
- --spider
- 'http://localhost/health'
interval: 30s
timeout: 10s
retries: 3
@@ -90,11 +89,11 @@ services:
deploy:
resources:
limits:
cpus: "${BUDIBASE_CPU_LIMIT:-2.0}"
memory: "${BUDIBASE_MEMORY_LIMIT:-2G}"
cpus: '${BUDIBASE_CPU_LIMIT:-2.0}'
memory: '${BUDIBASE_MEMORY_LIMIT:-2G}'
reservations:
cpus: "${BUDIBASE_CPU_RESERVATION:-0.5}"
memory: "${BUDIBASE_MEMORY_RESERVATION:-512M}"
cpus: '${BUDIBASE_CPU_RESERVATION:-0.5}'
memory: '${BUDIBASE_MEMORY_RESERVATION:-512M}'
redis:
<<: *defaults
@@ -104,18 +103,18 @@ services:
volumes:
- redis_data:/data
healthcheck:
test: ["CMD", "redis-cli", "ping"]
test: [CMD, redis-cli, ping]
interval: 10s
timeout: 5s
retries: 5
deploy:
resources:
limits:
cpus: "${REDIS_CPU_LIMIT:-0.5}"
memory: "${REDIS_MEMORY_LIMIT:-512M}"
cpus: '${REDIS_CPU_LIMIT:-0.5}'
memory: '${REDIS_MEMORY_LIMIT:-512M}'
reservations:
cpus: "${REDIS_CPU_RESERVATION:-0.1}"
memory: "${REDIS_MEMORY_RESERVATION:-128M}"
cpus: '${REDIS_CPU_RESERVATION:-0.1}'
memory: '${REDIS_MEMORY_RESERVATION:-128M}'
volumes:
budibase_data:

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
bytebot-desktop:
<<: *defaults
image: ${GHCR_IO_REGISTRY:-ghcr.io}/bytebot-ai/bytebot-desktop:${BYTEBOT_VERSION:-edge}
ports:
- "${BYTEBOT_DESKTOP_PORT_OVERRIDE:-9990}:9990"
- '${BYTEBOT_DESKTOP_PORT_OVERRIDE:-9990}:9990'
environment:
- TZ=${TZ:-UTC}
shm_size: 2gb
@@ -25,14 +25,13 @@ services:
memory: ${BYTEBOT_DESKTOP_MEMORY_RESERVATION:-2G}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:9990/",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:9990/'
interval: 30s
timeout: 10s
retries: 3
@@ -47,7 +46,7 @@ services:
bytebot-db:
condition: service_healthy
ports:
- "${BYTEBOT_AGENT_PORT_OVERRIDE:-9991}:9991"
- '${BYTEBOT_AGENT_PORT_OVERRIDE:-9991}:9991'
environment:
- TZ=${TZ:-UTC}
- BYTEBOTD_URL=http://bytebot-desktop:9990
@@ -65,14 +64,13 @@ services:
memory: ${BYTEBOT_AGENT_MEMORY_RESERVATION:-512M}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:9991/health",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:9991/health'
interval: 30s
timeout: 10s
retries: 3
@@ -85,7 +83,7 @@ services:
bytebot-agent:
condition: service_healthy
ports:
- "${BYTEBOT_UI_PORT_OVERRIDE:-9992}:9992"
- '${BYTEBOT_UI_PORT_OVERRIDE:-9992}:9992'
environment:
- TZ=${TZ:-UTC}
- BYTEBOT_AGENT_BASE_URL=http://localhost:9991
@@ -100,14 +98,13 @@ services:
memory: ${BYTEBOT_UI_MEMORY_RESERVATION:-256M}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:9992/",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:9992/'
interval: 30s
timeout: 10s
retries: 3
@@ -133,7 +130,7 @@ services:
cpus: ${BYTEBOT_DB_CPU_RESERVATION:-0.25}
memory: ${BYTEBOT_DB_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
test: [CMD-SHELL, pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -57,7 +57,7 @@ Please modify the `.env` file as needed for your use case.
```sql
-- Create a keyspace
CREATE KEYSPACE test_keyspace
CREATE KEYSPACE test_keyspace
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
-- Use the keyspace
@@ -71,7 +71,7 @@ CREATE TABLE users (
);
-- Insert data
INSERT INTO users (id, name, email)
INSERT INTO users (id, name, email)
VALUES (uuid(), 'John Doe', 'john@example.com');
-- Query data

View File

@@ -57,7 +57,7 @@
```sql
-- 创建键空间
CREATE KEYSPACE test_keyspace
CREATE KEYSPACE test_keyspace
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
-- 使用键空间
@@ -71,7 +71,7 @@ CREATE TABLE users (
);
-- 插入数据
INSERT INTO users (id, name, email)
INSERT INTO users (id, name, email)
VALUES (uuid(), 'John Doe', 'john@example.com');
-- 查询数据

View File

@@ -4,15 +4,15 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
cassandra:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}cassandra:${CASSANDRA_VERSION:-5.0.2}
ports:
- "${CASSANDRA_CQL_PORT_OVERRIDE:-9042}:9042"
- "${CASSANDRA_THRIFT_PORT_OVERRIDE:-9160}:9160"
- '${CASSANDRA_CQL_PORT_OVERRIDE:-9042}:9042'
- '${CASSANDRA_THRIFT_PORT_OVERRIDE:-9160}:9160'
volumes:
- cassandra_data:/var/lib/cassandra
- cassandra_logs:/var/log/cassandra
@@ -39,7 +39,7 @@ services:
cpus: ${CASSANDRA_CPU_RESERVATION:-0.50}
memory: ${CASSANDRA_MEMORY_RESERVATION:-1G}
healthcheck:
test: ["CMD-SHELL", "cqlsh -e 'DESCRIBE CLUSTER'"]
test: [CMD-SHELL, "cqlsh -e 'DESCRIBE CLUSTER'"]
interval: 30s
timeout: 10s
retries: 5

View File

@@ -4,15 +4,15 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
clash:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}centralx/clash:${CLASH_VERSION:-1.18.0}
ports:
- "7880:80"
- "7890:7890"
- '7880:80'
- '7890:7890'
volumes:
- ./config.yaml:/home/runner/.config/clash/config.yaml
environment:
@@ -26,7 +26,7 @@ services:
cpus: ${CLASH_CPU_RESERVATION:-0.25}
memory: ${CLASH_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:80/"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:80/']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
clickhouse:
@@ -24,10 +24,10 @@ services:
# - ./config.xml:/etc/clickhouse-server/config.d/config.xml
# - ./users.xml:/etc/clickhouse-server/users.d/users.xml
ports:
- "${CLICKHOUSE_HTTP_PORT_OVERRIDE:-8123}:8123"
- "${CLICKHOUSE_NATIVE_PORT_OVERRIDE:-9000}:9000"
- "${CLICKHOUSE_MYSQL_PORT_OVERRIDE:-9004}:9004"
- "${CLICKHOUSE_POSTGRES_PORT_OVERRIDE:-9005}:9005"
- '${CLICKHOUSE_HTTP_PORT_OVERRIDE:-8123}:8123'
- '${CLICKHOUSE_NATIVE_PORT_OVERRIDE:-9000}:9000'
- '${CLICKHOUSE_MYSQL_PORT_OVERRIDE:-9004}:9004'
- '${CLICKHOUSE_POSTGRES_PORT_OVERRIDE:-9005}:9005'
ulimits:
nofile:
soft: 262144
@@ -42,10 +42,9 @@ services:
memory: ${CLICKHOUSE_MEMORY_RESERVATION:-1G}
healthcheck:
test:
[
"CMD-SHELL",
"wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1",
]
- CMD-SHELL
- 'wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1'
interval: 30s
timeout: 10s
retries: 3

View File

@@ -29,7 +29,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
conductor-server:
@@ -40,8 +40,8 @@ services:
dockerfile: Dockerfile
container_name: conductor-server
ports:
- "${CONDUCTOR_SERVER_PORT_OVERRIDE:-8080}:8080"
- "${CONDUCTOR_UI_PORT_OVERRIDE:-5000}:5000"
- '${CONDUCTOR_SERVER_PORT_OVERRIDE:-8080}:8080'
- '${CONDUCTOR_UI_PORT_OVERRIDE:-5000}:5000'
environment:
# Database configuration
- spring.datasource.url=jdbc:postgresql://postgres:5432/${POSTGRES_DB}
@@ -67,14 +67,13 @@ services:
condition: service_healthy
healthcheck:
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost:8080/health",
]
- CMD
- wget
- --quiet
- --tries=1
- --spider
- 'http://localhost:8080/health'
interval: 30s
timeout: 10s
retries: 5
@@ -82,11 +81,11 @@ services:
deploy:
resources:
limits:
cpus: "${CONDUCTOR_CPU_LIMIT:-2.0}"
memory: "${CONDUCTOR_MEMORY_LIMIT:-2G}"
cpus: '${CONDUCTOR_CPU_LIMIT:-2.0}'
memory: '${CONDUCTOR_MEMORY_LIMIT:-2G}'
reservations:
cpus: "${CONDUCTOR_CPU_RESERVATION:-0.5}"
memory: "${CONDUCTOR_MEMORY_RESERVATION:-512M}"
cpus: '${CONDUCTOR_CPU_RESERVATION:-0.5}'
memory: '${CONDUCTOR_MEMORY_RESERVATION:-512M}'
postgres:
<<: *defaults
@@ -102,21 +101,20 @@ services:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test:
[
"CMD-SHELL",
"pg_isready -U ${POSTGRES_USER:-conductor} -d ${POSTGRES_DB:-conductor}",
]
- CMD-SHELL
- 'pg_isready -U ${POSTGRES_USER:-conductor} -d ${POSTGRES_DB:-conductor}'
interval: 10s
timeout: 5s
retries: 5
deploy:
resources:
limits:
cpus: "${POSTGRES_CPU_LIMIT:-1.0}"
memory: "${POSTGRES_MEMORY_LIMIT:-1G}"
cpus: '${POSTGRES_CPU_LIMIT:-1.0}'
memory: '${POSTGRES_MEMORY_LIMIT:-1G}'
reservations:
cpus: "${POSTGRES_CPU_RESERVATION:-0.25}"
memory: "${POSTGRES_MEMORY_RESERVATION:-256M}"
cpus: '${POSTGRES_CPU_RESERVATION:-0.25}'
memory: '${POSTGRES_MEMORY_RESERVATION:-256M}'
elasticsearch:
<<: *defaults
@@ -131,7 +129,7 @@ services:
- elasticsearch_data:/usr/share/elasticsearch/data
healthcheck:
test:
["CMD-SHELL", "curl -f http://localhost:9200/_cluster/health || exit 1"]
[CMD-SHELL, 'curl -f http://localhost:9200/_cluster/health || exit 1']
interval: 30s
timeout: 10s
retries: 5
@@ -139,11 +137,11 @@ services:
deploy:
resources:
limits:
cpus: "${ELASTICSEARCH_CPU_LIMIT:-2.0}"
memory: "${ELASTICSEARCH_MEMORY_LIMIT:-2G}"
cpus: '${ELASTICSEARCH_CPU_LIMIT:-2.0}'
memory: '${ELASTICSEARCH_MEMORY_LIMIT:-2G}'
reservations:
cpus: "${ELASTICSEARCH_CPU_RESERVATION:-0.5}"
memory: "${ELASTICSEARCH_MEMORY_RESERVATION:-1G}"
cpus: '${ELASTICSEARCH_CPU_RESERVATION:-0.5}'
memory: '${ELASTICSEARCH_MEMORY_RESERVATION:-1G}'
volumes:
postgres_data:

View File

@@ -4,18 +4,18 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
consul:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}consul:${CONSUL_VERSION:-1.20.3}
ports:
- "${CONSUL_HTTP_PORT_OVERRIDE:-8500}:8500"
- "${CONSUL_DNS_PORT_OVERRIDE:-8600}:8600/udp"
- "${CONSUL_SERF_LAN_PORT_OVERRIDE:-8301}:8301"
- "${CONSUL_SERF_WAN_PORT_OVERRIDE:-8302}:8302"
- "${CONSUL_SERVER_RPC_PORT_OVERRIDE:-8300}:8300"
- '${CONSUL_HTTP_PORT_OVERRIDE:-8500}:8500'
- '${CONSUL_DNS_PORT_OVERRIDE:-8600}:8600/udp'
- '${CONSUL_SERF_LAN_PORT_OVERRIDE:-8301}:8301'
- '${CONSUL_SERF_WAN_PORT_OVERRIDE:-8302}:8302'
- '${CONSUL_SERVER_RPC_PORT_OVERRIDE:-8300}:8300'
volumes:
- consul_data:/consul/data
- consul_config:/consul/config
@@ -44,7 +44,7 @@ services:
cpus: ${CONSUL_CPU_RESERVATION:-0.25}
memory: ${CONSUL_MEMORY_RESERVATION:-128M}
healthcheck:
test: ["CMD-SHELL", "consul members"]
test: [CMD-SHELL, consul members]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
dnsmasq:
@@ -13,8 +13,8 @@ services:
volumes:
- ./dnsmasq.conf:/etc/dnsmasq.conf:ro
ports:
- "${DNSMASQ_DNS_PORT_OVERRIDE:-53}:53/udp"
- "${DNSMASQ_DNS_PORT_OVERRIDE:-53}:53/tcp"
- '${DNSMASQ_DNS_PORT_OVERRIDE:-53}:53/udp'
- '${DNSMASQ_DNS_PORT_OVERRIDE:-53}:53/tcp'
environment:
- TZ=${TZ:-UTC}
cap_drop:
@@ -23,7 +23,7 @@ services:
- NET_ADMIN
- NET_BIND_SERVICE
healthcheck:
test: ["CMD", "nslookup", "-timeout=1", "localhost", "127.0.0.1"]
test: [CMD, nslookup, -timeout=1, localhost, 127.0.0.1]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,15 +4,15 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
docker_android:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}halimqarroum/docker-android:${DOCKER_ANDROID_VERSION:-api-33}
ports:
- "${DOCKER_ANDROID_ADB_PORT_OVERRIDE:-5555}:5555"
- "${DOCKER_ANDROID_CONSOLE_PORT_OVERRIDE:-5554}:5554"
- '${DOCKER_ANDROID_ADB_PORT_OVERRIDE:-5555}:5555'
- '${DOCKER_ANDROID_CONSOLE_PORT_OVERRIDE:-5554}:5554'
volumes:
- docker_android_data:/data
- ${DOCKER_ANDROID_KEYS_DIR:-./keys}:/keys:ro
@@ -24,9 +24,9 @@ services:
- MEMORY=${DOCKER_ANDROID_MEMORY:-8192}
- CORES=${DOCKER_ANDROID_CORES:-4}
devices:
- "${DOCKER_ANDROID_KVM_DEVICE:-/dev/kvm}:/dev/kvm"
- '${DOCKER_ANDROID_KVM_DEVICE:-/dev/kvm}:/dev/kvm'
healthcheck:
test: ["CMD-SHELL", "adb devices 2>/dev/null | grep -q emulator"]
test: [CMD-SHELL, 'adb devices 2>/dev/null | grep -q emulator']
interval: 30s
timeout: 10s
retries: 5
@@ -46,8 +46,8 @@ services:
- gpu
image: ${GLOBAL_REGISTRY:-}halimqarroum/docker-android:${DOCKER_ANDROID_GPU_VERSION:-api-33-cuda}
ports:
- "${DOCKER_ANDROID_ADB_PORT_OVERRIDE:-5555}:5555"
- "${DOCKER_ANDROID_CONSOLE_PORT_OVERRIDE:-5554}:5554"
- '${DOCKER_ANDROID_ADB_PORT_OVERRIDE:-5555}:5555'
- '${DOCKER_ANDROID_CONSOLE_PORT_OVERRIDE:-5554}:5554'
volumes:
- docker_android_data:/data
- ${DOCKER_ANDROID_KEYS_DIR:-./keys}:/keys:ro
@@ -59,9 +59,9 @@ services:
- MEMORY=${DOCKER_ANDROID_MEMORY:-8192}
- CORES=${DOCKER_ANDROID_CORES:-4}
devices:
- "${DOCKER_ANDROID_KVM_DEVICE:-/dev/kvm}:/dev/kvm"
- '${DOCKER_ANDROID_KVM_DEVICE:-/dev/kvm}:/dev/kvm'
healthcheck:
test: ["CMD-SHELL", "adb devices 2>/dev/null | grep -q emulator"]
test: [CMD-SHELL, 'adb devices 2>/dev/null | grep -q emulator']
interval: 30s
timeout: 10s
retries: 5

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
registry:
@@ -33,7 +33,7 @@ services:
cpus: ${REGISTRY_CPU_RESERVATION:-0.1}
memory: ${REGISTRY_MEMORY_RESERVATION:-128M}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5000/"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:5000/']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
dockge:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}louislam/dockge:${DOCKGE_VERSION:-1}
ports:
- "${PORT_OVERRIDE:-5001}:5001"
- '${PORT_OVERRIDE:-5001}:5001'
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- dockge_data:/app/data
@@ -30,7 +30,7 @@ services:
cpus: ${DOCKGE_CPU_RESERVATION:-0.25}
memory: ${DOCKGE_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5001/"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:5001/']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -8,7 +8,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
# Doris Frontend (FE) node
@@ -17,10 +17,10 @@ services:
image: ${GLOBAL_REGISTRY:-}apache/doris:${DORIS_VERSION:-3.0.0}-fe-x86_64
hostname: doris-fe
ports:
- "${DORIS_FE_QUERY_PORT_OVERRIDE:-9030}:9030"
- "${DORIS_FE_HTTP_PORT_OVERRIDE:-8030}:8030"
- "${DORIS_FE_RPC_PORT_OVERRIDE:-9020}:9020"
- "${DORIS_FE_EDIT_LOG_PORT_OVERRIDE:-9010}:9010"
- '${DORIS_FE_QUERY_PORT_OVERRIDE:-9030}:9030'
- '${DORIS_FE_HTTP_PORT_OVERRIDE:-8030}:8030'
- '${DORIS_FE_RPC_PORT_OVERRIDE:-9020}:9020'
- '${DORIS_FE_EDIT_LOG_PORT_OVERRIDE:-9010}:9010'
volumes:
- doris_fe_data:/opt/apache-doris/fe/doris-meta
environment:
@@ -28,7 +28,7 @@ services:
- FE_SERVERS=doris-fe:9010
- FRONTEND_REPLICAS=1
healthcheck:
test: ["CMD-SHELL", "curl -sf http://localhost:8030/api/v2/system/info || exit 1"]
test: [CMD-SHELL, 'curl -sf http://localhost:8030/api/v2/system/info || exit 1']
interval: 30s
timeout: 10s
retries: 5
@@ -48,9 +48,9 @@ services:
image: ${GLOBAL_REGISTRY:-}apache/doris:${DORIS_VERSION:-3.0.0}-be-x86_64
hostname: doris-be
ports:
- "${DORIS_BE_HEARTBEAT_PORT_OVERRIDE:-9050}:9050"
- "${DORIS_BE_THRIFT_RPC_PORT_OVERRIDE:-9060}:9060"
- "${DORIS_BE_HTTP_PORT_OVERRIDE:-8040}:8040"
- '${DORIS_BE_HEARTBEAT_PORT_OVERRIDE:-9050}:9050'
- '${DORIS_BE_THRIFT_RPC_PORT_OVERRIDE:-9060}:9060'
- '${DORIS_BE_HTTP_PORT_OVERRIDE:-8040}:8040'
volumes:
- doris_be_data:/opt/apache-doris/be/storage
environment:
@@ -60,7 +60,7 @@ services:
doris-fe:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -sf http://localhost:8040/api/health || exit 1"]
test: [CMD-SHELL, 'curl -sf http://localhost:8040/api/health || exit 1']
interval: 30s
timeout: 10s
retries: 5

View File

@@ -4,13 +4,13 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
duckdb:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}davidgasquez/duckdb:${DUCKDB_VERSION:-v1.1.3}
command: ["duckdb", "/data/duckdb.db"]
command: [duckdb, /data/duckdb.db]
stdin_open: true
tty: true
environment:
@@ -28,7 +28,7 @@ services:
cpus: ${DUCKDB_CPU_RESERVATION:-0.5}
memory: ${DUCKDB_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD-SHELL", "duckdb /data/duckdb.db -c 'SELECT 1' || exit 1"]
test: [CMD-SHELL, "duckdb /data/duckdb.db -c 'SELECT 1' || exit 1"]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,15 +4,15 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
elasticsearch:
<<: *defaults
image: docker.elastic.co/elasticsearch/elasticsearch-wolfi:${ELASTICSEARCH_VERSION:-9.3.0}
ports:
- "${ELASTICSEARCH_HTTP_PORT_OVERRIDE:-9200}:9200"
- "${ELASTICSEARCH_TRANSPORT_PORT_OVERRIDE:-9300}:9300"
- '${ELASTICSEARCH_HTTP_PORT_OVERRIDE:-9200}:9200'
- '${ELASTICSEARCH_TRANSPORT_PORT_OVERRIDE:-9300}:9300'
volumes:
- elasticsearch_data:/usr/share/elasticsearch/data
- elasticsearch_logs:/usr/share/elasticsearch/logs
@@ -28,7 +28,7 @@ services:
- xpack.security.enabled=${ELASTICSEARCH_SECURITY_ENABLED:-false}
- xpack.security.http.ssl.enabled=${ELASTICSEARCH_SSL_ENABLED:-false}
- xpack.security.transport.ssl.enabled=${ELASTICSEARCH_SSL_ENABLED:-false}
- "ES_JAVA_OPTS=-Xms${ELASTICSEARCH_HEAP_SIZE:-1g} -Xmx${ELASTICSEARCH_HEAP_SIZE:-1g}"
- 'ES_JAVA_OPTS=-Xms${ELASTICSEARCH_HEAP_SIZE:-1g} -Xmx${ELASTICSEARCH_HEAP_SIZE:-1g}'
ulimits:
memlock:
soft: -1
@@ -43,7 +43,7 @@ services:
memory: ${ELASTICSEARCH_MEMORY_RESERVATION:-1G}
healthcheck:
test:
["CMD-SHELL", "curl -f http://localhost:9200/_cluster/health || exit 1"]
[CMD-SHELL, 'curl -f http://localhost:9200/_cluster/health || exit 1']
interval: 30s
timeout: 10s
retries: 5

View File

@@ -4,15 +4,15 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
etcd:
<<: *defaults
image: quay.io/coreos/etcd:${ETCD_VERSION:-v3.6.0}
ports:
- "${ETCD_CLIENT_PORT_OVERRIDE:-2379}:2379"
- "${ETCD_PEER_PORT_OVERRIDE:-2380}:2380"
- '${ETCD_CLIENT_PORT_OVERRIDE:-2379}:2379'
- '${ETCD_PEER_PORT_OVERRIDE:-2380}:2380'
volumes:
- etcd_data:/etcd-data
environment:
@@ -56,7 +56,7 @@ services:
cpus: ${ETCD_CPU_RESERVATION:-0.25}
memory: ${ETCD_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD", "etcdctl", "endpoint", "health"]
test: [CMD, etcdctl, endpoint, health]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,21 +4,21 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
falkordb:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}falkordb/falkordb:${FALKORDB_VERSION:-v4.14.11}
ports:
- "${FALKORDB_PORT_OVERRIDE:-6379}:6379"
- "${FALKORDB_BROWSER_PORT_OVERRIDE:-3000}:3000"
- '${FALKORDB_PORT_OVERRIDE:-6379}:6379'
- '${FALKORDB_BROWSER_PORT_OVERRIDE:-3000}:3000'
volumes:
- falkordb_data:/data
environment:
- TZ=${TZ:-UTC}
healthcheck:
test: ["CMD", "redis-cli", "ping"]
test: [CMD, redis-cli, ping]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
x-common-env: &common-env
REDIS_URL: ${REDIS_URL:-redis://redis:6379}
@@ -73,7 +73,7 @@ services:
nuq-postgres:
condition: service_started
ports:
- "${FIRECRAWL_PORT_OVERRIDE:-3002}:${INTERNAL_PORT:-3002}"
- '${FIRECRAWL_PORT_OVERRIDE:-3002}:${INTERNAL_PORT:-3002}'
command: node dist/src/harness.js --start-docker
deploy:
resources:
@@ -85,14 +85,13 @@ services:
memory: ${FIRECRAWL_API_MEMORY_RESERVATION:-2G}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:3002/health",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:3002/health'
interval: 30s
timeout: 10s
retries: 3
@@ -113,7 +112,7 @@ services:
cpus: ${REDIS_CPU_RESERVATION:-0.25}
memory: ${REDIS_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD", "redis-cli", "ping"]
test: [CMD, redis-cli, ping]
interval: 10s
timeout: 3s
retries: 3
@@ -127,7 +126,7 @@ services:
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
POSTGRES_DB: ${POSTGRES_DB:-postgres}
ports:
- "${POSTGRES_PORT_OVERRIDE:-5432}:5432"
- '${POSTGRES_PORT_OVERRIDE:-5432}:5432'
volumes:
- postgres_data:/var/lib/postgresql/data
deploy:
@@ -139,7 +138,7 @@ services:
cpus: ${NUQPOSTGRES_CPU_RESERVATION:-0.5}
memory: ${NUQPOSTGRES_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"]
test: [CMD-SHELL, 'pg_isready -U ${POSTGRES_USER:-postgres}']
interval: 10s
timeout: 5s
retries: 5

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
jobmanager:
@@ -24,8 +24,8 @@ services:
# Custom Flink jobs
# - ./jobs:/opt/flink/jobs
ports:
- "${FLINK_JOBMANAGER_RPC_PORT_OVERRIDE:-6123}:6123"
- "${FLINK_JOBMANAGER_UI_PORT_OVERRIDE:-8081}:8081"
- '${FLINK_JOBMANAGER_RPC_PORT_OVERRIDE:-6123}:6123'
- '${FLINK_JOBMANAGER_UI_PORT_OVERRIDE:-8081}:8081'
deploy:
resources:
limits:
@@ -35,7 +35,7 @@ services:
cpus: ${FLINK_JOBMANAGER_CPU_RESERVATION:-0.5}
memory: ${FLINK_JOBMANAGER_MEMORY_RESERVATION:-1G}
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:8081 || exit 1"]
test: [CMD-SHELL, 'wget --no-verbose --tries=1 --spider http://localhost:8081 || exit 1']
interval: 30s
timeout: 10s
retries: 3
@@ -68,7 +68,7 @@ services:
cpus: ${FLINK_TASKMANAGER_CPU_RESERVATION:-0.5}
memory: ${FLINK_TASKMANAGER_MEMORY_RESERVATION:-1G}
healthcheck:
test: ["CMD-SHELL", "ps aux | grep -v grep | grep -q taskmanager || exit 1"]
test: [CMD-SHELL, 'ps aux | grep -v grep | grep -q taskmanager || exit 1']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
flowise:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}flowiseai/flowise:${FLOWISE_VERSION:-3.0.12}
ports:
- "${FLOWISE_PORT_OVERRIDE:-3000}:3000"
- '${FLOWISE_PORT_OVERRIDE:-3000}:3000'
volumes:
- flowise_data:/root/.flowise
environment:
@@ -21,12 +21,11 @@ services:
- FLOWISE_PASSWORD=${FLOWISE_PASSWORD:-}
healthcheck:
test:
[
"CMD",
"node",
"-e",
"require('http').get('http://localhost:3000/api/v1/ping',res=>process.exit(res.statusCode===200?0:1)).on('error',()=>process.exit(1))"
]
- CMD
- node
- -e
- "require('http').get('http://localhost:3000/api/v1/ping',res=>process.exit(res.statusCode===200?0:1)).on('error',()=>process.exit(1))"
interval: 30s
timeout: 10s
retries: 5

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
frpc:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}snowdreamtech/frpc:${FRPC_VERSION:-0.65.0}
ports:
- "${FRP_ADMIN_PORT:-7400}:${FRP_ADMIN_PORT:-7400}"
- '${FRP_ADMIN_PORT:-7400}:${FRP_ADMIN_PORT:-7400}'
volumes:
- ./frpc.toml:/etc/frp/frpc.toml
environment:
@@ -25,7 +25,7 @@ services:
FRP_ADMIN_USER: ${FRP_ADMIN_USER:-admin}
FRP_ADMIN_PASSWORD: ${FRP_ADMIN_PASSWORD:-password}
extra_hosts:
- "host.docker.internal:host-gateway"
- 'host.docker.internal:host-gateway'
deploy:
resources:
limits:
@@ -36,12 +36,11 @@ services:
memory: ${FRPC_MEMORY_RESERVATION:-64M}
healthcheck:
test:
[
"CMD",
"sh",
"-c",
"curl -f http://$${FRP_ADMIN_USER}:$${FRP_ADMIN_PASSWORD}@localhost:$${FRP_ADMIN_PORT}/api/status || exit 1",
]
- CMD
- sh
- -c
- 'curl -f http://$${FRP_ADMIN_USER}:$${FRP_ADMIN_PASSWORD}@localhost:$${FRP_ADMIN_PORT}/api/status || exit 1'
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
frps:
@@ -26,7 +26,7 @@ services:
FRP_ADMIN_USER: ${FRP_ADMIN_USER:-admin}
FRP_ADMIN_PASSWORD: ${FRP_ADMIN_PASSWORD:-password}
extra_hosts:
- "host.docker.internal:host-gateway"
- 'host.docker.internal:host-gateway'
deploy:
resources:
limits:
@@ -37,12 +37,11 @@ services:
memory: ${FRPS_MEMORY_RESERVATION:-64M}
healthcheck:
test:
[
"CMD",
"sh",
"-c",
"curl -f http://$${FRP_ADMIN_USER}:$${FRP_ADMIN_PASSWORD}@localhost:$${FRP_ADMIN_PORT}/api/serverinfo || exit 1",
]
- CMD
- sh
- -c
- 'curl -f http://$${FRP_ADMIN_USER}:$${FRP_ADMIN_PASSWORD}@localhost:$${FRP_ADMIN_PORT}/api/serverinfo || exit 1'
interval: 30s
timeout: 10s
retries: 3

View File

@@ -17,8 +17,8 @@ This service sets up a Gitea Runner.
```yaml
cache:
enabled: true
dir: ""
host: "192.168.8.17"
dir: ''
host: 192.168.8.17
port: 8088
```

View File

@@ -17,8 +17,8 @@
```yaml
cache:
enabled: true
dir: ""
host: "192.168.8.17"
dir: ''
host: 192.168.8.17
port: 8088
```

View File

@@ -36,40 +36,40 @@ runner:
# It works when something like `uses: actions/checkout@v4` is used and DEFAULT_ACTIONS_URL is set to github,
# and github_mirror is not empty. In this case,
# it replaces https://github.com with the value here, which is useful for some special network environments.
github_mirror: ""
github_mirror: ''
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: "macos-arm64:host" or "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
# Find more images provided by Gitea at https://gitea.com/docker.gitea.com/runner-images .
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when execute `daemon`, will use labels in `.runner` file.
labels:
- "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
- "ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04"
- "ubuntu-20.04:docker://docker.gitea.com/runner-images:ubuntu-20.04"
- 'ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest'
- 'ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04'
- 'ubuntu-20.04:docker://docker.gitea.com/runner-images:ubuntu-20.04'
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
dir: ''
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
host: ""
host: ''
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
external_server: ''
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: ""
network: ''
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
@@ -94,7 +94,7 @@ container:
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
docker_host: ''
# Pull docker image(s) even if already present
force_pull: true
# Rebuild docker image(s) even if already present

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
gitea_runner:

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
gitea:
@@ -23,8 +23,8 @@ services:
- gitea_data:/var/lib/gitea
- gitea_config:/etc/gitea
ports:
- "${GITEA_HTTP_PORT:-3000}:3000"
- "${GITEA_SSH_PORT:-2222}:2222"
- '${GITEA_HTTP_PORT:-3000}:3000'
- '${GITEA_SSH_PORT:-2222}:2222'
depends_on:
db:
condition: service_healthy
@@ -38,14 +38,13 @@ services:
memory: ${GITEA_MEMORY_RESERVATION:-512M}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:3000/",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:3000/'
interval: 30s
timeout: 10s
retries: 3
@@ -72,7 +71,7 @@ services:
cpus: ${GITEA_DB_CPU_RESERVATION:-0.5}
memory: ${GITEA_DB_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
test: [CMD-SHELL, pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -25,24 +25,23 @@ You can edit the `config.toml` file to modify the configuration:
```toml
[[runners]]
[runners.docker]
tls_verify = false
pull_policy = "if-not-present"
image = "local/docker:1.0"
privileged = true
disable_entrypoint_overwrite = false
oom_kill_disable = false
disable_cache = false
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",
"/cache",
"/builds:/builds"
]
extra_hosts = [
"host.docker.internal:host-gateway",
]
shm_size = 0
network_mtu = 0
[runners.docker]
tls_verify = false
pull_policy = "if-not-present"
image = "local/docker:1.0"
privileged = true
disable_entrypoint_overwrite = false
oom_kill_disable = false
disable_cache = false
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",
"/cache",
"/builds:/builds"
]
extra_hosts = [ "host.docker.internal:host-gateway", ]
shm_size = 0
network_mtu = 0
```
## Services

View File

@@ -25,24 +25,23 @@
```toml
[[runners]]
[runners.docker]
tls_verify = false
pull_policy = "if-not-present"
image = "local/docker:1.0"
privileged = true
disable_entrypoint_overwrite = false
oom_kill_disable = false
disable_cache = false
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",
"/cache",
"/builds:/builds"
]
extra_hosts = [
"host.docker.internal:host-gateway",
]
shm_size = 0
network_mtu = 0
[runners.docker]
tls_verify = false
pull_policy = "if-not-present"
image = "local/docker:1.0"
privileged = true
disable_entrypoint_overwrite = false
oom_kill_disable = false
disable_cache = false
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",
"/cache",
"/builds:/builds"
]
extra_hosts = [ "host.docker.internal:host-gateway", ]
shm_size = 0
network_mtu = 0
```
## 服务

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
gitlab-runner:

View File

@@ -4,16 +4,16 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
gitlab:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}gitlab/gitlab-ce:${GITLAB_VERSION:-18.8.3-ce.0}
ports:
- "${GITLAB_PORT_OVERRIDE_HTTPS:-5443}:443"
- "${GITLAB_PORT_OVERRIDE_HTTP:-5080}:80"
- "${GITLAB_PORT_OVERRIDE_SSH:-5022}:22"
- '${GITLAB_PORT_OVERRIDE_HTTPS:-5443}:443'
- '${GITLAB_PORT_OVERRIDE_HTTP:-5080}:80'
- '${GITLAB_PORT_OVERRIDE_SSH:-5022}:22'
volumes:
- ./config:/etc/gitlab
- gitlab_logs:/var/log/gitlab
@@ -30,7 +30,7 @@ services:
cpus: ${GITLAB_CPU_RESERVATION:-1.0}
memory: ${GITLAB_MEMORY_RESERVATION:-4G}
healthcheck:
test: ["CMD", "/opt/gitlab/bin/gitlab-healthcheck", "--fail"]
test: [CMD, /opt/gitlab/bin/gitlab-healthcheck, --fail]
interval: 60s
timeout: 30s
retries: 5

View File

@@ -50,8 +50,8 @@ deploy:
reservations:
devices:
- driver: nvidia
device_ids: [ '0' ]
capabilities: [ gpu ]
device_ids: ['0']
capabilities: [gpu]
```
### Requirements
@@ -78,8 +78,8 @@ To use AMD GPUs with ROCm support:
reservations:
devices:
- driver: amdgpu
device_ids: [ '0' ]
capabilities: [ gpu ]
device_ids: ['0']
capabilities: [gpu]
```
## Usage

View File

@@ -50,8 +50,8 @@ deploy:
reservations:
devices:
- driver: nvidia
device_ids: [ '0' ]
capabilities: [ gpu ]
device_ids: ['0']
capabilities: [gpu]
```
### 要求
@@ -78,8 +78,8 @@ deploy:
reservations:
devices:
- driver: amdgpu
device_ids: [ '0' ]
capabilities: [ gpu ]
device_ids: ['0']
capabilities: [gpu]
```
## 使用方法

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
gpustack:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}gpustack/gpustack:${GPUSTACK_VERSION:-v0.7.1}
ports:
- "${GPUSTACK_PORT_OVERRIDE:-80}:80"
- '${GPUSTACK_PORT_OVERRIDE:-80}:80'
volumes:
- gpustack_data:/var/lib/gpustack
environment:
@@ -33,10 +33,10 @@ services:
memory: ${GPUSTACK_MEMORY_RESERVATION:-4G}
devices:
- driver: nvidia
device_ids: [ '0' ]
capabilities: [ gpu ]
device_ids: ['0']
capabilities: [gpu]
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:80/health"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:80/health']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
grafana:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}grafana/grafana:${GRAFANA_VERSION:-12.3.2}
ports:
- "${GRAFANA_PORT_OVERRIDE:-3000}:3000"
- '${GRAFANA_PORT_OVERRIDE:-3000}:3000'
volumes:
- grafana_data:/var/lib/grafana
- grafana_logs:/var/log/grafana
@@ -27,7 +27,7 @@ services:
- GF_INSTALL_PLUGINS=${GRAFANA_PLUGINS:-}
- GF_SERVER_ROOT_URL=${GRAFANA_ROOT_URL:-http://localhost:3000}
- GF_SECURITY_SECRET_KEY=${GRAFANA_SECRET_KEY:-}
user: "472:472" # Grafana user
user: '472:472' # Grafana user
deploy:
resources:
limits:
@@ -38,14 +38,13 @@ services:
memory: ${GRAFANA_MEMORY_RESERVATION:-256M}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:3000/api/health",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:3000/api/health'
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
halo:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}halohub/halo:${HALO_VERSION:-2.21.9}
ports:
- "${HALO_PORT:-8090}:8090"
- '${HALO_PORT:-8090}:8090'
volumes:
- halo_data:/root/.halo2
environment:
@@ -35,7 +35,7 @@ services:
cpus: ${HALO_CPU_RESERVATION:-0.5}
memory: ${HALO_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8090/actuator/health"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:8090/actuator/health']
interval: 30s
timeout: 10s
retries: 3
@@ -53,7 +53,7 @@ services:
volumes:
- halo_db_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready"]
test: [CMD-SHELL, pg_isready]
interval: 10s
timeout: 5s
retries: 5

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
# Harbor Core
@@ -47,7 +47,7 @@ services:
cpus: ${HARBOR_CORE_CPU_RESERVATION:-0.5}
memory: ${HARBOR_CORE_MEMORY_RESERVATION:-1G}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/api/v2.0/ping"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:8080/api/v2.0/ping']
interval: 30s
timeout: 10s
retries: 3
@@ -102,7 +102,7 @@ services:
cpus: ${HARBOR_REGISTRY_CPU_RESERVATION:-0.25}
memory: ${HARBOR_REGISTRY_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5000/"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:5000/']
interval: 30s
timeout: 10s
retries: 3
@@ -128,8 +128,8 @@ services:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}goharbor/nginx-photon:${HARBOR_VERSION:-v2.12.0}
ports:
- "${HARBOR_HTTP_PORT_OVERRIDE:-80}:8080"
- "${HARBOR_HTTPS_PORT_OVERRIDE:-443}:8443"
- '${HARBOR_HTTP_PORT_OVERRIDE:-80}:8080'
- '${HARBOR_HTTPS_PORT_OVERRIDE:-443}:8443'
depends_on:
harbor-core:
condition: service_healthy
@@ -148,7 +148,7 @@ services:
cpus: ${HARBOR_PROXY_CPU_RESERVATION:-0.25}
memory: ${HARBOR_PROXY_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:8080/']
interval: 30s
timeout: 10s
retries: 3
@@ -173,7 +173,7 @@ services:
cpus: ${HARBOR_DB_CPU_RESERVATION:-0.25}
memory: ${HARBOR_DB_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
test: [CMD-SHELL, pg_isready -U postgres]
interval: 30s
timeout: 10s
retries: 3
@@ -196,7 +196,7 @@ services:
cpus: ${HARBOR_REDIS_CPU_RESERVATION:-0.10}
memory: ${HARBOR_REDIS_MEMORY_RESERVATION:-64M}
healthcheck:
test: ["CMD", "redis-cli", "ping"]
test: [CMD, redis-cli, ping]
interval: 10s
timeout: 3s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
hbase:
@@ -17,11 +17,11 @@ services:
- hbase_data:/hbase-data
- hbase_zookeeper_data:/zookeeper-data
ports:
- "${HBASE_MASTER_PORT_OVERRIDE:-16000}:16000"
- "${HBASE_MASTER_INFO_PORT_OVERRIDE:-16010}:16010"
- "${HBASE_REGIONSERVER_PORT_OVERRIDE:-16020}:16020"
- "${HBASE_REGIONSERVER_INFO_PORT_OVERRIDE:-16030}:16030"
- "${HBASE_ZOOKEEPER_PORT_OVERRIDE:-2181}:2181"
- '${HBASE_MASTER_PORT_OVERRIDE:-16000}:16000'
- '${HBASE_MASTER_INFO_PORT_OVERRIDE:-16010}:16010'
- '${HBASE_REGIONSERVER_PORT_OVERRIDE:-16020}:16020'
- '${HBASE_REGIONSERVER_INFO_PORT_OVERRIDE:-16030}:16030'
- '${HBASE_ZOOKEEPER_PORT_OVERRIDE:-2181}:2181'
deploy:
resources:
limits:
@@ -31,7 +31,7 @@ services:
cpus: ${HBASE_CPU_RESERVATION:-1.0}
memory: ${HBASE_MEMORY_RESERVATION:-2G}
healthcheck:
test: ["CMD-SHELL", "echo 'status' | hbase shell -n || exit 1"]
test: [CMD-SHELL, "echo 'status' | hbase shell -n || exit 1"]
interval: 30s
timeout: 10s
retries: 5

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
influxdb:
@@ -24,7 +24,7 @@ services:
- influxdb_data:/var/lib/influxdb2
- influxdb_config:/etc/influxdb2
ports:
- "${INFLUXDB_PORT_OVERRIDE:-8086}:8086"
- '${INFLUXDB_PORT_OVERRIDE:-8086}:8086'
deploy:
resources:
limits:
@@ -34,7 +34,7 @@ services:
cpus: ${INFLUXDB_CPU_RESERVATION:-0.5}
memory: ${INFLUXDB_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD", "influx", "ping"]
test: [CMD, influx, ping]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -32,7 +32,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
inngest:
@@ -46,8 +46,8 @@ services:
--postgres-uri postgresql://${INNGEST_PG_USER:-inngest}:${INNGEST_PG_PASSWORD:-inngest}@postgres:5432/${INNGEST_PG_DB:-inngest}?sslmode=disable
--redis-uri redis://redis:6379
ports:
- "${INNGEST_PORT_OVERRIDE:-8288}:8288"
- "${INNGEST_GATEWAY_PORT_OVERRIDE:-8289}:8289"
- '${INNGEST_PORT_OVERRIDE:-8288}:8288'
- '${INNGEST_GATEWAY_PORT_OVERRIDE:-8289}:8289'
environment:
- TZ=${TZ:-UTC}
- INNGEST_EVENT_KEY=${INNGEST_EVENT_KEY:-deadbeefcafebabe0123456789abcdef}
@@ -60,13 +60,12 @@ services:
condition: service_healthy
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"--quiet",
"http://localhost:8288/v0/health",
]
- CMD
- wget
- --spider
- --quiet
- 'http://localhost:8288/v0/health'
interval: 30s
timeout: 10s
retries: 3
@@ -92,10 +91,9 @@ services:
- POSTGRES_DB=${INNGEST_PG_DB:-inngest}
healthcheck:
test:
[
"CMD-SHELL",
"pg_isready -U ${INNGEST_PG_USER:-inngest} -d ${INNGEST_PG_DB:-inngest}",
]
- CMD-SHELL
- 'pg_isready -U ${INNGEST_PG_USER:-inngest} -d ${INNGEST_PG_DB:-inngest}'
interval: 10s
timeout: 5s
retries: 5
@@ -123,7 +121,7 @@ services:
volumes:
- inngest_redis_data:/data
healthcheck:
test: ["CMD", "redis-cli", "ping"]
test: [CMD, redis-cli, ping]
interval: 10s
timeout: 3s
retries: 3

View File

@@ -52,14 +52,14 @@ Jenkins can be configured using Configuration as Code (JCasC). Create a `jenkins
Example configuration:
```yaml
jenkins:
systemMessage: "Jenkins configured automatically by Jenkins Configuration as Code plugin"
systemMessage: Jenkins configured automatically by Jenkins Configuration as Code plugin
securityRealm:
local:
allowsSignup: false
users:
- id: admin
password: admin123
- id: admin
password: admin123
authorizationStrategy:
loggedInUsersCanDoAnything:

View File

@@ -53,14 +53,14 @@ Jenkins 可以使用配置即代码JCasC进行配置。创建一个 `jenki
```yaml
jenkins:
systemMessage: "Jenkins configured automatically by Jenkins Configuration as Code plugin"
systemMessage: Jenkins configured automatically by Jenkins Configuration as Code plugin
securityRealm:
local:
allowsSignup: false
users:
- id: admin
password: admin123
- id: admin
password: admin123
authorizationStrategy:
loggedInUsersCanDoAnything:

View File

@@ -4,15 +4,15 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
jenkins:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}jenkins/jenkins:${JENKINS_VERSION:-2.541-lts-jdk17}
ports:
- "${JENKINS_HTTP_PORT_OVERRIDE:-8080}:8080"
- "${JENKINS_AGENT_PORT_OVERRIDE:-50000}:50000"
- '${JENKINS_HTTP_PORT_OVERRIDE:-8080}:8080'
- '${JENKINS_AGENT_PORT_OVERRIDE:-50000}:50000'
volumes:
- jenkins_home:/var/jenkins_home
- /var/run/docker.sock:/var/run/docker.sock:ro
@@ -24,7 +24,7 @@ services:
- JENKINS_OPTS=${JENKINS_OPTS:---httpPort=8080}
- JAVA_OPTS=${JAVA_OPTS:--Djenkins.install.runSetupWizard=false -Xmx2g}
- CASC_JENKINS_CONFIG=${CASC_JENKINS_CONFIG:-/var/jenkins_home/casc_configs}
user: "${JENKINS_USER_ID:-1000}:${JENKINS_GROUP_ID:-1000}"
user: '${JENKINS_USER_ID:-1000}:${JENKINS_GROUP_ID:-1000}'
deploy:
resources:
limits:
@@ -34,7 +34,7 @@ services:
cpus: ${JENKINS_CPU_RESERVATION:-0.50}
memory: ${JENKINS_MEMORY_RESERVATION:-1G}
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:8080/login || exit 1"]
test: [CMD-SHELL, 'curl -f http://localhost:8080/login || exit 1']
interval: 30s
timeout: 10s
retries: 5

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
# Zookeeper for Kafka coordination
@@ -12,7 +12,7 @@ services:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}confluentinc/cp-zookeeper:${KAFKA_VERSION:-7.8.0}
ports:
- "${ZOOKEEPER_CLIENT_PORT_OVERRIDE:-2181}:2181"
- '${ZOOKEEPER_CLIENT_PORT_OVERRIDE:-2181}:2181'
volumes:
- zookeeper_data:/var/lib/zookeeper/data
- zookeeper_log:/var/lib/zookeeper/log
@@ -34,7 +34,7 @@ services:
cpus: ${ZOOKEEPER_CPU_RESERVATION:-0.25}
memory: ${ZOOKEEPER_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD-SHELL", "echo ruok | nc localhost 2181 | grep imok"]
test: [CMD-SHELL, 'echo ruok | nc localhost 2181 | grep imok']
interval: 30s
timeout: 10s
retries: 3
@@ -48,8 +48,8 @@ services:
zookeeper:
condition: service_healthy
ports:
- "${KAFKA_BROKER_PORT_OVERRIDE:-9092}:9092"
- "${KAFKA_JMX_PORT_OVERRIDE:-9999}:9999"
- '${KAFKA_BROKER_PORT_OVERRIDE:-9092}:9092'
- '${KAFKA_JMX_PORT_OVERRIDE:-9999}:9999'
volumes:
- kafka_data:/var/lib/kafka/data
environment:
@@ -83,10 +83,9 @@ services:
memory: ${KAFKA_MEMORY_RESERVATION:-1G}
healthcheck:
test:
[
"CMD-SHELL",
"kafka-broker-api-versions --bootstrap-server localhost:9092",
]
- CMD-SHELL
- 'kafka-broker-api-versions --bootstrap-server localhost:9092'
interval: 30s
timeout: 10s
retries: 5
@@ -104,7 +103,7 @@ services:
zookeeper:
condition: service_healthy
ports:
- "${KAFKA_UI_PORT_OVERRIDE:-8080}:8080"
- '${KAFKA_UI_PORT_OVERRIDE:-8080}:8080'
environment:
- TZ=${TZ:-UTC}
- KAFKA_CLUSTERS_0_NAME=local

View File

@@ -30,7 +30,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
kestra:
@@ -39,8 +39,8 @@ services:
container_name: kestra
command: server standalone
ports:
- "${KESTRA_PORT_OVERRIDE:-8080}:8080"
- "${KESTRA_MANAGEMENT_PORT:-8081}:8081"
- '${KESTRA_PORT_OVERRIDE:-8080}:8080'
- '${KESTRA_MANAGEMENT_PORT:-8081}:8081'
environment:
# Database configuration
- KESTRA_CONFIGURATION=datasources.postgres.url=jdbc:postgresql://postgres:5432/${POSTGRES_DB}
@@ -71,12 +71,12 @@ services:
volumes:
- kestra_data:/app/storage
- kestra_logs:/app/logs
- /var/run/docker.sock:/var/run/docker.sock:ro # For Docker task runner
- /var/run/docker.sock:/var/run/docker.sock:ro # For Docker task runner
depends_on:
postgres:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/health"]
test: [CMD, wget, --quiet, --tries=1, --spider, 'http://localhost:8080/health']
interval: 30s
timeout: 10s
retries: 5
@@ -84,11 +84,11 @@ services:
deploy:
resources:
limits:
cpus: "${KESTRA_CPU_LIMIT:-2.0}"
memory: "${KESTRA_MEMORY_LIMIT:-2G}"
cpus: '${KESTRA_CPU_LIMIT:-2.0}'
memory: '${KESTRA_MEMORY_LIMIT:-2G}'
reservations:
cpus: "${KESTRA_CPU_RESERVATION:-0.5}"
memory: "${KESTRA_MEMORY_RESERVATION:-512M}"
cpus: '${KESTRA_CPU_RESERVATION:-0.5}'
memory: '${KESTRA_MEMORY_RESERVATION:-512M}'
postgres:
<<: *defaults
@@ -103,18 +103,18 @@ services:
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-kestra} -d ${POSTGRES_DB:-kestra}"]
test: [CMD-SHELL, 'pg_isready -U ${POSTGRES_USER:-kestra} -d ${POSTGRES_DB:-kestra}']
interval: 10s
timeout: 5s
retries: 5
deploy:
resources:
limits:
cpus: "${POSTGRES_CPU_LIMIT:-1.0}"
memory: "${POSTGRES_MEMORY_LIMIT:-1G}"
cpus: '${POSTGRES_CPU_LIMIT:-1.0}'
memory: '${POSTGRES_MEMORY_LIMIT:-1G}'
reservations:
cpus: "${POSTGRES_CPU_RESERVATION:-0.25}"
memory: "${POSTGRES_MEMORY_RESERVATION:-256M}"
cpus: '${POSTGRES_CPU_RESERVATION:-0.25}'
memory: '${POSTGRES_MEMORY_RESERVATION:-256M}'
volumes:
postgres_data:

View File

@@ -45,8 +45,8 @@ Uncomment the configuration volume in `docker-compose.yaml` and create `kibana.y
```yaml
server.name: kibana
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://elasticsearch:9200"]
server.host: 0.0.0.0
elasticsearch.hosts: ['http://elasticsearch:9200']
monitoring.ui.container.elasticsearch.enabled: true
```

View File

@@ -45,8 +45,8 @@ Kibana 需要运行 Elasticsearch。确保 Elasticsearch 在配置的 `ELASTICSE
```yaml
server.name: kibana
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://elasticsearch:9200"]
server.host: 0.0.0.0
elasticsearch.hosts: ['http://elasticsearch:9200']
monitoring.ui.container.elasticsearch.enabled: true
```

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
kibana:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}docker.elastic.co/kibana/kibana:${KIBANA_VERSION:-9.2.0}
ports:
- "${KIBANA_PORT_OVERRIDE:-5601}:5601"
- '${KIBANA_PORT_OVERRIDE:-5601}:5601'
volumes:
- kibana_data:/usr/share/kibana/data
@@ -35,7 +35,7 @@ services:
cpus: ${KIBANA_CPU_RESERVATION:-0.25}
memory: ${KIBANA_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:5601/api/status || exit 1"]
test: [CMD-SHELL, 'curl -f http://localhost:5601/api/status || exit 1']
interval: 30s
timeout: 10s
retries: 5

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
kodbox:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}kodcloud/kodbox:${KODBOX_VERSION:-1.62}
ports:
- "${KODBOX_PORT:-80}:80"
- '${KODBOX_PORT:-80}:80'
volumes:
- kodbox_data:/var/www/html
environment:
@@ -38,7 +38,7 @@ services:
cpus: ${KODBOX_CPU_RESERVATION:-0.5}
memory: ${KODBOX_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:80/"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:80/']
interval: 30s
timeout: 10s
retries: 3
@@ -60,7 +60,7 @@ services:
volumes:
- kodbox_db_data:/var/lib/mysql
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-p${MYSQL_ROOT_PASSWORD:-root123}"]
test: [CMD, mysqladmin, ping, -h, localhost, -u, root, '-p${MYSQL_ROOT_PASSWORD:-root123}']
interval: 10s
timeout: 5s
retries: 5
@@ -86,7 +86,7 @@ services:
volumes:
- kodbox_redis_data:/data
healthcheck:
test: ["CMD", "redis-cli", "ping"]
test: [CMD, redis-cli, ping]
interval: 5s
timeout: 3s
retries: 5

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
# Kong Database
@@ -27,7 +27,7 @@ services:
cpus: ${KONG_DB_CPU_RESERVATION:-0.25}
memory: ${KONG_DB_MEMORY_RESERVATION:-128M}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U kong"]
test: [CMD-SHELL, pg_isready -U kong]
interval: 30s
timeout: 5s
retries: 5
@@ -48,7 +48,7 @@ services:
- KONG_PG_PASSWORD=${KONG_DB_PASSWORD:-kongpass}
- KONG_PG_DATABASE=kong
command: kong migrations bootstrap
restart: "no"
    restart: 'no'
# Kong Gateway
kong:
@@ -60,10 +60,10 @@ services:
kong-migrations:
condition: service_completed_successfully
ports:
- "${KONG_PROXY_PORT_OVERRIDE:-8000}:8000"
- "${KONG_PROXY_SSL_PORT_OVERRIDE:-8443}:8443"
- "${KONG_ADMIN_API_PORT_OVERRIDE:-8001}:8001"
- "${KONG_ADMIN_SSL_PORT_OVERRIDE:-8444}:8444"
- '${KONG_PROXY_PORT_OVERRIDE:-8000}:8000'
- '${KONG_PROXY_SSL_PORT_OVERRIDE:-8443}:8443'
- '${KONG_ADMIN_API_PORT_OVERRIDE:-8001}:8001'
- '${KONG_ADMIN_SSL_PORT_OVERRIDE:-8444}:8444'
# Custom configuration
# - ./kong.conf:/etc/kong/kong.conf:ro
@@ -89,7 +89,7 @@ services:
cpus: ${KONG_CPU_RESERVATION:-0.25}
memory: ${KONG_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD-SHELL", "kong health"]
test: [CMD-SHELL, kong health]
interval: 30s
timeout: 10s
retries: 5
@@ -103,7 +103,7 @@ services:
kong:
condition: service_healthy
ports:
- "${KONG_GUI_PORT_OVERRIDE:-1337}:1337"
- '${KONG_GUI_PORT_OVERRIDE:-1337}:1337'
volumes:
- konga_data:/app/kongadata
environment:

View File

@@ -4,15 +4,15 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
libreoffice:
<<: *defaults
image: lscr.io/linuxserver/libreoffice:${LIBREOFFICE_VERSION:-latest}
ports:
- "${LIBREOFFICE_HTTP_PORT_OVERRIDE:-3000}:3000"
- "${LIBREOFFICE_HTTPS_PORT_OVERRIDE:-3001}:3001"
- '${LIBREOFFICE_HTTP_PORT_OVERRIDE:-3000}:3000'
- '${LIBREOFFICE_HTTPS_PORT_OVERRIDE:-3001}:3001'
volumes:
- libreoffice_config:/config
environment:
@@ -33,7 +33,7 @@ services:
cpus: ${LIBREOFFICE_CPU_RESERVATION:-0.50}
memory: ${LIBREOFFICE_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD", "curl", "-f", "-k", "https://localhost:3001/"]
test: [CMD, curl, -f, -k, 'https://localhost:3001/']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -8,7 +8,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
libsql:
@@ -16,8 +16,8 @@ services:
image: ${GHCR_IO_REGISTRY:-ghcr.io}/tursodatabase/libsql-server:${LIBSQL_VERSION:-latest}
platform: ${LIBSQL_PLATFORM:-linux/amd64}
ports:
- "${LIBSQL_HTTP_PORT_OVERRIDE:-8080}:8080" # HTTP/Hrana API port
- "${LIBSQL_GRPC_PORT_OVERRIDE:-5001}:5001" # gRPC port for replication
- '${LIBSQL_HTTP_PORT_OVERRIDE:-8080}:8080' # HTTP/Hrana API port
- '${LIBSQL_GRPC_PORT_OVERRIDE:-5001}:5001' # gRPC port for replication
volumes:
- libsql_data:/var/lib/sqld
environment:
@@ -31,7 +31,7 @@ services:
# - SQLD_AUTH_JWT_KEY_FILE=${LIBSQL_AUTH_JWT_KEY_FILE:-}
# - SQLD_AUTH_JWT_KEY=${LIBSQL_AUTH_JWT_KEY:-}
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/health"]
test: [CMD, wget, --quiet, --tries=1, --spider, 'http://localhost:8080/health']
interval: 30s
timeout: 10s
retries: 3
@@ -53,8 +53,8 @@ services:
profiles:
- replica
ports:
- "${LIBSQL_REPLICA_HTTP_PORT_OVERRIDE:-8081}:8080"
- "${LIBSQL_REPLICA_GRPC_PORT_OVERRIDE:-5002}:5001"
- '${LIBSQL_REPLICA_HTTP_PORT_OVERRIDE:-8081}:8080'
- '${LIBSQL_REPLICA_GRPC_PORT_OVERRIDE:-5002}:5001'
volumes:
- libsql_replica_data:/var/lib/sqld
environment:
@@ -65,7 +65,7 @@ services:
- SQLD_HTTP_LISTEN_ADDR=${LIBSQL_HTTP_LISTEN_ADDR:-0.0.0.0:8080}
- SQLD_GRPC_LISTEN_ADDR=${LIBSQL_GRPC_LISTEN_ADDR:-0.0.0.0:5001}
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/health"]
test: [CMD, wget, --quiet, --tries=1, --spider, 'http://localhost:8080/health']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
litellm:
@@ -20,7 +20,7 @@ services:
# command:
# - "--config=/app/config.yaml"
ports:
- "${LITELLM_PORT_OVERRIDE:-4000}:4000"
- '${LITELLM_PORT_OVERRIDE:-4000}:4000'
environment:
- DATABASE_URL=postgresql://llmproxy:${POSTGRES_PASSWORD}@db:5432/litellm
- STORE_MODEL_IN_DB=True
@@ -31,13 +31,13 @@ services:
db:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:4000/health/liveliness"]
test: [CMD, curl, -f, 'http://localhost:4000/health/liveliness']
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
extra_hosts:
- "host.docker.internal:host-gateway"
- 'host.docker.internal:host-gateway'
deploy:
resources:
limits:
@@ -56,11 +56,11 @@ services:
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- TZ=${TZ:-UTC}
ports:
- "${POSTGRES_PORT_OVERRIDE:-5432}:5432"
- '${POSTGRES_PORT_OVERRIDE:-5432}:5432'
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"]
test: [CMD-SHELL, pg_isready -d litellm -U llmproxy]
interval: 10s
timeout: 5s
retries: 5
@@ -83,15 +83,15 @@ services:
- prometheus_data:/prometheus
- ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
ports:
- "${PROMETHEUS_PORT_OVERRIDE:-9090}:9090"
- '${PROMETHEUS_PORT_OVERRIDE:-9090}:9090'
command:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--storage.tsdb.retention.time=15d"
- --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.path=/prometheus
- --storage.tsdb.retention.time=15d
environment:
- TZ=${TZ:-UTC}
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:9090/-/healthy"]
test: [CMD, wget, --spider, -q, 'http://localhost:9090/-/healthy']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -2,6 +2,6 @@ global:
scrape_interval: 15s
scrape_configs:
- job_name: 'litellm'
- job_name: litellm
static_configs:
- targets: ['litellm:4000'] # Assuming Litellm exposes metrics at port 4000
- targets: ['litellm:4000'] # Assuming Litellm exposes metrics at port 4000

View File

@@ -8,7 +8,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
# llama.cpp server - OpenAI-compatible API server
@@ -17,32 +17,31 @@ services:
<<: *defaults
image: ${GHCR_REGISTRY:-ghcr.io/}ggml-org/llama.cpp:${LLAMA_CPP_SERVER_VARIANT:-server}
ports:
- "${LLAMA_CPP_SERVER_PORT_OVERRIDE:-8080}:8080"
- '${LLAMA_CPP_SERVER_PORT_OVERRIDE:-8080}:8080'
volumes:
- llama_cpp_models:/models
command:
- "-m"
- "${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}"
- "--port"
- "8080"
- "--host"
- "0.0.0.0"
- "-n"
- "${LLAMA_CPP_CONTEXT_SIZE:-512}"
- "--n-gpu-layers"
- "${LLAMA_CPP_GPU_LAYERS:-0}"
- -m
- '${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}'
- --port
- '8080'
- --host
- 0.0.0.0
- -n
- '${LLAMA_CPP_CONTEXT_SIZE:-512}'
- --n-gpu-layers
- '${LLAMA_CPP_GPU_LAYERS:-0}'
environment:
- TZ=${TZ:-UTC}
healthcheck:
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost:8080/health",
]
- CMD
- wget
- --quiet
- --tries=1
- --spider
- 'http://localhost:8080/health'
interval: 30s
timeout: 10s
retries: 3
@@ -63,32 +62,31 @@ services:
<<: *defaults
image: ${GHCR_REGISTRY:-ghcr.io/}ggml-org/llama.cpp:server-cuda
ports:
- "${LLAMA_CPP_SERVER_PORT_OVERRIDE:-8080}:8080"
- '${LLAMA_CPP_SERVER_PORT_OVERRIDE:-8080}:8080'
volumes:
- llama_cpp_models:/models
command:
- "-m"
- "${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}"
- "--port"
- "8080"
- "--host"
- "0.0.0.0"
- "-n"
- "${LLAMA_CPP_CONTEXT_SIZE:-512}"
- "--n-gpu-layers"
- "${LLAMA_CPP_GPU_LAYERS:-99}"
- -m
- '${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}'
- --port
- '8080'
- --host
- 0.0.0.0
- -n
- '${LLAMA_CPP_CONTEXT_SIZE:-512}'
- --n-gpu-layers
- '${LLAMA_CPP_GPU_LAYERS:-99}'
environment:
- TZ=${TZ:-UTC}
healthcheck:
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost:8080/health",
]
- CMD
- wget
- --quiet
- --tries=1
- --spider
- 'http://localhost:8080/health'
interval: 30s
timeout: 10s
retries: 3
@@ -114,35 +112,34 @@ services:
<<: *defaults
image: ${GHCR_REGISTRY:-ghcr.io/}ggml-org/llama.cpp:server-rocm
ports:
- "${LLAMA_CPP_SERVER_PORT_OVERRIDE:-8080}:8080"
- '${LLAMA_CPP_SERVER_PORT_OVERRIDE:-8080}:8080'
volumes:
- llama_cpp_models:/models
devices:
- /dev/kfd
- /dev/dri
command:
- "-m"
- "${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}"
- "--port"
- "8080"
- "--host"
- "0.0.0.0"
- "-n"
- "${LLAMA_CPP_CONTEXT_SIZE:-512}"
- "--n-gpu-layers"
- "${LLAMA_CPP_GPU_LAYERS:-99}"
- -m
- '${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}'
- --port
- '8080'
- --host
- 0.0.0.0
- -n
- '${LLAMA_CPP_CONTEXT_SIZE:-512}'
- --n-gpu-layers
- '${LLAMA_CPP_GPU_LAYERS:-99}'
environment:
- TZ=${TZ:-UTC}
healthcheck:
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost:8080/health",
]
- CMD
- wget
- --quiet
- --tries=1
- --spider
- 'http://localhost:8080/health'
interval: 30s
timeout: 10s
retries: 3
@@ -167,12 +164,12 @@ services:
- llama_cpp_models:/models
entrypoint: /app/llama-cli
command:
- "-m"
- "${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}"
- "-p"
- "${LLAMA_CPP_PROMPT:-Hello, how are you?}"
- "-n"
- "${LLAMA_CPP_CONTEXT_SIZE:-512}"
- -m
- '${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}'
- -p
- '${LLAMA_CPP_PROMPT:-Hello, how are you?}'
- -n
- '${LLAMA_CPP_CONTEXT_SIZE:-512}'
environment:
- TZ=${TZ:-UTC}
deploy:
@@ -192,7 +189,7 @@ services:
image: ${GHCR_REGISTRY:-ghcr.io/}ggml-org/llama.cpp:${LLAMA_CPP_FULL_VARIANT:-full}
volumes:
- llama_cpp_models:/models
command: ["sleep", "infinity"]
command: [sleep, infinity]
environment:
- TZ=${TZ:-UTC}
deploy:

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
lmdeploy:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}openmmlab/lmdeploy:${LMDEPLOY_VERSION:-v0.11.1-cu12.8}
ports:
- "${LMDEPLOY_PORT_OVERRIDE:-23333}:23333"
- '${LMDEPLOY_PORT_OVERRIDE:-23333}:23333'
volumes:
- lmdeploy_data:/root/.cache
environment:
@@ -23,11 +23,11 @@ services:
- api_server
- ${LMDEPLOY_MODEL:-internlm/internlm2-chat-1_8b}
- --server-name
- "0.0.0.0"
- 0.0.0.0
- --server-port
- "23333"
- '23333'
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:23333/v1/models"]
test: [CMD, curl, -f, 'http://localhost:23333/v1/models']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,17 +4,17 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
logstash:
<<: *defaults
image: docker.elastic.co/logstash/logstash:${LOGSTASH_VERSION:-8.16.1}
ports:
- "${LOGSTASH_BEATS_PORT_OVERRIDE:-5044}:5044"
- "${LOGSTASH_TCP_PORT_OVERRIDE:-5000}:5000/tcp"
- "${LOGSTASH_UDP_PORT_OVERRIDE:-5000}:5000/udp"
- "${LOGSTASH_HTTP_PORT_OVERRIDE:-9600}:9600"
- '${LOGSTASH_BEATS_PORT_OVERRIDE:-5044}:5044'
- '${LOGSTASH_TCP_PORT_OVERRIDE:-5000}:5000/tcp'
- '${LOGSTASH_UDP_PORT_OVERRIDE:-5000}:5000/udp'
- '${LOGSTASH_HTTP_PORT_OVERRIDE:-9600}:9600'
volumes:
- logstash_data:/usr/share/logstash/data
- logstash_logs:/usr/share/logstash/logs
@@ -44,7 +44,7 @@ services:
cpus: ${LOGSTASH_CPU_RESERVATION:-0.50}
memory: ${LOGSTASH_MEMORY_RESERVATION:-1G}
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:9600/_node/stats || exit 1"]
test: [CMD-SHELL, 'curl -f http://localhost:9600/_node/stats || exit 1']
interval: 30s
timeout: 10s
retries: 5

View File

@@ -90,8 +90,8 @@ You can configure Docker to send logs directly to Loki:
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
loki-batch-size: "400"
loki-url: 'http://localhost:3100/loki/api/v1/push'
loki-batch-size: '400'
```
### Using HTTP API

View File

@@ -90,8 +90,8 @@ services:
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
loki-batch-size: "400"
loki-url: 'http://localhost:3100/loki/api/v1/push'
loki-batch-size: '400'
```
### 使用 HTTP API

View File

@@ -4,21 +4,21 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
loki:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}grafana/loki:${LOKI_VERSION:-3.3.2}
ports:
- "${LOKI_PORT_OVERRIDE:-3100}:3100"
- '${LOKI_PORT_OVERRIDE:-3100}:3100'
volumes:
- loki_data:/loki
- ./loki-config.yaml:/etc/loki/local-config.yaml:ro
environment:
- TZ=${TZ:-UTC}
command: -config.file=/etc/loki/local-config.yaml
user: "10001:10001" # Loki user
user: '10001:10001' # Loki user
deploy:
resources:
limits:
@@ -28,7 +28,7 @@ services:
cpus: ${LOKI_CPU_RESERVATION:-0.25}
memory: ${LOKI_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3100/ready"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:3100/ready']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -46,5 +46,5 @@ ruler:
# Refer to the buildReport method to see what goes into a report.
#
# If you would like to disable reporting, uncomment the following lines:
#analytics:
# analytics:
# reporting_enabled: false

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
x-mariadb-galera: &mariadb-galera
<<: *defaults
@@ -34,7 +34,7 @@ x-mariadb-galera: &mariadb-galera
cpus: ${MARIADB_CPU_RESERVATION:-1.0}
memory: ${MARIADB_MEMORY_RESERVATION:-1G}
healthcheck:
test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
test: [CMD, healthcheck.sh, --connect, --innodb_initialized]
interval: 30s
timeout: 10s
retries: 3
@@ -45,7 +45,7 @@ services:
<<: *mariadb-galera
hostname: mariadb-galera-1
ports:
- "${MARIADB_PORT_1_OVERRIDE:-3306}:3306"
- '${MARIADB_PORT_1_OVERRIDE:-3306}:3306'
environment:
<<: *galera-env
WSREP_NODE_ADDRESS: mariadb-galera-1
@@ -67,7 +67,7 @@ services:
<<: *mariadb-galera
hostname: mariadb-galera-2
ports:
- "${MARIADB_PORT_2_OVERRIDE:-3307}:3306"
- '${MARIADB_PORT_2_OVERRIDE:-3307}:3306'
environment:
<<: *galera-env
WSREP_NODE_ADDRESS: mariadb-galera-2
@@ -91,7 +91,7 @@ services:
<<: *mariadb-galera
hostname: mariadb-galera-3
ports:
- "${MARIADB_PORT_3_OVERRIDE:-3308}:3306"
- '${MARIADB_PORT_3_OVERRIDE:-3308}:3306'
environment:
<<: *galera-env
WSREP_NODE_ADDRESS: mariadb-galera-3

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
memos:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}neosmemo/memos:${MEMOS_VERSION:-0.25.3}
ports:
- "${MEMOS_PORT_OVERRIDE:-5230}:5230"
- '${MEMOS_PORT_OVERRIDE:-5230}:5230'
volumes:
- memos_data:/var/opt/memos
environment:
@@ -33,7 +33,7 @@ services:
cpus: ${MEMOS_CPU_RESERVATION:-0.25}
memory: ${MEMOS_MEMORY_RESERVATION:-128M}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5230/"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:5230/']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
milvus-standalone-embed:
@@ -14,7 +14,7 @@ services:
- seccomp:unconfined
environment:
TZ: ${TZ:-UTC}
ETCD_USE_EMBED: "true"
ETCD_USE_EMBED: 'true'
ETCD_DATA_DIR: /var/lib/milvus/etcd
ETCD_CONFIG_PATH: /milvus/configs/embed_etcd.yaml
COMMON_STORAGETYPE: local
@@ -24,16 +24,16 @@ services:
- ./embed_etcd.yaml:/milvus/configs/embed_etcd.yaml
- ./user.yaml:/milvus/configs/user.yaml
ports:
- "${MILVUS_PORT_OVERRIDE_HTTP:-19530}:19530"
- "${MILVUS_PORT_OVERRIDE_WEBUI:-9091}:9091"
- "${MILVUS_PORT_OVERRIDE_ETCD:-2379}:2379"
- '${MILVUS_PORT_OVERRIDE_HTTP:-19530}:19530'
- '${MILVUS_PORT_OVERRIDE_WEBUI:-9091}:9091'
- '${MILVUS_PORT_OVERRIDE_ETCD:-2379}:2379'
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
test: [CMD, curl, -f, 'http://localhost:9091/healthz']
interval: 30s
start_period: 90s
timeout: 20s
retries: 3
command: ["milvus", "run", "standalone"]
command: [milvus, run, standalone]
deploy:
resources:
limits:
@@ -52,7 +52,7 @@ services:
TZ: ${TZ:-UTC}
MILVUS_URL: ${MILVUS_URL:-milvus-standalone-embed:19530}
ports:
- "${ATTU_OVERRIDE_PORT:-8000}:3000"
- '${ATTU_OVERRIDE_PORT:-8000}:3000'
depends_on:
milvus-standalone-embed:
condition: service_healthy
@@ -66,14 +66,13 @@ services:
memory: ${ATTU_MEMORY_RESERVATION:-128M}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:3000/",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:3000/'
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
etcd:
@@ -20,7 +20,7 @@ services:
- etcd_data:/etcd
command: etcd -advertise-client-urls=http://etcd:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
healthcheck:
test: ["CMD", "etcdctl", "endpoint", "health"]
test: [CMD, etcdctl, endpoint, health]
interval: 30s
timeout: 20s
retries: 3
@@ -42,13 +42,13 @@ services:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
ports:
- "${MINIO_PORT_OVERRIDE_API:-9000}:9000"
- "${MINIO_PORT_OVERRIDE_WEBUI:-9001}:9001"
- '${MINIO_PORT_OVERRIDE_API:-9000}:9000'
- '${MINIO_PORT_OVERRIDE_WEBUI:-9001}:9001'
volumes:
- minio_data:/minio_data
command: minio server /minio_data --console-address ":9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
test: [CMD, curl, -f, 'http://localhost:9000/minio/health/live']
interval: 30s
timeout: 20s
retries: 3
@@ -65,7 +65,7 @@ services:
milvus-standalone:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}milvusdb/milvus:${MILVUS_VERSION:-v2.6.7}
command: ["milvus", "run", "standalone"]
command: [milvus, run, standalone]
security_opt:
- seccomp:unconfined
environment:
@@ -76,14 +76,14 @@ services:
volumes:
- milvus_data:/var/lib/milvus
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
test: [CMD, curl, -f, 'http://localhost:9091/healthz']
interval: 30s
start_period: 90s
timeout: 20s
retries: 3
ports:
- "${MILVUS_PORT_OVERRIDE_HTTP:-19530}:19530"
- "${MILVUS_PORT_OVERRIDE_WEBUI:-9091}:9091"
- '${MILVUS_PORT_OVERRIDE_HTTP:-19530}:19530'
- '${MILVUS_PORT_OVERRIDE_WEBUI:-9091}:9091'
depends_on:
etcd:
condition: service_healthy
@@ -107,7 +107,7 @@ services:
- TZ=${TZ:-UTC}
- MILVUS_URL=${MILVUS_URL:-milvus-standalone:19530}
ports:
- "${ATTU_PORT_OVERRIDE:-8000}:3000"
- '${ATTU_PORT_OVERRIDE:-8000}:3000'
depends_on:
milvus-standalone:
condition: service_healthy
@@ -121,14 +121,13 @@ services:
memory: ${ATTU_MEMORY_RESERVATION:-128M}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:3000/",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:3000/'
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
minecraft-bedrock:
@@ -12,24 +12,24 @@ services:
image: ${GLOBAL_REGISTRY:-}itzg/minecraft-bedrock-server:${BEDROCK_VERSION:-latest}
environment:
TZ: ${TZ:-UTC}
EULA: "${EULA:-TRUE}"
VERSION: "${MINECRAFT_VERSION:-LATEST}"
GAMEMODE: "${GAMEMODE:-survival}"
DIFFICULTY: "${DIFFICULTY:-easy}"
SERVER_NAME: "${SERVER_NAME:-Dedicated Server}"
MAX_PLAYERS: "${MAX_PLAYERS:-10}"
ALLOW_CHEATS: "${ALLOW_CHEATS:-false}"
LEVEL_NAME: "${LEVEL_NAME:-Bedrock level}"
LEVEL_SEED: "${LEVEL_SEED:-}"
ONLINE_MODE: "${ONLINE_MODE:-true}"
WHITE_LIST: "${WHITE_LIST:-false}"
SERVER_PORT: "${SERVER_PORT:-19132}"
SERVER_PORT_V6: "${SERVER_PORT_V6:-19133}"
UID: "${UID:-1000}"
GID: "${GID:-1000}"
EULA: '${EULA:-TRUE}'
VERSION: '${MINECRAFT_VERSION:-LATEST}'
GAMEMODE: '${GAMEMODE:-survival}'
DIFFICULTY: '${DIFFICULTY:-easy}'
SERVER_NAME: '${SERVER_NAME:-Dedicated Server}'
MAX_PLAYERS: '${MAX_PLAYERS:-10}'
ALLOW_CHEATS: '${ALLOW_CHEATS:-false}'
LEVEL_NAME: '${LEVEL_NAME:-Bedrock level}'
LEVEL_SEED: '${LEVEL_SEED:-}'
ONLINE_MODE: '${ONLINE_MODE:-true}'
WHITE_LIST: '${WHITE_LIST:-false}'
SERVER_PORT: '${SERVER_PORT:-19132}'
SERVER_PORT_V6: '${SERVER_PORT_V6:-19133}'
UID: '${UID:-1000}'
GID: '${GID:-1000}'
ports:
- "${SERVER_PORT_OVERRIDE:-19132}:19132/udp"
- "${SERVER_PORT_V6_OVERRIDE:-19133}:19133/udp"
- '${SERVER_PORT_OVERRIDE:-19132}:19132/udp'
- '${SERVER_PORT_V6_OVERRIDE:-19133}:19133/udp'
volumes:
- bedrock_data:/data
stdin_open: true
@@ -43,7 +43,7 @@ services:
cpus: ${BEDROCK_CPU_RESERVATION:-1.0}
memory: ${BEDROCK_MEMORY_RESERVATION:-1G}
healthcheck:
test: ["CMD-SHELL", "[ -f /data/valid_known_packs.json ]"]
test: [CMD-SHELL, '[ -f /data/valid_known_packs.json ]']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,15 +4,15 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
minio:
<<: *defaults
image: ${CGR_DEV_REGISTRY:cgr.dev-}/chainguard/minio:${MINIO_VERSION:-0.20251015}
ports:
- "${MINIO_PORT_OVERRIDE_API:-9000}:9000"
- "${MINIO_PORT_OVERRIDE_WEBUI:-9001}:9001"
- '${MINIO_PORT_OVERRIDE_API:-9000}:9000'
- '${MINIO_PORT_OVERRIDE_WEBUI:-9001}:9001'
environment:
TZ: ${TZ:-UTC}
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
@@ -21,7 +21,7 @@ services:
- minio_data:/data
command: server /data --console-address ':9001'
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
test: [CMD, curl, -f, 'http://localhost:9000/minio/health/live']
interval: 30s
timeout: 20s
retries: 5

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
postgres:
@@ -26,7 +26,7 @@ services:
cpus: ${POSTGRES_MLFLOW_CPU_RESERVATION:-0.5}
memory: ${POSTGRES_MLFLOW_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-mlflow}"]
test: [CMD-SHELL, 'pg_isready -U ${POSTGRES_USER:-mlflow}']
interval: 10s
timeout: 5s
retries: 5
@@ -41,8 +41,8 @@ services:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minio}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minio123}
ports:
- "${MINIO_PORT_OVERRIDE:-9000}:9000"
- "${MINIO_CONSOLE_PORT_OVERRIDE:-9001}:9001"
- '${MINIO_PORT_OVERRIDE:-9000}:9000'
- '${MINIO_CONSOLE_PORT_OVERRIDE:-9001}:9001'
volumes:
- minio_data:/data
deploy:
@@ -54,7 +54,7 @@ services:
cpus: ${MINIO_MLFLOW_CPU_RESERVATION:-0.5}
memory: ${MINIO_MLFLOW_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
test: [CMD, curl, -f, 'http://localhost:9000/minio/health/live']
interval: 30s
timeout: 10s
retries: 3
@@ -73,7 +73,7 @@ services:
/usr/bin/mc mb minio/${MINIO_BUCKET:-mlflow} --ignore-existing;
exit 0;
"
restart: "no"
restart: no
mlflow:
<<: *defaults
@@ -86,7 +86,7 @@ services:
minio-init:
condition: service_completed_successfully
ports:
- "${MLFLOW_PORT_OVERRIDE:-5000}:5000"
- '${MLFLOW_PORT_OVERRIDE:-5000}:5000'
environment:
TZ: ${TZ:-UTC}
MLFLOW_BACKEND_STORE_URI: postgresql://${POSTGRES_USER:-mlflow}:${POSTGRES_PASSWORD:-mlflow}@postgres:5432/${POSTGRES_DB:-mlflow}
@@ -98,9 +98,9 @@ services:
- mlflow
- server
- --host
- "0.0.0.0"
- 0.0.0.0
- --port
- "5000"
- '5000'
- --backend-store-uri
- postgresql://${POSTGRES_USER:-mlflow}:${POSTGRES_PASSWORD:-mlflow}@postgres:5432/${POSTGRES_DB:-mlflow}
- --default-artifact-root
@@ -114,7 +114,7 @@ services:
cpus: ${MLFLOW_CPU_RESERVATION:-1.0}
memory: ${MLFLOW_MEMORY_RESERVATION:-1G}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5000/health"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:5000/health']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
x-mongo: &mongo
<<: *defaults
@@ -30,7 +30,7 @@ x-mongo: &mongo
retries: 10
start_period: 30s
extra_hosts:
- "host.docker.internal:host-gateway"
- 'host.docker.internal:host-gateway'
deploy:
resources:
limits:
@@ -40,12 +40,11 @@ x-mongo: &mongo
cpus: ${MONGO_REPLICA_SINGLE_CPU_RESERVATION:-0.50}
memory: ${MONGO_REPLICA_SINGLE_MEMORY_RESERVATION:-1024M}
services:
mongo1:
<<: *mongo
ports:
- "${MONGO_PORT_OVERRIDE_1:-27017}:27017"
- '${MONGO_PORT_OVERRIDE_1:-27017}:27017'
volumes:
- mongo_data:/data/db
- ./secrets/rs0.key:/data/rs0.key:ro
@@ -67,7 +66,7 @@ services:
volumes:
- ./secrets/rs0.key:/data/rs0.key:ro
extra_hosts:
- "host.docker.internal:host-gateway"
- 'host.docker.internal:host-gateway'
entrypoint:
- bash
- -c
@@ -109,6 +108,5 @@ services:
cpus: ${MONGO_REPLICA_SINGLE_INIT_CPU_RESERVATION:-0.50}
memory: ${MONGO_REPLICA_SINGLE_INIT_MEMORY_RESERVATION:-1024M}
volumes:
mongo_data:

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
x-mongo: &mongo
<<: *defaults
@@ -30,7 +30,7 @@ x-mongo: &mongo
retries: 10
start_period: 30s
extra_hosts:
- "host.docker.internal:host-gateway"
- 'host.docker.internal:host-gateway'
deploy:
resources:
limits:
@@ -44,7 +44,7 @@ services:
mongo1:
<<: *mongo
ports:
- "${MONGO_PORT_OVERRIDE_1:-27017}:27017"
- '${MONGO_PORT_OVERRIDE_1:-27017}:27017'
volumes:
- mongo1_data:/data/db
- ./secrets/rs0.key:/data/rs0.key:ro
@@ -52,7 +52,7 @@ services:
mongo2:
<<: *mongo
ports:
- "${MONGO_PORT_OVERRIDE_2:-27018}:27017"
- '${MONGO_PORT_OVERRIDE_2:-27018}:27017'
volumes:
- mongo2_data:/data/db
- ./secrets/rs0.key:/data/rs0.key:ro
@@ -60,7 +60,7 @@ services:
mongo3:
<<: *mongo
ports:
- "${MONGO_PORT_OVERRIDE_3:-27019}:27017"
- '${MONGO_PORT_OVERRIDE_3:-27019}:27017'
volumes:
- mongo3_data:/data/db
- ./secrets/rs0.key:/data/rs0.key:ro
@@ -86,7 +86,7 @@ services:
MONGO_PORT_3: ${MONGO_PORT_OVERRIDE_3:-27019}
MONGO_HOST: ${MONGO_HOST:-host.docker.internal}
extra_hosts:
- "host.docker.internal:host-gateway"
- 'host.docker.internal:host-gateway'
volumes:
- ./secrets/rs0.key:/data/rs0.key:ro
entrypoint:

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
mongo:
@@ -16,11 +16,11 @@ services:
MONGO_INITDB_ROOT_PASSWORD: ${MONGO_INITDB_ROOT_PASSWORD:-password}
MONGO_INITDB_DATABASE: ${MONGO_INITDB_DATABASE:-admin}
ports:
- "${MONGO_PORT_OVERRIDE:-27017}:27017"
- '${MONGO_PORT_OVERRIDE:-27017}:27017'
volumes:
- mongo_data:/data/db
healthcheck:
test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
test: [CMD, mongosh, --eval, "db.adminCommand('ping')"]
interval: 30s
timeout: 10s
retries: 3
@@ -34,6 +34,5 @@ services:
cpus: ${MONGO_CPU_RESERVATION:-0.50}
memory: ${MONGO_MEMORY_RESERVATION:-1024M}
volumes:
mongo_data:

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
mysql:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}mysql:${MYSQL_VERSION:-9.4.0}
ports:
- "${MYSQL_PORT_OVERRIDE:-3306}:3306"
- '${MYSQL_PORT_OVERRIDE:-3306}:3306'
volumes:
- mysql_data:/var/lib/mysql
@@ -30,7 +30,7 @@ services:
cpus: ${MYSQL_CPU_RESERVATION:-0.5}
memory: ${MYSQL_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-p$$MYSQL_ROOT_PASSWORD"]
test: [CMD, mysqladmin, ping, -h, localhost, -u, root, -p$$MYSQL_ROOT_PASSWORD]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,16 +4,16 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
nacos:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}nacos/nacos-server:${NACOS_VERSION:-v3.1.0-slim}
ports:
- "${NACOS_HTTP_PORT_OVERRIDE:-8848}:8848"
- "${NACOS_GRPC_PORT_OVERRIDE:-9848}:9848"
- "${NACOS_GRPC_PORT2_OVERRIDE:-9849}:9849"
- '${NACOS_HTTP_PORT_OVERRIDE:-8848}:8848'
- '${NACOS_GRPC_PORT_OVERRIDE:-9848}:9848'
- '${NACOS_GRPC_PORT2_OVERRIDE:-9849}:9849'
volumes:
- nacos_logs:/home/nacos/logs
environment:
@@ -37,7 +37,7 @@ services:
cpus: ${NACOS_CPU_RESERVATION:-0.5}
memory: ${NACOS_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8848/nacos/"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:8848/nacos/']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -20,7 +20,7 @@ This service deploys NebulaGraph, a distributed, fast open-source graph database
## Volumes
- `nebula_meta_data`: Meta service data
- `nebula_storage_data`: Storage service data
- `nebula_storage_data`: Storage service data
- `nebula_*_logs`: Log files for each service
## Usage

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
metad:
@@ -24,9 +24,9 @@ services:
- nebula_meta_data:/data/meta
- nebula_meta_logs:/logs
ports:
- "9559:9559"
- "19559:19559"
- "19560:19560"
- '9559:9559'
- '19559:19559'
- '19560:19560'
deploy:
resources:
limits:
@@ -36,7 +36,7 @@ services:
cpus: ${NEBULA_METAD_CPU_RESERVATION:-0.25}
memory: ${NEBULA_METAD_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD", "/usr/local/nebula/bin/nebula-metad", "--version"]
test: [CMD, /usr/local/nebula/bin/nebula-metad, --version]
interval: 30s
timeout: 10s
retries: 3
@@ -62,9 +62,9 @@ services:
- nebula_storage_data:/data/storage
- nebula_storage_logs:/logs
ports:
- "9779:9779"
- "19779:19779"
- "19780:19780"
- '9779:9779'
- '19779:19779'
- '19780:19780'
deploy:
resources:
limits:
@@ -74,7 +74,7 @@ services:
cpus: ${NEBULA_STORAGED_CPU_RESERVATION:-0.5}
memory: ${NEBULA_STORAGED_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD", "/usr/local/nebula/bin/nebula-storaged", "--version"]
test: [CMD, /usr/local/nebula/bin/nebula-storaged, --version]
interval: 30s
timeout: 10s
retries: 3
@@ -100,9 +100,9 @@ services:
volumes:
- nebula_graph_logs:/logs
ports:
- "${NEBULA_GRAPHD_PORT_OVERRIDE:-9669}:9669"
- "19669:19669"
- "19670:19670"
- '${NEBULA_GRAPHD_PORT_OVERRIDE:-9669}:9669'
- '19669:19669'
- '19670:19670'
deploy:
resources:
limits:
@@ -112,7 +112,7 @@ services:
cpus: ${NEBULA_GRAPHD_CPU_RESERVATION:-0.5}
memory: ${NEBULA_GRAPHD_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD", "/usr/local/nebula/bin/nebula-graphd", "--version"]
test: [CMD, /usr/local/nebula/bin/nebula-graphd, --version]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,15 +4,15 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
neo4j:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}neo4j:${NEO4J_VERSION:-5.27.4-community}
ports:
- "${NEO4J_HTTP_PORT_OVERRIDE:-7474}:7474"
- "${NEO4J_BOLT_PORT_OVERRIDE:-7687}:7687"
- '${NEO4J_HTTP_PORT_OVERRIDE:-7474}:7474'
- '${NEO4J_BOLT_PORT_OVERRIDE:-7687}:7687'
volumes:
- neo4j_data:/data
- neo4j_logs:/logs
@@ -34,7 +34,7 @@ services:
cpus: ${NEO4J_CPU_RESERVATION:-0.5}
memory: ${NEO4J_MEMORY_RESERVATION:-1G}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:7474/"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:7474/']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -6,7 +6,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
netdata:
@@ -35,7 +35,7 @@ services:
environment:
- TZ=${TZ:-UTC}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:19999/api/v1/info"]
test: [CMD, curl, -f, 'http://localhost:19999/api/v1/info']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -8,7 +8,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
nexa-sdk:
@@ -17,7 +17,7 @@ services:
- cpu
image: ${GLOBAL_REGISTRY:-}nexa4ai/nexasdk:${NEXA_SDK_VERSION:-v0.2.65}
ports:
- "${NEXA_SDK_PORT_OVERRIDE:-18181}:18181"
- '${NEXA_SDK_PORT_OVERRIDE:-18181}:18181'
volumes:
- nexa_data:/data
environment:
@@ -25,7 +25,7 @@ services:
- NEXA_TOKEN=${NEXA_TOKEN:-}
command: serve
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:18181/docs/ui"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:18181/docs/ui']
interval: 30s
timeout: 10s
retries: 3
@@ -46,7 +46,7 @@ services:
- gpu
image: ${GLOBAL_REGISTRY:-}nexa4ai/nexasdk:${NEXA_SDK_VERSION:-v0.2.62}-cuda
ports:
- "${NEXA_SDK_PORT_OVERRIDE:-18181}:18181"
- '${NEXA_SDK_PORT_OVERRIDE:-18181}:18181'
volumes:
- nexa_data:/data
environment:
@@ -54,7 +54,7 @@ services:
- NEXA_TOKEN=${NEXA_TOKEN:-}
command: serve
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:18181/docs/ui"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:18181/docs/ui']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,15 +4,15 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
nginx:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}nginx:${NGINX_VERSION:-1.28.2-alpine3.22}
ports:
- "${NGINX_HTTP_PORT_OVERRIDE:-80}:80"
- "${NGINX_HTTPS_PORT_OVERRIDE:-443}:443"
- '${NGINX_HTTP_PORT_OVERRIDE:-80}:80'
- '${NGINX_HTTPS_PORT_OVERRIDE:-443}:443'
volumes:
- nginx_logs:/var/log/nginx
- ./html:/usr/share/nginx/html:ro
@@ -35,14 +35,13 @@ services:
memory: ${NGINX_MEMORY_RESERVATION:-64M}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:80/",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:80/'
interval: 30s
timeout: 10s
retries: 3

View File

@@ -35,7 +35,7 @@ Add this scrape config to your Prometheus configuration:
```yaml
scrape_configs:
- job_name: 'node'
- job_name: node
static_configs:
- targets: ['localhost:9100']
```

View File

@@ -35,7 +35,7 @@ docker compose up -d
```yaml
scrape_configs:
- job_name: 'node'
- job_name: node
static_configs:
- targets: ['localhost:9100']
```

View File

@@ -4,18 +4,18 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
node-exporter:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}prom/node-exporter:${NODE_EXPORTER_VERSION:-v1.8.2}
ports:
- "${NODE_EXPORTER_PORT_OVERRIDE:-9100}:9100"
- '${NODE_EXPORTER_PORT_OVERRIDE:-9100}:9100'
command:
- '--path.rootfs=/host'
- '--path.procfs=/host/proc'
- '--path.sysfs=/host/sys'
- --path.rootfs=/host
- --path.procfs=/host/proc
- --path.sysfs=/host/sys
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
environment:
- TZ=${TZ:-UTC}
@@ -30,7 +30,7 @@ services:
cpus: ${NODE_EXPORTER_CPU_RESERVATION:-0.1}
memory: ${NODE_EXPORTER_MEMORY_RESERVATION:-64M}
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9100/metrics"]
test: [CMD, wget, --no-verbose, --tries=1, --spider, 'http://localhost:9100/metrics']
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
oceanbase:
@@ -23,8 +23,8 @@ services:
volumes:
- oceanbase_data:/root/ob
ports:
- "${OCEANBASE_SQL_PORT_OVERRIDE:-2881}:2881"
- "${OCEANBASE_RPC_PORT_OVERRIDE:-2882}:2882"
- '${OCEANBASE_SQL_PORT_OVERRIDE:-2881}:2881'
- '${OCEANBASE_RPC_PORT_OVERRIDE:-2882}:2882'
deploy:
resources:
limits:
@@ -34,7 +34,7 @@ services:
cpus: ${OCEANBASE_CPU_RESERVATION:-2.0}
memory: ${OCEANBASE_MEMORY_RESERVATION:-8G}
healthcheck:
test: ["CMD-SHELL", "mysql -h127.0.0.1 -P2881 -uroot -p$$OB_ROOT_PASSWORD -e 'SELECT 1' || exit 1"]
test: [CMD-SHELL, "mysql -h127.0.0.1 -P2881 -uroot -p$$OB_ROOT_PASSWORD -e 'SELECT 1' || exit 1"]
interval: 30s
timeout: 10s
retries: 5

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
odoo:
@@ -14,7 +14,7 @@ services:
odoo-db:
condition: service_healthy
ports:
- "${ODOO_PORT_OVERRIDE:-8069}:8069"
- '${ODOO_PORT_OVERRIDE:-8069}:8069'
volumes:
- odoo_web_data:/var/lib/odoo
- odoo_addons:/mnt/extra-addons
@@ -35,14 +35,13 @@ services:
memory: ${ODOO_MEMORY_RESERVATION:-1G}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:8069/",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:8069/'
interval: 30s
timeout: 10s
retries: 3
@@ -68,7 +67,7 @@ services:
cpus: ${ODOO_DB_CPU_RESERVATION:-0.25}
memory: ${ODOO_DB_MEMORY_RESERVATION:-512M}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-odoo}"]
test: [CMD-SHELL, 'pg_isready -U ${POSTGRES_USER:-odoo}']
interval: 10s
timeout: 5s
retries: 5

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
ollama:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}ollama/ollama:${OLLAMA_VERSION:-0.14.3}
ports:
- "${OLLAMA_PORT_OVERRIDE:-11434}:11434"
- '${OLLAMA_PORT_OVERRIDE:-11434}:11434'
volumes:
- ollama_models:/root/.ollama
environment:
@@ -19,14 +19,13 @@ services:
ipc: host
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:11434/",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:11434/'
interval: 30s
timeout: 10s
retries: 3
@@ -41,7 +40,7 @@ services:
memory: ${OLLAMA_MEMORY_RESERVATION:-4G}
devices:
- driver: nvidia
device_ids: ["0"]
device_ids: ['0']
capabilities: [gpu]
volumes:

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
open_webui:
<<: *defaults
image: ${GHCR_IO_REGISTRY:-ghcr.io}/open-webui/open-webui:${OPEN_WEBUI_VERSION:-main}
ports:
- "${OPEN_WEBUI_PORT_OVERRIDE:-8080}:8080"
- '${OPEN_WEBUI_PORT_OVERRIDE:-8080}:8080'
volumes:
- open_webui_data:/app/backend/data
environment:

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
opencode:
@@ -12,7 +12,7 @@ services:
image: ${GLOBAL_REGISTRY:-}ghcr.io/anomalyco/opencode:${OPENCODE_VERSION:-1.1.27}
command: web --hostname 0.0.0.0 --port 4096
ports:
- "${OPENCODE_PORT_OVERRIDE:-4096}:4096"
- '${OPENCODE_PORT_OVERRIDE:-4096}:4096'
volumes:
- opencode_data:/root/.opencode
- ${OPENCODE_PROJECT_DIR:-./project}:/app
@@ -29,14 +29,13 @@ services:
working_dir: /app
healthcheck:
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost:4096/",
]
- CMD
- wget
- --quiet
- --tries=1
- --spider
- 'http://localhost:4096/'
interval: 30s
timeout: 10s
retries: 3

Binary file not shown.

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
db:
@@ -18,9 +18,9 @@ services:
volumes:
- postgres_data:/var/lib/postgresql/data
ports:
- "${POSTGRES_PORT_OVERRIDE:-5432}:5432"
- '${POSTGRES_PORT_OVERRIDE:-5432}:5432'
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-opencut}"]
test: [CMD-SHELL, 'pg_isready -U ${POSTGRES_USER:-opencut}']
interval: 30s
timeout: 10s
retries: 5
@@ -38,9 +38,9 @@ services:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}redis:${REDIS_VERSION:-7-alpine}
ports:
- "${REDIS_PORT_OVERRIDE:-6379}:6379"
- '${REDIS_PORT_OVERRIDE:-6379}:6379'
healthcheck:
test: ["CMD", "redis-cli", "ping"]
test: [CMD, redis-cli, ping]
interval: 30s
timeout: 10s
retries: 5
@@ -58,16 +58,16 @@ services:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}hiett/serverless-redis-http:${SERVERLESS_REDIS_HTTP_VERSION:-latest}
ports:
- "${SERVERLESS_REDIS_HTTP_PORT_OVERRIDE:-8079}:80"
- '${SERVERLESS_REDIS_HTTP_PORT_OVERRIDE:-8079}:80'
environment:
SRH_MODE: env
SRH_TOKEN: ${SERVERLESS_REDIS_HTTP_TOKEN:?SERVERLESS_REDIS_HTTP_TOKEN is required}
SRH_CONNECTION_STRING: "redis://redis:6379"
SRH_CONNECTION_STRING: 'redis://redis:6379'
depends_on:
redis:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "wget --spider -q http://127.0.0.1:80 || exit 1"]
test: [CMD-SHELL, 'wget --spider -q http://127.0.0.1:80 || exit 1']
interval: 30s
timeout: 10s
retries: 5
@@ -85,7 +85,7 @@ services:
<<: *defaults
image: ${OPENCUT_WEB_IMAGE:-opencut/web:latest}
ports:
- "${OPENCUT_WEB_PORT_OVERRIDE:-3100}:3000"
- '${OPENCUT_WEB_PORT_OVERRIDE:-3100}:3000'
environment:
TZ: ${TZ:-UTC}
NODE_ENV: production
@@ -108,7 +108,7 @@ services:
serverless-redis-http:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:3000/api/health || exit 1"]
test: [CMD-SHELL, 'curl -f http://localhost:3000/api/health || exit 1']
interval: 30s
timeout: 10s
retries: 5

View File

@@ -4,14 +4,14 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
openlist:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}openlistteam/openlist:${OPENLIST_VERSION:-latest}
ports:
- "${OPENLIST_PORT_OVERRIDE:-5244}:5244"
- '${OPENLIST_PORT_OVERRIDE:-5244}:5244'
volumes:
- openlist_data:/opt/openlist/data
environment:
@@ -29,14 +29,13 @@ services:
memory: ${OPENLIST_MEMORY_RESERVATION:-256M}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:5244/",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:5244/'
interval: 30s
timeout: 10s
retries: 3

View File

@@ -4,7 +4,7 @@ x-defaults: &defaults
driver: json-file
options:
max-size: 100m
max-file: "3"
max-file: '3'
services:
opensearch:
@@ -16,7 +16,7 @@ services:
node.name: opensearch
discovery.type: single-node
bootstrap.memory_lock: true
OPENSEARCH_JAVA_OPTS: "-Xms${OPENSEARCH_HEAP_SIZE:-512m} -Xmx${OPENSEARCH_HEAP_SIZE:-512m}"
OPENSEARCH_JAVA_OPTS: '-Xms${OPENSEARCH_HEAP_SIZE:-512m} -Xmx${OPENSEARCH_HEAP_SIZE:-512m}'
OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_ADMIN_PASSWORD:-Admin@123}
DISABLE_SECURITY_PLUGIN: ${DISABLE_SECURITY_PLUGIN:-false}
ulimits:
@@ -27,8 +27,8 @@ services:
soft: 65536
hard: 65536
ports:
- "${OPENSEARCH_PORT_OVERRIDE:-9200}:9200"
- "${OPENSEARCH_PERF_ANALYZER_PORT_OVERRIDE:-9600}:9600"
- '${OPENSEARCH_PORT_OVERRIDE:-9200}:9200'
- '${OPENSEARCH_PERF_ANALYZER_PORT_OVERRIDE:-9600}:9600'
volumes:
- opensearch_data:/usr/share/opensearch/data
deploy:
@@ -41,7 +41,7 @@ services:
memory: ${OPENSEARCH_MEMORY_RESERVATION:-1G}
healthcheck:
test:
["CMD-SHELL", "curl -f http://localhost:9200/_cluster/health || exit 1"]
[CMD-SHELL, 'curl -f http://localhost:9200/_cluster/health || exit 1']
interval: 30s
timeout: 10s
retries: 3
@@ -51,7 +51,7 @@ services:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}opensearchproject/opensearch-dashboards:${OPENSEARCH_DASHBOARDS_VERSION:-2.19.0}
ports:
- "${OPENSEARCH_DASHBOARDS_PORT_OVERRIDE:-5601}:5601"
- '${OPENSEARCH_DASHBOARDS_PORT_OVERRIDE:-5601}:5601'
environment:
TZ: ${TZ:-UTC}
OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
@@ -69,14 +69,13 @@ services:
memory: ${OPENSEARCH_DASHBOARDS_MEMORY_RESERVATION:-512M}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:5601/api/status",
]
- CMD
- wget
- --no-verbose
- --tries=1
- --spider
- 'http://localhost:5601/api/status'
interval: 30s
timeout: 10s
retries: 3

Some files were not shown because too many files have changed in this diff Show More