chore: update versions

Author: Sun-ZhenXing
Date:   2025-12-30 11:25:14 +08:00
Parent: cdc76a8ee5
Commit: b8cb5eeea1
10 changed files with 21 additions and 13 deletions

View File

@@ -112,7 +112,7 @@ Compose Anything helps users quickly deploy various services by providing a set
| [Valkey Cluster](./src/valkey-cluster) | 8.0 |
| [Valkey](./src/valkey) | 8.0 |
| [Verdaccio](./src/verdaccio) | 6.1.2 |
-| [vLLM](./src/vllm) | v0.8.0 |
+| [vLLM](./src/vllm) | v0.13.0 |
| [Windmill](./src/windmill) | main |
| [ZooKeeper](./src/zookeeper) | 3.9.3 |
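Each row of this table links a service to a self-contained compose project under `./src/`, so the pinned version is deployed by bringing up that directory. A minimal sketch, using the vLLM entry as the example and assuming each linked directory carries its own compose file and `.env`:

    # deploy one of the services pinned in the table above
    cd src/vllm
    docker compose up -d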

View File

@@ -112,7 +112,7 @@ Compose Anything, by providing a set of high-quality Docker Compose configuration files,
| [Valkey Cluster](./src/valkey-cluster) | 8.0 |
| [Valkey](./src/valkey) | 8.0 |
| [Verdaccio](./src/verdaccio) | 6.1.2 |
-| [vLLM](./src/vllm) | v0.8.0 |
+| [vLLM](./src/vllm) | v0.13.0 |
| [Windmill](./src/windmill) | main |
| [ZooKeeper](./src/zookeeper) | 3.9.3 |

View File

@@ -3,7 +3,7 @@ GLOBAL_REGISTRY=
TZ=UTC
# Service Versions
-LANGFUSE_VERSION=3
+LANGFUSE_VERSION=3.143.0
POSTGRES_VERSION=17
CLICKHOUSE_VERSION=latest
MINIO_VERSION=latest
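The `${VAR:-default}` interpolation used throughout the compose file means these values are only defaults: anything exported in the shell (or set in this `.env`) takes precedence. A minimal sketch, assuming the commands are run from the directory holding this `.env` and its compose file:

    # one-off override of the pinned Langfuse tag
    LANGFUSE_VERSION=3.143.0 docker compose up -d
    # or edit .env and bring the stack up with the defaults
    docker compose up -d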

View File

@@ -45,7 +45,7 @@ This service deploys Langfuse, an open-source LLM engineering platform for obser
| Variable | Description | Default |
| --------------------------------------- | ----------------------------------------------- | ----------------------- |
-| `LANGFUSE_VERSION` | Langfuse container image version | `3` |
+| `LANGFUSE_VERSION` | Langfuse container image version | `3.143.0` |
| `LANGFUSE_PORT` | Web interface port | `3000` |
| `NEXTAUTH_URL` | Public URL of Langfuse instance | `http://localhost:3000` |
| `NEXTAUTH_SECRET` | NextAuth.js secret (required for production) | `mysecret` |
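`NEXTAUTH_SECRET` (and the `SALT` value used alongside it in the compose file) should not keep their placeholder defaults outside local testing; strong random values are easy to generate. A minimal sketch, assuming `openssl` is available on the host:

    # generate a random value suitable for NEXTAUTH_SECRET (repeat for SALT)
    openssl rand -base64 32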

View File

@@ -45,7 +45,7 @@
| Variable | Description | Default |
| --------------------------------------- | ------------------------------------- | ----------------------- |
-| `LANGFUSE_VERSION` | Langfuse container image version | `3` |
+| `LANGFUSE_VERSION` | Langfuse container image version | `3.143.0` |
| `LANGFUSE_PORT` | Web interface port | `3000` |
| `NEXTAUTH_URL` | Public URL of the Langfuse instance | `http://localhost:3000` |
| `NEXTAUTH_SECRET` | NextAuth.js secret (required in production) | `mysecret` |

View File

@@ -18,7 +18,7 @@ x-defaults: &defaults
services:
langfuse-worker:
<<: *defaults
-image: ${GLOBAL_REGISTRY:-}langfuse/langfuse-worker:${LANGFUSE_VERSION:-3}
+image: ${GLOBAL_REGISTRY:-}langfuse/langfuse-worker:${LANGFUSE_VERSION:-3.143.0}
depends_on: &langfuse-depends-on
postgres:
condition: service_healthy
@@ -32,6 +32,7 @@ services:
- ${LANGFUSE_WORKER_PORT_OVERRIDE:-3030}:3030
environment: &langfuse-worker-env
TZ: ${TZ:-UTC}
+HOSTNAME: ${HOSTNAME:-0.0.0.0}
NEXTAUTH_URL: ${NEXTAUTH_URL:-http://localhost:3000}
DATABASE_URL: ${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/postgres}
SALT: ${SALT:-mysalt}
@@ -86,15 +87,22 @@ services:
reservations:
cpus: ${LANGFUSE_WORKER_CPU_RESERVATION:-0.5}
memory: ${LANGFUSE_WORKER_MEMORY_RESERVATION:-512M}
+healthcheck:
+test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3030/api/health"]
+interval: 30s
+timeout: 10s
+retries: 3
+start_period: 30s
langfuse-web:
<<: *defaults
-image: ${GLOBAL_REGISTRY:-}langfuse/langfuse:${LANGFUSE_VERSION:-3}
+image: ${GLOBAL_REGISTRY:-}langfuse/langfuse:${LANGFUSE_VERSION:-3.143.0}
depends_on: *langfuse-depends-on
ports:
- "${LANGFUSE_PORT_OVERRIDE:-3000}:3000"
environment:
<<: *langfuse-worker-env
+HOSTNAME: ${HOSTNAME:-0.0.0.0}
NEXTAUTH_SECRET: ${NEXTAUTH_SECRET:-mysecret}
LANGFUSE_INIT_ORG_ID: ${LANGFUSE_INIT_ORG_ID:-}
LANGFUSE_INIT_ORG_NAME: ${LANGFUSE_INIT_ORG_NAME:-}
@@ -114,7 +122,7 @@ services:
cpus: ${LANGFUSE_WEB_CPU_RESERVATION:-0.5}
memory: ${LANGFUSE_WEB_MEMORY_RESERVATION:-512M}
healthcheck:
-test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/api/public/health"]
+test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3000/api/public/health"]
interval: 30s
timeout: 10s
retries: 3
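The `wget --spider` probes only check that the health endpoints answer with a success status from inside each container. Once the stack is up, the same signal is visible from the host; a sketch, assuming the service names used in this compose file:

    # STATUS should eventually show (healthy) for both services
    docker compose ps langfuse-web langfuse-worker
    # reproduce the worker probe manually inside its container
    docker compose exec langfuse-worker wget --no-verbose --tries=1 --spider http://127.0.0.1:3030/api/health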

View File

@@ -1,5 +1,5 @@
# vLLM version
-VLLM_VERSION="v0.12.0"
+VLLM_VERSION="v0.13.0"
# Model configuration
VLLM_MODEL="facebook/opt-125m"
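Because the image tag is taken from `VLLM_VERSION`, the pinned release can be pulled ahead of deployment, which fails fast if the tag does not exist and keeps `up` itself quick. A minimal sketch, assuming the default Docker Hub image referenced by the compose file:

    # pre-fetch the pinned vLLM image (it is large, so this may take a while)
    docker pull vllm/vllm-openai:v0.13.0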

View File

@@ -12,7 +12,7 @@ This service deploys vLLM, a high-throughput and memory-efficient inference and
| Variable Name | Description | Default Value |
| -------------------- | -------------------------------------- | ------------------- |
-| VLLM_VERSION | vLLM image version | `v0.12.0` |
+| VLLM_VERSION | vLLM image version | `v0.13.0` |
| VLLM_MODEL | Model name or path | `facebook/opt-125m` |
| VLLM_MAX_MODEL_LEN | Maximum context length | `2048` |
| VLLM_GPU_MEMORY_UTIL | GPU memory utilization (0.0-1.0) | `0.9` |
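vLLM serves an OpenAI-compatible HTTP API on the published port, so the configuration above can be exercised with plain `curl` once the model has finished loading. A sketch, assuming the default port mapping and model shown in this table:

    # list the models this instance is serving
    curl http://localhost:8000/v1/models
    # request a short completion from the default model
    curl http://localhost:8000/v1/completions \
      -H "Content-Type: application/json" \
      -d '{"model": "facebook/opt-125m", "prompt": "Hello", "max_tokens": 16}'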

View File

@@ -12,7 +12,7 @@
| Variable Name | Description | Default Value |
| ---------------------- | -------------------------------- | ------------------- |
-| `VLLM_VERSION` | vLLM image version | `v0.12.0` |
+| `VLLM_VERSION` | vLLM image version | `v0.13.0` |
| `VLLM_MODEL` | Model name or path | `facebook/opt-125m` |
| `VLLM_MAX_MODEL_LEN` | Maximum context length | `2048` |
| `VLLM_GPU_MEMORY_UTIL` | GPU memory utilization (0.0-1.0) | `0.9` |

View File

@@ -9,7 +9,7 @@ x-defaults: &defaults
services:
vllm:
<<: *defaults
-image: ${GLOBAL_REGISTRY:-}vllm/vllm-openai:${VLLM_VERSION:-v0.12.0}
+image: ${GLOBAL_REGISTRY:-}vllm/vllm-openai:${VLLM_VERSION:-v0.13.0}
ports:
- "${VLLM_PORT_OVERRIDE:-8000}:8000"
volumes:
@@ -42,7 +42,7 @@ services:
capabilities: [gpu]
shm_size: 4g
healthcheck:
-test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8000/health"]
+test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
interval: 30s
timeout: 10s
retries: 3
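`curl -f` exits non-zero on any HTTP error status, so the container is only reported healthy when `/health` answers successfully. The same probe can be reproduced from the host; a sketch, assuming the default port mapping and service name:

    # the healthcheck's view of the server, reproduced from the host
    curl -f http://localhost:8000/health
    # compose reports (healthy) once the probe starts passing
    docker compose ps vllm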