chore: update version
@@ -1,5 +1,5 @@
 # Bifrost Gateway Version
-BIFROST_VERSION=v1.3.54
+BIFROST_VERSION=v1.3.59
 
 # Port to bind to on the host machine
 BIFROST_PORT=28080
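
The bump only changes the default image tag; a running stack keeps its old container until the image is pulled and the service is recreated. A minimal sketch with plain `docker compose`, using the `bifrost` service name from the compose file further down:

```sh
# Pull the new default tag (v1.3.59) and recreate only the gateway container.
docker compose pull bifrost
docker compose up -d bifrost

# Confirm which image tag the service is now running.
docker compose images bifrost
```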
@@ -12,7 +12,7 @@ Bifrost is a lightweight, high-performance LLM gateway that supports multiple mo
 ## Configuration
 
-- `BIFROST_VERSION`: The version of the Bifrost image, default is `v1.3.54`.
+- `BIFROST_VERSION`: The version of the Bifrost image, default is `v1.3.59`.
 - `BIFROST_PORT`: The port for the Bifrost service, default is `28080`.
 
 ### Telemetry
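
With the defaults documented above, the gateway is published on host port `28080`. A quick reachability check once the stack is up; the specific API routes are Bifrost's own and are not assumed here:

```sh
# Any HTTP status code printed means the host port mapping is live.
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:28080/
```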
@@ -12,7 +12,7 @@ Bifrost 是一个轻量级、高性能的 LLM 网关,支持多种模型和提
 ## 配置
 
-- `BIFROST_VERSION`: Bifrost 镜像的版本,默认为 `v1.3.54`。
+- `BIFROST_VERSION`: Bifrost 镜像的版本,默认为 `v1.3.59`。
 - `BIFROST_PORT`: Bifrost 服务的端口,默认为 `28080`。
 
 ### 遥测 (Telemetry)
@@ -9,7 +9,7 @@ x-defaults: &defaults
 services:
   bifrost:
     <<: *defaults
-    image: ${GLOBAL_REGISTRY:-}maximhq/bifrost:${BIFROST_VERSION:-v1.3.54}
+    image: ${GLOBAL_REGISTRY:-}maximhq/bifrost:${BIFROST_VERSION:-v1.3.59}
     volumes:
       - bifrost_data:/app/data
     ports:
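
Both halves of the image reference are interpolated by compose: `GLOBAL_REGISTRY` defaults to an empty string and `BIFROST_VERSION` now defaults to `v1.3.59`. A sketch of checking the resolved value, with a placeholder mirror host for illustration:

```sh
# Show the fully resolved image reference (maximhq/bifrost:v1.3.59 with no overrides).
docker compose config | grep 'image:'

# GLOBAL_REGISTRY is prepended verbatim, so a mirror prefix must include its own
# trailing slash. registry.example.com is a placeholder, not a real mirror.
GLOBAL_REGISTRY=registry.example.com/ docker compose config | grep 'image:'
```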
@@ -1,15 +1,27 @@
+# Global Registry Prefix (optional)
+# GHCR_IO_REGISTRY=ghcr.io
+
 # Open WebUI Version
 OPEN_WEBUI_VERSION=main
 
+# Timezone
+TZ=UTC
+
 # Port to bind to on the host machine
-OPEN_WEBUI_PORT=8080
+OPEN_WEBUI_PORT_OVERRIDE=8080
+
+# Resource Limits
+OPEN_WEBUI_CPU_LIMIT=1
+OPEN_WEBUI_MEMORY_LIMIT=1024M
+OPEN_WEBUI_CPU_RESERVATION=0.5
+OPEN_WEBUI_MEMORY_RESERVATION=512M
 
 # OpenAI API Configuration (optional)
-OPENAI_API_BASE_URL=https://api.openai.com/v1
-OPENAI_API_KEY=
+# OPENAI_API_BASE_URL=https://api.openai.com/v1
+# OPENAI_API_KEY=
 
 # Enable Ollama API integration (default: true)
-ENABLE_OLLAMA_API=true
+# ENABLE_OLLAMA_API=true
 
 # WebUI URL (optional, for external access configuration)
-WEBUI_URL=
+# WEBUI_URL=
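
For an existing deployment the rename matters: per the README below, the compose file is expected to read `OPEN_WEBUI_PORT_OVERRIDE`, so a value left under the old `OPEN_WEBUI_PORT` key would be ignored. A rough migration sketch for a local `.env` (GNU `sed` syntax; adjust for BSD/macOS):

```sh
# Carry the old host-port setting over to the renamed variable.
sed -i 's/^OPEN_WEBUI_PORT=/OPEN_WEBUI_PORT_OVERRIDE=/' .env

# The optional backends now ship commented out; uncomment only what is in use,
# then sanity-check which keys the file actually defines.
grep -E '^(TZ|OPEN_WEBUI_|OPENAI_|ENABLE_OLLAMA_API|WEBUI_URL)' .env
```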
@@ -12,8 +12,14 @@ This service deploys Open WebUI, a web-based interface for LLMs.
 ## Configuration
 
+- `GHCR_IO_REGISTRY`: The registry prefix for the Open WebUI image, default is `ghcr.io`.
 - `OPEN_WEBUI_VERSION`: The version of the Open WebUI image, default is `main`.
+- `TZ`: The timezone for the container, default is `UTC`.
 - `OPEN_WEBUI_PORT_OVERRIDE`: The host port for Open WebUI, default is `8080`.
+- `OPEN_WEBUI_CPU_LIMIT`: The CPU limit for the Open WebUI service, default is `1`.
+- `OPEN_WEBUI_MEMORY_LIMIT`: The memory limit for the Open WebUI service, default is `1024M`.
+- `OPEN_WEBUI_CPU_RESERVATION`: The CPU reservation for the Open WebUI service, default is `0.5`.
+- `OPEN_WEBUI_MEMORY_RESERVATION`: The memory reservation for the Open WebUI service, default is `512M`.
 
 ## Volumes
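
Since these variables are compose interpolation inputs (the resource ones visibly so in the hunk further down), one-off overrides from the shell also work; environment values take precedence over `.env`:

```sh
# Bind a different host port and raise the memory cap for this run only,
# without editing .env.
OPEN_WEBUI_PORT_OVERRIDE=18080 OPEN_WEBUI_MEMORY_LIMIT=2048M docker compose up -d
```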
@@ -12,8 +12,14 @@
 ## 配置
 
-- `OPEN_WEBUI_VERSION`: Open WebUI 镜像的版本,默认为 `main`。
-- `OPEN_WEBUI_PORT_OVERRIDE`: Open WebUI 的主机端口,默认为 `8080`。
+- `GHCR_IO_REGISTRY` :Open WebUI 镜像的仓库前缀,默认为 `ghcr.io`。
+- `OPEN_WEBUI_VERSION` :Open WebUI 镜像的版本,默认为 `main`。
+- `TZ` :容器的时区,默认为 `UTC`。
+- `OPEN_WEBUI_PORT_OVERRIDE` :Open WebUI 的主机端口,默认为 `8080`。
+- `OPEN_WEBUI_CPU_LIMIT` :Open WebUI 服务的 CPU 限制,默认为 `1`。
+- `OPEN_WEBUI_MEMORY_LIMIT` :Open WebUI 服务的内存限制,默认为 `1024M`。
+- `OPEN_WEBUI_CPU_RESERVATION` :Open WebUI 服务的 CPU 预留,默认为 `0.5`。
+- `OPEN_WEBUI_MEMORY_RESERVATION` :Open WebUI 服务的内存预留,默认为 `512M`。
 
 ## 卷
@@ -23,10 +23,10 @@ services:
       resources:
         limits:
           cpus: ${OPEN_WEBUI_CPU_LIMIT:-1}
-          memory: ${OPEN_WEBUI_MEMORY_LIMIT:-512M}
+          memory: ${OPEN_WEBUI_MEMORY_LIMIT:-1024M}
         reservations:
-          cpus: ${OPEN_WEBUI_CPU_RESERVATION:-0.1}
-          memory: ${OPEN_WEBUI_MEMORY_RESERVATION:-128M}
+          cpus: ${OPEN_WEBUI_CPU_RESERVATION:-0.5}
+          memory: ${OPEN_WEBUI_MEMORY_RESERVATION:-512M}
 
 volumes:
   open_webui_data:
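
The new defaults double the previous memory ceiling (512M to 1024M) and raise the reserved baseline. A quick way to confirm what was actually applied, independent of the `.env` contents:

```sh
# Resolved compose values: shows limits/reservations after variable interpolation.
docker compose config | grep -A 6 'resources:'

# Live view: the MEM USAGE / LIMIT column should reflect the 1024M cap once the
# container has been recreated with the new settings.
docker stats --no-stream
```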
@@ -1,5 +1,5 @@
 # Phoenix version
-PHOENIX_VERSION=12.25.0-nonroot
+PHOENIX_VERSION=12.27.0-nonroot
 
 # Timezone
 TZ=UTC
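
As elsewhere, this value is only the default for the `${PHOENIX_VERSION:-...}` interpolation in the compose file; leaving the variable unset now tracks `12.27.0-nonroot`, while pinning is still possible:

```sh
# Optional: stay on the previous release by pinning it explicitly in .env.
echo 'PHOENIX_VERSION=12.25.0-nonroot' >> .env

# Otherwise just pull and recreate to pick up 12.27.0-nonroot.
docker compose pull phoenix && docker compose up -d phoenix
```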
@@ -20,7 +20,7 @@ Arize Phoenix is an open-source AI observability platform for LLM applications.
 | Variable Name | Description | Default Value |
 | -------------------------- | ------------------------------------- | ----------------- |
-| PHOENIX_VERSION | Phoenix image version | `12.25.0-nonroot` |
+| PHOENIX_VERSION | Phoenix image version | `12.27.0-nonroot` |
 | PHOENIX_PORT_OVERRIDE | Host port for Phoenix UI and HTTP API | `6006` |
 | PHOENIX_GRPC_PORT_OVERRIDE | Host port for OTLP gRPC collector | `4317` |
 | PHOENIX_ENABLE_PROMETHEUS | Enable Prometheus metrics endpoint | `false` |
@@ -20,7 +20,7 @@ Arize Phoenix 是一个开源的 AI 可观测性平台,专为 LLM 应用设计
 | 变量名 | 描述 | 默认值 |
 | -------------------------- | --------------------------------- | ----------------- |
-| PHOENIX_VERSION | Phoenix 镜像版本 | `12.25.0-nonroot` |
+| PHOENIX_VERSION | Phoenix 镜像版本 | `12.27.0-nonroot` |
 | PHOENIX_PORT_OVERRIDE | Phoenix UI 和 HTTP API 的主机端口 | `6006` |
 | PHOENIX_GRPC_PORT_OVERRIDE | OTLP gRPC 采集器的主机端口 | `4317` |
 | PHOENIX_ENABLE_PROMETHEUS | 启用 Prometheus 指标端点 | `false` |
@@ -12,7 +12,7 @@ x-defaults: &defaults
 services:
   phoenix:
     <<: *defaults
-    image: ${GLOBAL_REGISTRY:-}arizephoenix/phoenix:${PHOENIX_VERSION:-12.25.0-nonroot}
+    image: ${GLOBAL_REGISTRY:-}arizephoenix/phoenix:${PHOENIX_VERSION:-12.27.0-nonroot}
     ports:
       - "${PHOENIX_PORT_OVERRIDE:-6006}:6006" # UI and OTLP HTTP collector
       - "${PHOENIX_GRPC_PORT_OVERRIDE:-4317}:4317" # OTLP gRPC collector
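
With the default mappings above, traces can be sent from the host to either collector. A minimal sketch using the standard OpenTelemetry environment variables; the ports are the compose defaults, everything else is illustrative:

```sh
# OTLP over gRPC goes to the dedicated collector port.
export OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4317"
export OTEL_EXPORTER_OTLP_PROTOCOL="grpc"

# OTLP over HTTP shares port 6006 with the UI; a quick reachability check:
curl -s -o /dev/null -w '%{http_code}\n' "http://localhost:6006/"
```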