feat: add phoenix & trigger.dev

Author: Sun-ZhenXing
Date: 2025-12-09 15:19:11 +08:00
parent 0b11022ef8
commit 8f30f94184
20 changed files with 1726 additions and 617 deletions

README.md

@@ -68,6 +68,7 @@ Compose Anything helps users quickly deploy various services by providing a set
| [n8n](./src/n8n) | 1.114.0 |
| [Nacos](./src/nacos) | v3.1.0 |
| [NebulaGraph](./src/nebulagraph) | v3.8.0 |
| [NexaSDK](./src/nexa-sdk) | v0.2.62 |
| [Neo4j](./src/neo4j) | 5.27.4 |
| [Nginx](./src/nginx) | 1.29.1 |
| [Node Exporter](./src/node-exporter) | v1.8.2 |
@@ -75,6 +76,7 @@ Compose Anything helps users quickly deploy various services by providing a set
| [Odoo](./src/odoo) | 19.0 |
| [Ollama](./src/ollama) | 0.12.0 |
| [Open WebUI](./src/open-webui) | main |
| [Phoenix (Arize)](./src/phoenix) | 12.19.0 |
| [Open WebUI Rust](./src/open-webui-rust) | latest |
| [OpenCoze](./src/opencoze) | See Docs |
| [OpenCut](./src/opencut) | latest |
@@ -100,6 +102,7 @@ Compose Anything helps users quickly deploy various services by providing a set
| [Temporal](./src/temporal) | 1.24.2 |
| [TiDB](./src/tidb) | v8.5.0 |
| [TiKV](./src/tikv) | v8.5.0 |
| [Trigger.dev](./src/trigger-dev) | v4.2.0 |
| [Valkey Cluster](./src/valkey-cluster) | 8.0 |
| [Valkey](./src/valkey) | 8.0 |
| [Verdaccio](./src/verdaccio) | 6.1.2 |

README.zh.md

@@ -68,6 +68,7 @@ Compose Anything, by providing a set of high-quality Docker Compose configuration files,
| [n8n](./src/n8n) | 1.114.0 |
| [Nacos](./src/nacos) | v3.1.0 |
| [NebulaGraph](./src/nebulagraph) | v3.8.0 |
| [NexaSDK](./src/nexa-sdk) | v0.2.62 |
| [Neo4j](./src/neo4j) | 5.27.4 |
| [Nginx](./src/nginx) | 1.29.1 |
| [Node Exporter](./src/node-exporter) | v1.8.2 |
@@ -75,6 +76,7 @@ Compose Anything, by providing a set of high-quality Docker Compose configuration files,
| [Odoo](./src/odoo) | 19.0 |
| [Ollama](./src/ollama) | 0.12.0 |
| [Open WebUI](./src/open-webui) | main |
| [Phoenix (Arize)](./src/phoenix) | 12.19.0 |
| [Open WebUI Rust](./src/open-webui-rust) | latest |
| [OpenCoze](./src/opencoze) | See Docs |
| [OpenCut](./src/opencut) | latest |
@@ -100,6 +102,7 @@ Compose Anything, by providing a set of high-quality Docker Compose configuration files,
| [Temporal](./src/temporal) | 1.24.2 |
| [TiDB](./src/tidb) | v8.5.0 |
| [TiKV](./src/tikv) | v8.5.0 |
| [Trigger.dev](./src/trigger-dev) | v4.2.0 |
| [Valkey Cluster](./src/valkey-cluster) | 8.0 |
| [Valkey](./src/valkey) | 8.0 |
| [Verdaccio](./src/verdaccio) | 6.1.2 |

src/nexa-sdk/.env.example (deleted)

@@ -1,41 +0,0 @@
# Global registry for container images (optional)
# GLOBAL_REGISTRY=
# Nexa SDK version
NEXA_SDK_VERSION=latest
# Timezone configuration
TZ=UTC
# Port override for host binding
NEXA_SDK_PORT_OVERRIDE=8080
# Server configuration
NEXA_HOST=0.0.0.0:8080
NEXA_KEEPALIVE=300
NEXA_ORIGINS=*
# HuggingFace token for accessing private models (optional)
NEXA_HFTOKEN=
# Logging level (none, debug, info, warn, error)
NEXA_LOG=none
# Model to run (can be any Nexa-compatible model)
# Examples: gemma-2-2b-instruct, qwen3-4b, llama-3-8b, mistral-7b
NEXA_MODEL=gemma-2-2b-instruct
# GPU configuration (for gpu profile only)
# Number of GPU layers to offload (-1 for all layers)
NEXA_GPU_LAYERS=-1
# Shared memory size
NEXA_SHM_SIZE=2g
# Resource limits
NEXA_SDK_CPU_LIMIT=4.0
NEXA_SDK_MEMORY_LIMIT=8G
# Resource reservations
NEXA_SDK_CPU_RESERVATION=2.0
NEXA_SDK_MEMORY_RESERVATION=4G

src/nexa-sdk/Dockerfile (deleted)

@@ -1,8 +0,0 @@
# https://github.com/NexaAI/nexa-sdk/issues/684
FROM ubuntu:22.04
RUN apt update && apt install -y libgomp1 curl ffmpeg sox
RUN curl -fsSL https://github.com/NexaAI/nexa-sdk/releases/latest/download/nexa-cli_linux_x86_64.sh | sh
EXPOSE 8080
CMD [ "nexa", "serve", "--host", "0.0.0.0:8080" ]

src/nexa-sdk/Dockerfile.cuda (deleted)

@@ -1,8 +0,0 @@
# https://github.com/NexaAI/nexa-sdk/issues/684
FROM nvidia/cuda:12.8.1-cudnn-runtime-ubuntu22.04
RUN apt update && apt install -y libgomp1 curl ffmpeg sox
RUN curl -fsSL https://github.com/NexaAI/nexa-sdk/releases/latest/download/nexa-cli_linux_x86_64.sh | sh
EXPOSE 8080
CMD [ "nexa", "serve", "--host", "0.0.0.0:8080" ]

src/nexa-sdk/README.md (deleted)

@@ -1,233 +0,0 @@
# Nexa SDK
Nexa SDK is a comprehensive toolkit for running AI models locally. It provides inference for various model types including LLM, VLM (Vision Language Models), TTS (Text-to-Speech), ASR (Automatic Speech Recognition), and more. Built with performance in mind, it supports both CPU and GPU acceleration.
## Features
- **Multi-Model Support**: Run LLM, VLM, TTS, ASR, embedding, reranking, and image generation models
- **OpenAI-Compatible API**: Provides standard OpenAI API endpoints for easy integration
- **GPU Acceleration**: Optional GPU support via NVIDIA CUDA for faster inference
- **Resource Management**: Configurable CPU/memory limits and GPU layer offloading
- **Model Caching**: Persistent model storage for faster startup
- **Profile Support**: Easy switching between CPU-only and GPU-accelerated modes
## Quick Start
### Prerequisites
- Docker and Docker Compose
- For GPU support: NVIDIA Docker runtime and compatible GPU
### Basic Usage (CPU)
```bash
# Copy environment file
cp .env.example .env
# Edit .env to configure your model and settings
# NEXA_MODEL=gemma-2-2b-instruct
# Start the service with CPU profile
docker compose --profile cpu up -d
```
### GPU-Accelerated Usage
```bash
# Copy environment file
cp .env.example .env
# Configure for GPU usage
# NEXA_MODEL=gemma-2-2b-instruct
# NEXA_GPU_LAYERS=-1 # -1 means all layers on GPU
# Start the service with GPU profile
docker compose --profile gpu up -d
```
## Configuration
### Environment Variables
| Variable | Default | Description |
| ------------------------ | --------------------- | ------------------------------------------------------ |
| `NEXA_SDK_VERSION` | `latest` | Nexa SDK Docker image version |
| `NEXA_SDK_PORT_OVERRIDE` | `8080` | Host port for API access |
| `NEXA_MODEL` | `gemma-2-2b-instruct` | Model to load (e.g., qwen3-4b, llama-3-8b, mistral-7b) |
| `NEXA_HOST` | `0.0.0.0:8080` | Server bind address |
| `NEXA_KEEPALIVE` | `300` | Model keepalive timeout in seconds |
| `NEXA_ORIGINS` | `*` | CORS allowed origins |
| `NEXA_HFTOKEN` | - | HuggingFace token for private models |
| `NEXA_LOG` | `none` | Logging level (none, debug, info, warn, error) |
| `NEXA_GPU_LAYERS` | `-1` | GPU layers to offload (-1 = all, 0 = CPU only) |
| `NEXA_SHM_SIZE` | `2g` | Shared memory size |
| `TZ` | `UTC` | Container timezone |
### Resource Limits
| Variable | Default | Description |
| ----------------------------- | ------- | ------------------ |
| `NEXA_SDK_CPU_LIMIT` | `4.0` | Maximum CPU cores |
| `NEXA_SDK_MEMORY_LIMIT` | `8G` | Maximum memory |
| `NEXA_SDK_CPU_RESERVATION` | `2.0` | Reserved CPU cores |
| `NEXA_SDK_MEMORY_RESERVATION` | `4G` | Reserved memory |
### Profiles
- `cpu`: Run CPU-only inference (a profile must be selected explicitly; there is no default)
- `gpu`: Run with GPU acceleration (requires NVIDIA GPU)
## Usage Examples
### Test the API
```bash
# Check available models
curl http://localhost:8080/v1/models
# Chat completion
curl http://localhost:8080/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "gemma-2-2b-instruct",
"messages": [
{"role": "user", "content": "Hello!"}
]
}'
```
### Using Different Models
Edit `.env` to change the model:
```bash
# Small models for limited resources
NEXA_MODEL=gemma-2-2b-instruct
# or
NEXA_MODEL=qwen3-4b
# Larger models for better quality
NEXA_MODEL=llama-3-8b
# or
NEXA_MODEL=mistral-7b
```
### GPU Configuration
For GPU acceleration, adjust the number of layers:
```bash
# Offload all layers to GPU (fastest)
NEXA_GPU_LAYERS=-1
# Offload 30 layers (hybrid mode)
NEXA_GPU_LAYERS=30
# CPU only
NEXA_GPU_LAYERS=0
```
## Model Management
Models are automatically downloaded on first run and cached in the `nexa_models` volume. The default cache location inside the container is `/root/.cache/nexa`.
To use a different model:
1. Update `NEXA_MODEL` in `.env`
2. Restart the service: `docker compose --profile <cpu|gpu> restart`
## API Endpoints
Nexa SDK provides OpenAI-compatible API endpoints:
- `GET /v1/models` - List available models
- `POST /v1/chat/completions` - Chat completions
- `POST /v1/completions` - Text completions
- `POST /v1/embeddings` - Text embeddings
- `GET /health` - Health check
- `GET /docs` - API documentation (Swagger UI)
## Troubleshooting
### Out of Memory
Increase memory limits or use a smaller model:
```bash
NEXA_SDK_MEMORY_LIMIT=16G
NEXA_SDK_MEMORY_RESERVATION=8G
# Or switch to a smaller model
NEXA_MODEL=gemma-2-2b-instruct
```
### GPU Not Detected
Ensure NVIDIA Docker runtime is installed:
```bash
# Check GPU availability
docker run --rm --gpus all nvidia/cuda:12.8.1-base-ubuntu22.04 nvidia-smi
```
### Model Download Issues
Set HuggingFace token if accessing private models:
```bash
NEXA_HFTOKEN=your_hf_token_here
```
### Slow Performance
- Use GPU profile for better performance
- Increase `NEXA_GPU_LAYERS` to offload more computation to GPU
- Allocate more resources or use a smaller model
## Advanced Configuration
### Custom Model Path
If you want to use local model files, mount them as a volume:
```yaml
volumes:
- ./models:/models
- nexa_models:/root/.cache/nexa
```
Then reference the model by its path in the command.
### HTTPS Configuration
Set environment variables for HTTPS:
```bash
NEXA_ENABLEHTTPS=true
```
Mount certificate files:
```yaml
volumes:
- ./certs/cert.pem:/app/cert.pem:ro
- ./certs/key.pem:/app/key.pem:ro
```
## Health Check
The service includes a health check that verifies the API is responding:
```bash
curl http://localhost:8080/v1/models
```
## License
Nexa SDK is developed by Nexa AI. Please refer to the [official repository](https://github.com/NexaAI/nexa-sdk) for license information.
## Links
- [Official Repository](https://github.com/NexaAI/nexa-sdk)
- [Nexa AI Website](https://nexa.ai)
- [Documentation](https://docs.nexa.ai)
- [Model Hub](https://sdk.nexa.ai)

src/nexa-sdk/README.zh.md (deleted)

@@ -1,233 +0,0 @@
# Nexa SDK
Nexa SDK is a comprehensive toolkit for running AI models locally. It supports inference for multiple model types, including LLM, VLM (Vision Language Models), TTS (Text-to-Speech), ASR (Automatic Speech Recognition), and more. Built with performance in mind, it supports both CPU and GPU acceleration.
## Features
- **Multi-Model Support**: Run LLM, VLM, TTS, ASR, embedding, reranking, and image generation models
- **OpenAI-Compatible API**: Provides standard OpenAI API endpoints for easy integration
- **GPU Acceleration**: Optional GPU support via NVIDIA CUDA for faster inference
- **Resource Management**: Configurable CPU/memory limits and GPU layer offloading
- **Model Caching**: Persistent model storage for faster startup
- **Profile Support**: Easy switching between CPU-only and GPU-accelerated modes
## Quick Start
### Prerequisites
- Docker and Docker Compose
- For GPU support: NVIDIA Docker runtime and a compatible GPU
### Basic Usage (CPU)
```bash
# Copy the environment file
cp .env.example .env
# Edit .env to configure the model and settings
# NEXA_MODEL=gemma-2-2b-instruct
# Start the service with the CPU profile
docker compose --profile cpu up -d
```
### GPU-Accelerated Usage
```bash
# Copy the environment file
cp .env.example .env
# Configure for GPU usage
# NEXA_MODEL=gemma-2-2b-instruct
# NEXA_GPU_LAYERS=-1  # -1 means all layers on the GPU
# Start the service with the GPU profile
docker compose --profile gpu up -d
```
## Configuration
### Environment Variables
| Variable                 | Default               | Description                                            |
| ------------------------ | --------------------- | ------------------------------------------------------ |
| `NEXA_SDK_VERSION`       | `latest`              | Nexa SDK Docker image version                          |
| `NEXA_SDK_PORT_OVERRIDE` | `8080`                | Host port for API access                               |
| `NEXA_MODEL`             | `gemma-2-2b-instruct` | Model to load (e.g., qwen3-4b, llama-3-8b, mistral-7b) |
| `NEXA_HOST`              | `0.0.0.0:8080`        | Server bind address                                    |
| `NEXA_KEEPALIVE`         | `300`                 | Model keepalive timeout in seconds                     |
| `NEXA_ORIGINS`           | `*`                   | CORS allowed origins                                   |
| `NEXA_HFTOKEN`           | -                     | HuggingFace token for private models                   |
| `NEXA_LOG`               | `none`                | Logging level (none, debug, info, warn, error)         |
| `NEXA_GPU_LAYERS`        | `-1`                  | GPU layers to offload (-1 = all, 0 = CPU only)         |
| `NEXA_SHM_SIZE`          | `2g`                  | Shared memory size                                     |
| `TZ`                     | `UTC`                 | Container timezone                                     |
### Resource Limits
| Variable                      | Default | Description        |
| ----------------------------- | ------- | ------------------ |
| `NEXA_SDK_CPU_LIMIT`          | `4.0`   | Maximum CPU cores  |
| `NEXA_SDK_MEMORY_LIMIT`       | `8G`    | Maximum memory     |
| `NEXA_SDK_CPU_RESERVATION`    | `2.0`   | Reserved CPU cores |
| `NEXA_SDK_MEMORY_RESERVATION` | `4G`    | Reserved memory    |
### Profiles
- `cpu`: Run CPU-only inference (a profile must be selected explicitly; there is no default)
- `gpu`: Run with GPU acceleration (requires an NVIDIA GPU)
## Usage Examples
### Test the API
```bash
# Check available models
curl http://localhost:8080/v1/models
# Chat completion
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gemma-2-2b-instruct",
    "messages": [
      {"role": "user", "content": "Hello!"}
    ]
  }'
```
### Using Different Models
Edit `.env` to change the model:
```bash
# Small models for limited resources
NEXA_MODEL=gemma-2-2b-instruct
# or
NEXA_MODEL=qwen3-4b
# Larger models for better quality
NEXA_MODEL=llama-3-8b
# or
NEXA_MODEL=mistral-7b
```
### GPU Configuration
For GPU acceleration, adjust the number of layers:
```bash
# Offload all layers to the GPU (fastest)
NEXA_GPU_LAYERS=-1
# Offload 30 layers (hybrid mode)
NEXA_GPU_LAYERS=30
# CPU only
NEXA_GPU_LAYERS=0
```
## Model Management
Models are downloaded automatically on first run and cached in the `nexa_models` volume. The default cache location inside the container is `/root/.cache/nexa`.
To use a different model:
1. Update `NEXA_MODEL` in `.env`
2. Restart the service: `docker compose --profile <cpu|gpu> restart`
## API Endpoints
Nexa SDK provides OpenAI-compatible API endpoints:
- `GET /v1/models` - List available models
- `POST /v1/chat/completions` - Chat completions
- `POST /v1/completions` - Text completions
- `POST /v1/embeddings` - Text embeddings
- `GET /health` - Health check
- `GET /docs` - API documentation (Swagger UI)
## Troubleshooting
### Out of Memory
Increase the memory limits or use a smaller model:
```bash
NEXA_SDK_MEMORY_LIMIT=16G
NEXA_SDK_MEMORY_RESERVATION=8G
# Or switch to a smaller model
NEXA_MODEL=gemma-2-2b-instruct
```
### GPU Not Detected
Make sure the NVIDIA Docker runtime is installed:
```bash
# Check GPU availability
docker run --rm --gpus all nvidia/cuda:12.8.1-base-ubuntu22.04 nvidia-smi
```
### Model Download Issues
Set a HuggingFace token when accessing private models:
```bash
NEXA_HFTOKEN=your_hf_token_here
```
### Slow Performance
- Use the GPU profile for better performance
- Increase `NEXA_GPU_LAYERS` to offload more computation to the GPU
- Allocate more resources or use a smaller model
## Advanced Configuration
### Custom Model Path
To use local model files, mount them as a volume:
```yaml
volumes:
  - ./models:/models
  - nexa_models:/root/.cache/nexa
```
Then reference the model by its path in the command.
### HTTPS Configuration
Set the environment variable for HTTPS:
```bash
NEXA_ENABLEHTTPS=true
```
Mount the certificate files:
```yaml
volumes:
  - ./certs/cert.pem:/app/cert.pem:ro
  - ./certs/key.pem:/app/key.pem:ro
```
## Health Check
The service includes a health check that verifies the API is responding:
```bash
curl http://localhost:8080/v1/models
```
## License
Nexa SDK is developed by Nexa AI. Please refer to the [official repository](https://github.com/NexaAI/nexa-sdk) for license information.
## Links
- [Official Repository](https://github.com/NexaAI/nexa-sdk)
- [Nexa AI Website](https://nexa.ai)
- [Documentation](https://docs.nexa.ai)
- [Model Hub](https://sdk.nexa.ai)

src/nexa-sdk/compose.yaml (deleted)

@@ -1,94 +0,0 @@
x-defaults: &defaults
restart: unless-stopped
logging:
driver: json-file
options:
max-size: 100m
max-file: "3"
services:
nexa-sdk:
<<: *defaults
build:
context: .
dockerfile: Dockerfile
image: ${GLOBAL_REGISTRY:-}alexsuntop/nexa-sdk:${NEXA_SDK_CPU_VERSION:-0.2.57}
ports:
- "${NEXA_SDK_PORT_OVERRIDE:-8080}:8080"
volumes:
- nexa_models:/root/.cache/nexa
environment:
- TZ=${TZ:-UTC}
- NEXA_HOST=${NEXA_HOST:-0.0.0.0:8080}
- NEXA_KEEPALIVE=${NEXA_KEEPALIVE:-300}
- NEXA_ORIGINS=${NEXA_ORIGINS:-*}
- NEXA_HFTOKEN=${NEXA_HFTOKEN:-}
- NEXA_LOG=${NEXA_LOG:-none}
command: >
nexa server
${NEXA_MODEL:-gemma-2-2b-instruct}
ipc: host
shm_size: ${NEXA_SHM_SIZE:-2g}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/v1/models"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
deploy:
resources:
limits:
cpus: ${NEXA_SDK_CPU_LIMIT:-4.0}
memory: ${NEXA_SDK_MEMORY_LIMIT:-8G}
reservations:
cpus: ${NEXA_SDK_CPU_RESERVATION:-2.0}
memory: ${NEXA_SDK_MEMORY_RESERVATION:-4G}
profiles:
- cpu
nexa-sdk-cuda:
<<: *defaults
build:
context: .
dockerfile: Dockerfile.cuda
image: ${GLOBAL_REGISTRY:-}alexsuntop/nexa-sdk:${NEXA_SDK_CUDA_VERSION:-0.2.57-cuda}
ports:
- "${NEXA_SDK_PORT_OVERRIDE:-8080}:8080"
volumes:
- nexa_models:/root/.cache/nexa
environment:
- TZ=${TZ:-UTC}
- NEXA_HOST=${NEXA_HOST:-0.0.0.0:8080}
- NEXA_KEEPALIVE=${NEXA_KEEPALIVE:-300}
- NEXA_ORIGINS=${NEXA_ORIGINS:-*}
- NEXA_HFTOKEN=${NEXA_HFTOKEN:-}
- NEXA_LOG=${NEXA_LOG:-none}
command: >
nexa server
${NEXA_MODEL:-gemma-2-2b-instruct}
-ngl ${NEXA_GPU_LAYERS:--1}
ipc: host
shm_size: ${NEXA_SHM_SIZE:-2g}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/v1/models"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
deploy:
resources:
limits:
cpus: ${NEXA_SDK_CPU_LIMIT:-4.0}
memory: ${NEXA_SDK_MEMORY_LIMIT:-8G}
reservations:
cpus: ${NEXA_SDK_CPU_RESERVATION:-2.0}
memory: ${NEXA_SDK_MEMORY_RESERVATION:-4G}
devices:
- driver: nvidia
device_ids: ['0']
capabilities: [gpu]
profiles:
- cuda
volumes:
nexa_models:

src/nexa-sdk/.env.example Normal file

@@ -0,0 +1,20 @@
# NexaSDK Docker Configuration
# Image version (e.g., v0.2.62, v0.2.62-cuda, latest, latest-cuda)
NEXA_SDK_VERSION=v0.2.62
# Host port for NexaSDK REST API
NEXA_SDK_PORT_OVERRIDE=18181
# Nexa API token (required for model access)
# Obtain from https://sdk.nexa.ai -> Deployment -> Create Token
NEXA_TOKEN=
# Timezone
TZ=UTC
# Resource limits
NEXA_SDK_CPU_LIMIT=4.0
NEXA_SDK_MEMORY_LIMIT=8G
NEXA_SDK_CPU_RESERVATION=1.0
NEXA_SDK_MEMORY_RESERVATION=2G

src/nexa-sdk/README.md Normal file

@@ -0,0 +1,105 @@
# NexaSDK
[English](./README.md) | [中文](./README.zh.md)
This service deploys NexaSDK Docker for running AI models behind an OpenAI-compatible REST API. It supports LLM, Embeddings, Reranking, Computer Vision, and ASR models.
## Features
- **OpenAI-compatible API**: Drop-in replacement for OpenAI API endpoints
- **Multiple Model Types**: LLM, VLM, Embeddings, Reranking, CV, ASR
- **GPU Acceleration**: CUDA support for NVIDIA GPUs
- **NPU Support**: Optimized for Qualcomm NPU on ARM64
## Supported Models
| Modality | Models |
| ------------- | ------------------------------------------------------- |
| **LLM** | `NexaAI/LFM2-1.2B-npu`, `NexaAI/Granite-4.0-h-350M-NPU` |
| **VLM** | `NexaAI/OmniNeural-4B` |
| **Embedding** | `NexaAI/embeddinggemma-300m-npu`, `NexaAI/EmbedNeural` |
| **Rerank** | `NexaAI/jina-v2-rerank-npu` |
| **CV** | `NexaAI/yolov12-npu`, `NexaAI/convnext-tiny-npu-IoT` |
| **ASR** | `NexaAI/parakeet-tdt-0.6b-v3-npu` |
## Usage
### CPU Mode
```bash
docker compose up -d
```
### GPU Mode (CUDA)
```bash
docker compose --profile gpu up -d nexa-sdk-cuda
```
### Pull a Model
```bash
docker exec -it nexa-sdk nexa pull NexaAI/Granite-4.0-h-350M-NPU
```
### Interactive CLI
```bash
docker exec -it nexa-sdk nexa infer NexaAI/Granite-4.0-h-350M-NPU
```
### API Examples
- Chat completions:
```bash
curl -X POST http://localhost:18181/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "NexaAI/Granite-4.0-h-350M-NPU",
"messages": [{"role": "user", "content": "Hello!"}]
}'
```
- Embeddings:
```bash
curl -X POST http://localhost:18181/v1/embeddings \
-H "Content-Type: application/json" \
-d '{
"model": "NexaAI/EmbedNeural",
"input": "Hello, world!"
}'
```
- Swagger UI: Visit `http://localhost:18181/docs/ui`
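- Model listing. The `/v1/models` route is an assumption here, carried over from the OpenAI convention that earlier NexaSDK releases followed; the Swagger UI above is authoritative if this 404s:
```bash
# Assumed OpenAI-style route; verify against the Swagger UI if it is missing
curl http://localhost:18181/v1/models
```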
## Services
- `nexa-sdk`: CPU-based NexaSDK service (default)
- `nexa-sdk-cuda`: GPU-accelerated service with CUDA support (profile: `gpu`)
## Configuration
| Variable | Description | Default |
| ------------------------ | ------------------------- | --------- |
| `NEXA_SDK_VERSION` | NexaSDK image version | `v0.2.62` |
| `NEXA_SDK_PORT_OVERRIDE` | Host port for REST API | `18181` |
| `NEXA_TOKEN` | Nexa API token (required) | - |
| `TZ` | Timezone | `UTC` |
## Volumes
- `nexa_data`: Volume for storing downloaded models and data
## Getting a Token
1. Create an account at [sdk.nexa.ai](https://sdk.nexa.ai)
2. Go to **Deployment → Create Token**
3. Copy the token to your `.env` file, as sketched below
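A minimal sketch of wiring the token in, assuming the default service name from this compose file (`<your-token>` is a placeholder):
```bash
# Append the token (placeholder value) and recreate the service so it is picked up
echo "NEXA_TOKEN=<your-token>" >> .env
docker compose up -d --force-recreate nexa-sdk
```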
## References
- [NexaSDK Documentation](https://docs.nexa.ai/nexa-sdk-docker/overview)
- [Docker Hub](https://hub.docker.com/r/nexa4ai/nexasdk)
- [Supported Models](https://docs.nexa.ai/nexa-sdk-docker/overview#supported-models)

src/nexa-sdk/README.zh.md Normal file

@@ -0,0 +1,105 @@
# NexaSDK
[English](./README.md) | [中文](./README.zh.md)
This service deploys NexaSDK Docker for running AI models behind an OpenAI-compatible REST API. It supports LLM, Embeddings, Reranking, Computer Vision, and ASR models.
## Features
- **OpenAI-compatible API**: Drop-in replacement for OpenAI API endpoints
- **Multiple Model Types**: LLM, VLM, Embeddings, Reranking, CV, ASR
- **GPU Acceleration**: CUDA support for NVIDIA GPUs
- **NPU Support**: Optimized for Qualcomm NPU on ARM64
## Supported Models
| Modality      | Models                                                  |
| ------------- | ------------------------------------------------------- |
| **LLM**       | `NexaAI/LFM2-1.2B-npu`, `NexaAI/Granite-4.0-h-350M-NPU` |
| **VLM**       | `NexaAI/OmniNeural-4B`                                  |
| **Embedding** | `NexaAI/embeddinggemma-300m-npu`, `NexaAI/EmbedNeural`  |
| **Rerank**    | `NexaAI/jina-v2-rerank-npu`                             |
| **CV**        | `NexaAI/yolov12-npu`, `NexaAI/convnext-tiny-npu-IoT`    |
| **ASR**       | `NexaAI/parakeet-tdt-0.6b-v3-npu`                       |
## Usage
### CPU Mode
```bash
docker compose up -d
```
### GPU Mode (CUDA)
```bash
docker compose --profile gpu up -d nexa-sdk-cuda
```
### Pull a Model
```bash
docker exec -it nexa-sdk nexa pull NexaAI/Granite-4.0-h-350M-NPU
```
### Interactive CLI
```bash
docker exec -it nexa-sdk nexa infer NexaAI/Granite-4.0-h-350M-NPU
```
### API Examples
- Chat completions:
```bash
curl -X POST http://localhost:18181/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "NexaAI/Granite-4.0-h-350M-NPU",
    "messages": [{"role": "user", "content": "Hello!"}]
  }'
```
- Embeddings:
```bash
curl -X POST http://localhost:18181/v1/embeddings \
  -H "Content-Type: application/json" \
  -d '{
    "model": "NexaAI/EmbedNeural",
    "input": "Hello, world!"
  }'
```
- Swagger UI: Visit `http://localhost:18181/docs/ui`
## Services
- `nexa-sdk`: CPU-based NexaSDK service (default)
- `nexa-sdk-cuda`: GPU-accelerated service with CUDA support (profile: `gpu`)
## Configuration
| Variable                 | Description               | Default   |
| ------------------------ | ------------------------- | --------- |
| `NEXA_SDK_VERSION`       | NexaSDK image version     | `v0.2.62` |
| `NEXA_SDK_PORT_OVERRIDE` | Host port for REST API    | `18181`   |
| `NEXA_TOKEN`             | Nexa API token (required) | -         |
| `TZ`                     | Timezone                  | `UTC`     |
## Volumes
- `nexa_data`: Volume for storing downloaded models and data
## Getting a Token
1. Create an account at [sdk.nexa.ai](https://sdk.nexa.ai)
2. Go to **Deployment → Create Token**
3. Copy the token to your `.env` file
## References
- [NexaSDK Documentation](https://docs.nexa.ai/nexa-sdk-docker/overview)
- [Docker Hub](https://hub.docker.com/r/nexa4ai/nexasdk)
- [Supported Models](https://docs.nexa.ai/nexa-sdk-docker/overview#supported-models)

src/nexa-sdk/compose.yaml Normal file

@@ -0,0 +1,74 @@
# NexaSDK Docker Compose Configuration
# OpenAI-compatible API for LLM, Embeddings, Reranking, and more
# Supports both CPU and GPU (CUDA/NPU) acceleration
x-defaults: &defaults
restart: unless-stopped
logging:
driver: json-file
options:
max-size: 100m
max-file: "3"
services:
nexa-sdk:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}nexa4ai/nexasdk:${NEXA_SDK_VERSION:-v0.2.62}
ports:
- "${NEXA_SDK_PORT_OVERRIDE:-18181}:18181"
volumes:
- nexa_data:/data
environment:
- TZ=${TZ:-UTC}
- NEXA_TOKEN=${NEXA_TOKEN:-}
command: serve
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:18181/docs/ui"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
deploy:
resources:
limits:
cpus: ${NEXA_SDK_CPU_LIMIT:-4.0}
memory: ${NEXA_SDK_MEMORY_LIMIT:-8G}
reservations:
cpus: ${NEXA_SDK_CPU_RESERVATION:-1.0}
memory: ${NEXA_SDK_MEMORY_RESERVATION:-2G}
# GPU-accelerated service with CUDA support
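  # Note: binds the same host port as nexa-sdk above, so run either the default
  # service or the gpu profile, not both at once.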
nexa-sdk-cuda:
<<: *defaults
profiles:
- gpu
image: ${GLOBAL_REGISTRY:-}nexa4ai/nexasdk:${NEXA_SDK_VERSION:-v0.2.62}-cuda
ports:
- "${NEXA_SDK_PORT_OVERRIDE:-18181}:18181"
volumes:
- nexa_data:/data
environment:
- TZ=${TZ:-UTC}
- NEXA_TOKEN=${NEXA_TOKEN:-}
command: serve
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:18181/docs/ui"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
deploy:
resources:
limits:
cpus: ${NEXA_SDK_CPU_LIMIT:-8.0}
memory: ${NEXA_SDK_MEMORY_LIMIT:-16G}
reservations:
cpus: ${NEXA_SDK_CPU_RESERVATION:-2.0}
memory: ${NEXA_SDK_MEMORY_RESERVATION:-4G}
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
volumes:
nexa_data:

src/phoenix/.env.example Normal file

@@ -0,0 +1,31 @@
# Phoenix version
PHOENIX_VERSION=version-12.19.0
# Timezone
TZ=UTC
# Phoenix ports
# UI and OTLP HTTP collector
PHOENIX_PORT_OVERRIDE=6006
# OTLP gRPC collector
PHOENIX_GRPC_PORT_OVERRIDE=4317
# Phoenix configuration
PHOENIX_ENABLE_PROMETHEUS=false
# Optional: set for authentication; generate with: openssl rand -base64 32
PHOENIX_SECRET=
# PostgreSQL configuration
POSTGRES_VERSION=17.2-alpine3.21
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=phoenix
# Resource limits for Phoenix
PHOENIX_CPU_LIMIT=2.0
PHOENIX_MEMORY_LIMIT=2G
PHOENIX_CPU_RESERVATION=0.5
PHOENIX_MEMORY_RESERVATION=512M
# Resource limits for PostgreSQL
PHOENIX_DB_CPU_LIMIT=1.0
PHOENIX_DB_MEMORY_LIMIT=1G
PHOENIX_DB_CPU_RESERVATION=0.25
PHOENIX_DB_MEMORY_RESERVATION=256M

src/phoenix/README.md Normal file

@@ -0,0 +1,100 @@
# Arize Phoenix
[English](./README.md) | [中文](./README.zh.md)
Arize Phoenix is an open-source AI observability platform for LLM applications. It provides tracing, evaluation, datasets, and experiments to help you build and improve AI applications.
## Services
- `phoenix`: The main Phoenix application server with UI and OpenTelemetry collectors.
- `phoenix-db`: PostgreSQL database for persistent storage.
## Ports
| Port | Protocol | Description |
| ---- | -------- | ----------------------------------------- |
| 6006 | HTTP | UI and OTLP HTTP collector (`/v1/traces`) |
| 4317 | gRPC | OTLP gRPC collector |
## Environment Variables
| Variable Name | Description | Default Value |
| -------------------------- | ------------------------------------- | ----------------- |
| PHOENIX_VERSION | Phoenix image version | `version-12.19.0` |
| PHOENIX_PORT_OVERRIDE | Host port for Phoenix UI and HTTP API | `6006` |
| PHOENIX_GRPC_PORT_OVERRIDE | Host port for OTLP gRPC collector | `4317` |
| PHOENIX_ENABLE_PROMETHEUS | Enable Prometheus metrics endpoint | `false` |
| PHOENIX_SECRET | Secret for authentication (optional) | `""` |
| POSTGRES_VERSION | PostgreSQL image version | `17.2-alpine3.21` |
| POSTGRES_USER | PostgreSQL username | `postgres` |
| POSTGRES_PASSWORD | PostgreSQL password | `postgres` |
| POSTGRES_DB | PostgreSQL database name | `phoenix` |
## Volumes
- `phoenix_db_data`: PostgreSQL data volume for persistent storage.
## Getting Started
1. Copy the example environment file:
```bash
cp .env.example .env
```
2. (Optional) For production, set a secure password and secret:
```bash
# Generate a secret for authentication
openssl rand -base64 32
```
3. Start the services:
```bash
docker compose up -d
```
4. Access the Phoenix UI at `http://localhost:6006` (a quick command-line check follows)
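To confirm readiness from a shell, you can hit the same `/healthz` route the compose healthcheck probes:
```bash
# Exits non-zero until Phoenix answers on its health endpoint
curl -f http://localhost:6006/healthz
```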
## Sending Traces
Phoenix accepts OpenTelemetry-compatible traces. You can send them using:
### HTTP (OTLP)
Send traces to `http://localhost:6006/v1/traces`
### gRPC (OTLP)
Send traces to `localhost:4317`
### Python Example
```python
from phoenix.otel import register
tracer_provider = register(
project_name="my-llm-app",
endpoint="http://localhost:6006/v1/traces",
)
```
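The `register` helper above ships in Arize's OTel bootstrap package — `arize-phoenix-otel` on PyPI, to the best of our knowledge; verify the package name against the Phoenix docs:
```bash
# Assumed package name providing phoenix.otel.register
pip install arize-phoenix-otel
```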
## Features
- **Tracing**: Capture and visualize LLM application traces with OpenTelemetry support.
- **Evaluation**: Run evaluations using built-in or custom evaluators.
- **Datasets**: Create and manage datasets for testing and evaluation.
- **Experiments**: Run experiments to compare model performance.
- **Playground**: Test prompts with different models interactively.
## Documentation
For more information, visit the [official Phoenix documentation](https://docs.arize.com/phoenix).
## Security Notes
- Change the default PostgreSQL password in production.
- Set `PHOENIX_SECRET` for authentication if exposing Phoenix publicly.
- Consider using a reverse proxy with SSL/TLS in production.
- Regularly back up the PostgreSQL database (a minimal sketch follows).
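A minimal backup sketch, assuming the service name, user, and database from this compose file:
```bash
# Dump the phoenix database from the phoenix-db container into a local file
docker compose exec -T phoenix-db pg_dump -U postgres phoenix > phoenix_backup.sql
```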

src/phoenix/README.zh.md Normal file

@@ -0,0 +1,100 @@
# Arize Phoenix
[English](./README.md) | [中文](./README.zh.md)
Arize Phoenix is an open-source AI observability platform built for LLM applications. It provides tracing, evaluation, datasets, and experiments to help you build and improve AI applications.
## Services
- `phoenix`: The main Phoenix application server with the UI and OpenTelemetry collectors.
- `phoenix-db`: PostgreSQL database for persistent storage.
## Ports
| Port | Protocol | Description                               |
| ---- | -------- | ----------------------------------------- |
| 6006 | HTTP     | UI and OTLP HTTP collector (`/v1/traces`) |
| 4317 | gRPC     | OTLP gRPC collector                       |
## Environment Variables
| Variable Name              | Description                               | Default Value     |
| -------------------------- | ----------------------------------------- | ----------------- |
| PHOENIX_VERSION            | Phoenix image version                     | `version-12.19.0` |
| PHOENIX_PORT_OVERRIDE      | Host port for the Phoenix UI and HTTP API | `6006`            |
| PHOENIX_GRPC_PORT_OVERRIDE | Host port for the OTLP gRPC collector     | `4317`            |
| PHOENIX_ENABLE_PROMETHEUS  | Enable the Prometheus metrics endpoint    | `false`           |
| PHOENIX_SECRET             | Secret for authentication (optional)      | `""`              |
| POSTGRES_VERSION           | PostgreSQL image version                  | `17.2-alpine3.21` |
| POSTGRES_USER              | PostgreSQL username                       | `postgres`        |
| POSTGRES_PASSWORD          | PostgreSQL password                       | `postgres`        |
| POSTGRES_DB                | PostgreSQL database name                  | `phoenix`         |
## Volumes
- `phoenix_db_data`: PostgreSQL data volume for persistent storage.
## Getting Started
1. Copy the example environment file:
```bash
cp .env.example .env
```
2. (Optional) For production, set a secure password and secret:
```bash
# Generate a secret for authentication
openssl rand -base64 32
```
3. Start the services:
```bash
docker compose up -d
```
4. Access the Phoenix UI at `http://localhost:6006`
## Sending Traces
Phoenix accepts OpenTelemetry-compatible traces. You can send them using:
### HTTP (OTLP)
Send traces to `http://localhost:6006/v1/traces`
### gRPC (OTLP)
Send traces to `localhost:4317`
### Python Example
```python
from phoenix.otel import register
tracer_provider = register(
    project_name="my-llm-app",
    endpoint="http://localhost:6006/v1/traces",
)
```
## Features
- **Tracing**: Capture and visualize LLM application traces with OpenTelemetry support.
- **Evaluation**: Run evaluations using built-in or custom evaluators.
- **Datasets**: Create and manage datasets for testing and evaluation.
- **Experiments**: Run experiments to compare model performance.
- **Playground**: Interactively test prompts with different models.
## Documentation
For more information, visit the [official Phoenix documentation](https://docs.arize.com/phoenix).
## Security Notes
- Change the default PostgreSQL password in production.
- Set `PHOENIX_SECRET` for authentication if exposing Phoenix publicly.
- Consider using a reverse proxy with SSL/TLS in production.
- Regularly back up the PostgreSQL database.

src/phoenix/compose.yaml Normal file

@@ -0,0 +1,68 @@
# Arize Phoenix - AI Observability and Evaluation Platform
# https://docs.arize.com/phoenix
x-defaults: &defaults
restart: unless-stopped
logging:
driver: json-file
options:
max-size: 100m
max-file: "3"
services:
phoenix:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}arizephoenix/phoenix:${PHOENIX_VERSION:-version-12.19.0}
ports:
- "${PHOENIX_PORT_OVERRIDE:-6006}:6006" # UI and OTLP HTTP collector
- "${PHOENIX_GRPC_PORT_OVERRIDE:-4317}:4317" # OTLP gRPC collector
environment:
- TZ=${TZ:-UTC}
- PHOENIX_SQL_DATABASE_URL=postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@phoenix-db:5432/${POSTGRES_DB:-phoenix}
- PHOENIX_ENABLE_PROMETHEUS=${PHOENIX_ENABLE_PROMETHEUS:-false}
- PHOENIX_SECRET=${PHOENIX_SECRET:-}
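      # Assumption: recent Phoenix releases also expect PHOENIX_ENABLE_AUTH=true before
      # PHOENIX_SECRET takes effect — check the Phoenix auth docs if auth seems inert.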
depends_on:
phoenix-db:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:6006/healthz"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
deploy:
resources:
limits:
cpus: ${PHOENIX_CPU_LIMIT:-2.0}
memory: ${PHOENIX_MEMORY_LIMIT:-2G}
reservations:
cpus: ${PHOENIX_CPU_RESERVATION:-0.5}
memory: ${PHOENIX_MEMORY_RESERVATION:-512M}
phoenix-db:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}postgres:${POSTGRES_VERSION:-17.2-alpine3.21}
environment:
- TZ=${TZ:-UTC}
- POSTGRES_USER=${POSTGRES_USER:-postgres}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-postgres}
- POSTGRES_DB=${POSTGRES_DB:-phoenix}
volumes:
- phoenix_db_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
deploy:
resources:
limits:
cpus: ${PHOENIX_DB_CPU_LIMIT:-1.0}
memory: ${PHOENIX_DB_MEMORY_LIMIT:-1G}
reservations:
cpus: ${PHOENIX_DB_CPU_RESERVATION:-0.25}
memory: ${PHOENIX_DB_MEMORY_RESERVATION:-256M}
volumes:
phoenix_db_data:

src/trigger-dev/.env.example Normal file

@@ -0,0 +1,223 @@
# =============================================================================
# Trigger.dev Configuration
# =============================================================================
# Global settings
TZ=UTC
GLOBAL_REGISTRY=
# =============================================================================
# Image Versions
# =============================================================================
# Trigger.dev version (webapp and supervisor)
TRIGGER_IMAGE_TAG=v4.2.0
# Infrastructure versions
POSTGRES_VERSION=17.2-alpine3.21
REDIS_VERSION=7.4.3-alpine3.21
CLICKHOUSE_VERSION=25.3
MINIO_VERSION=RELEASE.2025-04-22T22-12-26Z
MC_VERSION=RELEASE.2025-04-16T18-13-26Z
ELECTRIC_VERSION=1.0.0
REGISTRY_IMAGE_VERSION=3
DOCKER_SOCKET_PROXY_VERSION=0.3.0
# =============================================================================
# Port Configuration
# =============================================================================
# Webapp port
TRIGGER_PORT=8030
# MinIO ports
MINIO_API_PORT=9000
MINIO_CONSOLE_PORT=9001
# Registry port
REGISTRY_PORT=5000
# =============================================================================
# Required Secrets (MUST be set)
# =============================================================================
# Generate with: openssl rand -hex 16
SESSION_SECRET=
MAGIC_LINK_SECRET=
ENCRYPTION_KEY=
# Managed worker secret (must match between webapp and supervisor)
MANAGED_WORKER_SECRET=managed-secret
# PostgreSQL password
POSTGRES_PASSWORD=
# =============================================================================
# Domain Configuration
# =============================================================================
# Public URLs (change these for production)
APP_ORIGIN=http://localhost:8030
LOGIN_ORIGIN=http://localhost:8030
API_ORIGIN=http://localhost:8030
STREAM_ORIGIN=http://localhost:8030
# =============================================================================
# Database Configuration
# =============================================================================
# PostgreSQL
POSTGRES_USER=trigger
POSTGRES_DB=trigger
DATABASE_CONNECTION_LIMIT=10
# =============================================================================
# ClickHouse Configuration
# =============================================================================
CLICKHOUSE_DATABASE=default
CLICKHOUSE_USER=default
CLICKHOUSE_PASSWORD=password
# =============================================================================
# Object Storage Configuration (MinIO)
# =============================================================================
MINIO_ROOT_USER=admin
MINIO_ROOT_PASSWORD=very-safe-password
PACKET_BUCKET_NAME=packets
# =============================================================================
# Registry Configuration
# =============================================================================
# Registry host (internal)
REGISTRY_HOST=trigger-registry:5000
# Registry credentials (generate htpasswd file)
REGISTRY_USER=registry-user
REGISTRY_PASSWORD=very-secure-indeed
# =============================================================================
# Authentication Configuration
# =============================================================================
# Restrict login to specific email addresses (regex pattern)
# Example: ^(user1@example\.com|user2@example\.com)$
WHITELISTED_EMAILS=
# GitHub OAuth (optional)
AUTH_GITHUB_CLIENT_ID=
AUTH_GITHUB_CLIENT_SECRET=
# =============================================================================
# Email Configuration (optional)
# =============================================================================
# Transport: resend, smtp, or aws-ses
EMAIL_TRANSPORT=
# Email addresses
FROM_EMAIL=
REPLY_TO_EMAIL=
# Resend configuration
RESEND_API_KEY=
# SMTP configuration
SMTP_HOST=
SMTP_PORT=587
SMTP_SECURE=false
SMTP_USER=
SMTP_PASSWORD=
# AWS SES (uses AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
# =============================================================================
# Telemetry
# =============================================================================
# Set to any non-empty value to disable telemetry
TRIGGER_TELEMETRY_DISABLED=
# =============================================================================
# Bootstrap Configuration
# =============================================================================
# Enable automatic worker group creation
TRIGGER_BOOTSTRAP_ENABLED=true
TRIGGER_BOOTSTRAP_WORKER_GROUP_NAME=bootstrap
# Worker token (auto-generated on first run, or set manually for remote workers)
# Format: tr_wgt_... (obtained from webapp logs on first run)
TRIGGER_WORKER_TOKEN=file:///home/node/shared/worker_token
# =============================================================================
# Supervisor Configuration
# =============================================================================
# Worker instance name (unique per supervisor)
TRIGGER_WORKER_INSTANCE_NAME=supervisor-1
TRIGGER_WORKER_HEARTBEAT_INTERVAL_SECONDS=30
# Docker settings
DOCKER_ENFORCE_MACHINE_PRESETS=true
DOCKER_AUTOREMOVE_EXITED_CONTAINERS=true
# =============================================================================
# Resource Limits
# =============================================================================
# Webapp
WEBAPP_CPU_LIMIT=3.0
WEBAPP_MEMORY_LIMIT=6G
WEBAPP_CPU_RESERVATION=1.0
WEBAPP_MEMORY_RESERVATION=2G
# PostgreSQL
POSTGRES_CPU_LIMIT=2.0
POSTGRES_MEMORY_LIMIT=4G
POSTGRES_CPU_RESERVATION=0.5
POSTGRES_MEMORY_RESERVATION=1G
# Redis
REDIS_CPU_LIMIT=1.0
REDIS_MEMORY_LIMIT=2G
REDIS_CPU_RESERVATION=0.25
REDIS_MEMORY_RESERVATION=512M
# ClickHouse
CLICKHOUSE_CPU_LIMIT=2.0
CLICKHOUSE_MEMORY_LIMIT=4G
CLICKHOUSE_CPU_RESERVATION=0.5
CLICKHOUSE_MEMORY_RESERVATION=1G
# MinIO
MINIO_CPU_LIMIT=1.0
MINIO_MEMORY_LIMIT=2G
MINIO_CPU_RESERVATION=0.25
MINIO_MEMORY_RESERVATION=512M
# Electric
ELECTRIC_CPU_LIMIT=1.0
ELECTRIC_MEMORY_LIMIT=1G
ELECTRIC_CPU_RESERVATION=0.25
ELECTRIC_MEMORY_RESERVATION=256M
# Registry
REGISTRY_CPU_LIMIT=0.5
REGISTRY_MEMORY_LIMIT=512M
REGISTRY_CPU_RESERVATION=0.1
REGISTRY_MEMORY_RESERVATION=128M
# Supervisor
SUPERVISOR_CPU_LIMIT=2.0
SUPERVISOR_MEMORY_LIMIT=4G
SUPERVISOR_CPU_RESERVATION=0.5
SUPERVISOR_MEMORY_RESERVATION=1G
# Docker Socket Proxy
DOCKER_PROXY_CPU_LIMIT=0.5
DOCKER_PROXY_MEMORY_LIMIT=256M
DOCKER_PROXY_CPU_RESERVATION=0.1
DOCKER_PROXY_MEMORY_RESERVATION=64M

src/trigger-dev/README.md Normal file

@@ -0,0 +1,247 @@
# Trigger.dev
[English](./README.md) | [中文](./README.zh.md)
Trigger.dev is an open-source platform for building AI workflows and background jobs in TypeScript. It provides long-running tasks with retries, queues, observability, and elastic scaling.
## Services
### Core Services (Webapp Stack)
| Service | Description |
| ----------------------- | --------------------------------------------------- |
| `webapp` | Main Trigger.dev application with dashboard and API |
| `trigger-postgres` | PostgreSQL database with logical replication |
| `trigger-redis` | Redis for cache and job queue |
| `trigger-clickhouse` | ClickHouse for analytics database |
| `trigger-minio` | S3-compatible object storage |
| `trigger-minio-init` | MinIO bucket initialization |
| `electric` | ElectricSQL for real-time sync |
| `trigger-registry` | Private Docker registry for deployed code |
| `trigger-registry-init` | Registry htpasswd initialization |
### Worker Services (Supervisor Stack)
| Service | Description |
| --------------------- | ----------------------------------------- |
| `supervisor` | Worker orchestrator that executes tasks |
| `docker-socket-proxy` | Secure Docker socket proxy for supervisor |
## Prerequisites
- Docker 20.10.0+
- Docker Compose 2.20.0+
- Minimum 6 vCPU and 12 GB RAM for the complete stack
## Quick Start
1. Create a `.env` file with required secrets:
```bash
cp .env.example .env
```
2. Generate required secrets:
```bash
# Generate secrets
echo "SESSION_SECRET=$(openssl rand -hex 16)" >> .env
echo "MAGIC_LINK_SECRET=$(openssl rand -hex 16)" >> .env
echo "ENCRYPTION_KEY=$(openssl rand -hex 16)" >> .env
echo "POSTGRES_PASSWORD=$(openssl rand -hex 16)" >> .env
```
3. Start all services:
```bash
docker compose up -d
```
4. Wait for services to be healthy:
```bash
docker compose ps
```
5. Access the webapp at `http://localhost:8030`
6. Get the magic link from webapp logs for first login:
```bash
docker compose logs -f webapp
```
## Environment Variables
### Required Secrets
| Variable | Description |
| ------------------- | ---------------------------------------------------------- |
| `SESSION_SECRET` | Session encryption secret (run: `openssl rand -hex 16`) |
| `MAGIC_LINK_SECRET` | Magic link encryption secret (run: `openssl rand -hex 16`) |
| `ENCRYPTION_KEY` | Secret store encryption key (run: `openssl rand -hex 16`) |
| `POSTGRES_PASSWORD` | PostgreSQL password |
### Domain Configuration
| Variable | Default | Description |
| -------------- | ----------------------- | -------------------------------------- |
| `APP_ORIGIN` | `http://localhost:8030` | Public webapp URL |
| `LOGIN_ORIGIN` | `http://localhost:8030` | Login URL (usually same as APP_ORIGIN) |
| `API_ORIGIN` | `http://localhost:8030` | API URL |
### Image Versions
| Variable | Default | Description |
| -------------------- | ------------------------------ | ----------------------------------------- |
| `TRIGGER_IMAGE_TAG` | `v4.2.0` | Trigger.dev webapp and supervisor version |
| `POSTGRES_VERSION` | `17.2-alpine3.21` | PostgreSQL version |
| `REDIS_VERSION` | `7.4.3-alpine3.21` | Redis version |
| `CLICKHOUSE_VERSION` | `25.3` | ClickHouse version |
| `MINIO_VERSION` | `RELEASE.2025-04-22T22-12-26Z` | MinIO version |
### Port Configuration
| Variable | Default | Description |
| -------------------- | ------- | -------------------- |
| `TRIGGER_PORT` | `8030` | Webapp port |
| `MINIO_API_PORT` | `9000` | MinIO API port |
| `MINIO_CONSOLE_PORT` | `9001` | MinIO console port |
| `REGISTRY_PORT` | `5000` | Docker registry port |
### Authentication
| Variable | Description |
| --------------------------- | ------------------------------------------------------------- |
| `WHITELISTED_EMAILS` | Regex pattern to restrict login (e.g., `^user@example\.com$`) |
| `AUTH_GITHUB_CLIENT_ID` | GitHub OAuth client ID |
| `AUTH_GITHUB_CLIENT_SECRET` | GitHub OAuth client secret |
### Email Configuration
| Variable | Default | Description |
| ----------------- | ------- | ---------------------------------------------- |
| `EMAIL_TRANSPORT` | — | Transport type: `resend`, `smtp`, or `aws-ses` |
| `FROM_EMAIL` | — | From email address |
| `RESEND_API_KEY` | — | Resend API key (if using Resend) |
| `SMTP_HOST` | — | SMTP server host |
| `SMTP_PORT` | `587` | SMTP server port |
## Volumes
| Volume | Description |
| ------------------------- | -------------------------------- |
| `trigger_shared` | Shared volume for worker token |
| `trigger_postgres_data` | PostgreSQL data |
| `trigger_redis_data` | Redis data |
| `trigger_clickhouse_data` | ClickHouse data |
| `trigger_clickhouse_logs` | ClickHouse logs |
| `trigger_minio_data` | MinIO object storage |
| `trigger_registry_data` | Docker registry data |
| `trigger_registry_auth` | Registry htpasswd authentication |
## Worker Token
On first startup, the webapp generates a worker token and saves it to the shared volume. If you need to run workers on separate machines:
1. Check webapp logs for the token:
```bash
docker compose logs webapp | grep -A15 "Worker Token"
```
2. Set the token in the remote worker's `.env`:
```bash
TRIGGER_WORKER_TOKEN=tr_wgt_xxxxx
```
## Registry Setup
The built-in registry uses htpasswd authentication. The htpasswd file is **automatically generated** on first startup using the credentials from environment variables.
Default credentials:
- Username: `registry-user` (set via `REGISTRY_USER`)
- Password: `very-secure-indeed` (set via `REGISTRY_PASSWORD`)
To use custom credentials, set them in your `.env` file before first run:
```bash
REGISTRY_USER=my-user
REGISTRY_PASSWORD=my-secure-password
```
Before deploying tasks, login to the registry:
```bash
docker login -u registry-user localhost:5000
```
## CLI Usage
To initialize a project with self-hosted Trigger.dev:
```bash
npx trigger.dev@latest login -a http://localhost:8030
npx trigger.dev@latest init -p <project-ref> -a http://localhost:8030
```
To deploy tasks:
```bash
npx trigger.dev@latest deploy --self-hosted
```
## GitHub OAuth Setup
1. Create a GitHub OAuth App at `https://github.com/settings/developers`
2. Set callback URL: `http://localhost:8030/auth/github/callback`
3. Configure environment variables:
```env
AUTH_GITHUB_CLIENT_ID=your_client_id
AUTH_GITHUB_CLIENT_SECRET=your_client_secret
```
## Production Considerations
- Use strong, unique passwords for all secrets
- Set up proper TLS/SSL with a reverse proxy
- Configure email transport for magic links
- Use external managed databases for high availability
- Set appropriate resource limits based on your workload
- Enable `WHITELISTED_EMAILS` to restrict access
- Consider disabling telemetry: `TRIGGER_TELEMETRY_DISABLED=1`
## Scaling Workers
To add more worker capacity:
1. Set up additional supervisor instances on different machines
2. Configure each with the same `TRIGGER_WORKER_TOKEN`
3. Use a unique `TRIGGER_WORKER_INSTANCE_NAME` for each, as in the sketch below
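For example, a second machine's supervisor `.env` might differ only in these two lines (the token value is a placeholder; reuse the real one from the webapp logs):
```bash
# Same token on every supervisor
TRIGGER_WORKER_TOKEN=tr_wgt_xxxxx
# Unique per supervisor instance
TRIGGER_WORKER_INSTANCE_NAME=supervisor-2
```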
## Troubleshooting
### Magic links not arriving
- Check webapp logs: `docker compose logs -f webapp`
- Magic links are logged if no email transport is configured
- Set up email transport for production use
### Deployment fails at push step
- Ensure you're logged into the registry: `docker login localhost:5000`
- Check registry is healthy: `docker compose ps trigger-registry`
### Services not starting
- Ensure all required secrets are set in `.env`
- Check logs: `docker compose logs -f`
## References
- [Trigger.dev Documentation](https://trigger.dev/docs)
- [Self-hosting Guide](https://trigger.dev/docs/self-hosting/docker)
- [GitHub Repository](https://github.com/triggerdotdev/trigger.dev)

src/trigger-dev/README.zh.md Normal file

@@ -0,0 +1,247 @@
# Trigger.dev
[English](./README.md) | [中文](./README.zh.md)
Trigger.dev is an open-source platform for building AI workflows and background jobs in TypeScript. It provides long-running tasks with retries, queues, observability, and elastic scaling.
## Services
### Core Services (Webapp Stack)
| Service                 | Description                                          |
| ----------------------- | ---------------------------------------------------- |
| `webapp`                | Main Trigger.dev application with dashboard and API  |
| `trigger-postgres`      | PostgreSQL database with logical replication         |
| `trigger-redis`         | Redis for cache and job queue                        |
| `trigger-clickhouse`    | ClickHouse analytics database                        |
| `trigger-minio`         | S3-compatible object storage                         |
| `trigger-minio-init`    | MinIO bucket initialization                          |
| `electric`              | ElectricSQL for real-time sync                       |
| `trigger-registry`      | Private Docker registry for deployed code            |
| `trigger-registry-init` | Registry htpasswd initialization                     |
### Worker Services (Supervisor Stack)
| Service               | Description                                    |
| --------------------- | ---------------------------------------------- |
| `supervisor`          | Worker orchestrator that executes tasks        |
| `docker-socket-proxy` | Secure Docker socket proxy for the supervisor  |
## Prerequisites
- Docker 20.10.0+
- Docker Compose 2.20.0+
- At least 6 vCPU and 12 GB RAM for the complete stack
## Quick Start
1. Create a `.env` file with the required secrets:
```bash
cp .env.example .env
```
2. Generate the required secrets:
```bash
# Generate secrets
echo "SESSION_SECRET=$(openssl rand -hex 16)" >> .env
echo "MAGIC_LINK_SECRET=$(openssl rand -hex 16)" >> .env
echo "ENCRYPTION_KEY=$(openssl rand -hex 16)" >> .env
echo "POSTGRES_PASSWORD=$(openssl rand -hex 16)" >> .env
```
3. Start all services:
```bash
docker compose up -d
```
4. Wait for the services to become healthy:
```bash
docker compose ps
```
5. Access the webapp at `http://localhost:8030`
6. Get the magic link for the first login from the webapp logs:
```bash
docker compose logs -f webapp
```
## Environment Variables
### Required Secrets
| Variable            | Description                                                 |
| ------------------- | ----------------------------------------------------------- |
| `SESSION_SECRET`    | Session encryption secret (run: `openssl rand -hex 16`)    |
| `MAGIC_LINK_SECRET` | Magic link encryption secret (run: `openssl rand -hex 16`) |
| `ENCRYPTION_KEY`    | Secret store encryption key (run: `openssl rand -hex 16`)  |
| `POSTGRES_PASSWORD` | PostgreSQL password                                         |
### Domain Configuration
| Variable       | Default                 | Description                            |
| -------------- | ----------------------- | -------------------------------------- |
| `APP_ORIGIN`   | `http://localhost:8030` | Public webapp URL                      |
| `LOGIN_ORIGIN` | `http://localhost:8030` | Login URL (usually same as APP_ORIGIN) |
| `API_ORIGIN`   | `http://localhost:8030` | API URL                                |
### Image Versions
| Variable             | Default                        | Description                                |
| -------------------- | ------------------------------ | ------------------------------------------ |
| `TRIGGER_IMAGE_TAG`  | `v4.2.0`                       | Trigger.dev webapp and supervisor version  |
| `POSTGRES_VERSION`   | `17.2-alpine3.21`              | PostgreSQL version                         |
| `REDIS_VERSION`      | `7.4.3-alpine3.21`             | Redis version                              |
| `CLICKHOUSE_VERSION` | `25.3`                         | ClickHouse version                         |
| `MINIO_VERSION`      | `RELEASE.2025-04-22T22-12-26Z` | MinIO version                              |
### Port Configuration
| Variable             | Default | Description          |
| -------------------- | ------- | -------------------- |
| `TRIGGER_PORT`       | `8030`  | Webapp port          |
| `MINIO_API_PORT`     | `9000`  | MinIO API port       |
| `MINIO_CONSOLE_PORT` | `9001`  | MinIO console port   |
| `REGISTRY_PORT`      | `5000`  | Docker registry port |
### Authentication
| Variable                    | Description                                                   |
| --------------------------- | ------------------------------------------------------------- |
| `WHITELISTED_EMAILS`        | Regex pattern to restrict login (e.g., `^user@example\.com$`) |
| `AUTH_GITHUB_CLIENT_ID`     | GitHub OAuth client ID                                        |
| `AUTH_GITHUB_CLIENT_SECRET` | GitHub OAuth client secret                                    |
### Email Configuration
| Variable          | Default | Description                                    |
| ----------------- | ------- | ---------------------------------------------- |
| `EMAIL_TRANSPORT` | —       | Transport type: `resend`, `smtp`, or `aws-ses` |
| `FROM_EMAIL`      | —       | From email address                             |
| `RESEND_API_KEY`  | —       | Resend API key (if using Resend)               |
| `SMTP_HOST`       | —       | SMTP server host                               |
| `SMTP_PORT`       | `587`   | SMTP server port                               |
## Volumes
| Volume                    | Description                        |
| ------------------------- | ---------------------------------- |
| `trigger_shared`          | Shared volume for the worker token |
| `trigger_postgres_data`   | PostgreSQL data                    |
| `trigger_redis_data`      | Redis data                         |
| `trigger_clickhouse_data` | ClickHouse data                    |
| `trigger_clickhouse_logs` | ClickHouse logs                    |
| `trigger_minio_data`      | MinIO object storage               |
| `trigger_registry_data`   | Docker registry data               |
| `trigger_registry_auth`   | Registry htpasswd authentication   |
## Worker Token
On first startup, the webapp generates a worker token and saves it to the shared volume. To run workers on separate machines:
1. Get the token from the webapp logs:
```bash
docker compose logs webapp | grep -A15 "Worker Token"
```
2. Set the token in the remote worker's `.env`:
```bash
TRIGGER_WORKER_TOKEN=tr_wgt_xxxxx
```
## Registry Setup
The built-in registry uses htpasswd authentication. The htpasswd file is **generated automatically** on first startup from the credentials in the environment variables.
Default credentials:
- Username: `registry-user` (set via `REGISTRY_USER`)
- Password: `very-secure-indeed` (set via `REGISTRY_PASSWORD`)
To use custom credentials, set them in your `.env` file before the first run:
```bash
REGISTRY_USER=my-user
REGISTRY_PASSWORD=my-secure-password
```
Before deploying tasks, log in to the registry:
```bash
docker login -u registry-user localhost:5000
```
## CLI Usage
To initialize a project against the self-hosted Trigger.dev:
```bash
npx trigger.dev@latest login -a http://localhost:8030
npx trigger.dev@latest init -p <project-ref> -a http://localhost:8030
```
To deploy tasks:
```bash
npx trigger.dev@latest deploy --self-hosted
```
## GitHub OAuth Setup
1. Create a GitHub OAuth App at `https://github.com/settings/developers`
2. Set the callback URL: `http://localhost:8030/auth/github/callback`
3. Configure the environment variables:
```env
AUTH_GITHUB_CLIENT_ID=your_client_id
AUTH_GITHUB_CLIENT_SECRET=your_client_secret
```
## Production Considerations
- Use strong, unique passwords for all secrets
- Set up proper TLS/SSL with a reverse proxy
- Configure an email transport for magic links
- Use external managed databases for high availability
- Set appropriate resource limits for your workload
- Enable `WHITELISTED_EMAILS` to restrict access
- Consider disabling telemetry: `TRIGGER_TELEMETRY_DISABLED=1`
## Scaling Workers
To add more worker capacity:
1. Set up additional supervisor instances on different machines
2. Configure each with the same `TRIGGER_WORKER_TOKEN`
3. Use a unique `TRIGGER_WORKER_INSTANCE_NAME` for each
## Troubleshooting
### Magic links not arriving
- Check the webapp logs: `docker compose logs -f webapp`
- Magic links are written to the logs when no email transport is configured
- Set up an email transport for production use
### Deployment fails at the push step
- Make sure you are logged in to the registry: `docker login localhost:5000`
- Check that the registry is healthy: `docker compose ps trigger-registry`
### Services not starting
- Make sure all required secrets are set in `.env`
- Check the logs: `docker compose logs -f`
## References
- [Trigger.dev Documentation](https://trigger.dev/docs)
- [Self-hosting Guide](https://trigger.dev/docs/self-hosting/docker)
- [GitHub Repository](https://github.com/triggerdotdev/trigger.dev)

src/trigger-dev/compose.yaml Normal file

@@ -0,0 +1,400 @@
# Trigger.dev - Build and deploy fully-managed AI agents and workflows
# https://trigger.dev/
# Repository: https://github.com/triggerdotdev/trigger.dev
x-defaults: &defaults
restart: unless-stopped
logging:
driver: json-file
options:
max-size: 100m
max-file: "3"
services:
# =============================================================================
# Core Services (Webapp Stack)
# =============================================================================
webapp:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}ghcr.io/triggerdotdev/trigger.dev:${TRIGGER_IMAGE_TAG:-v4.2.0}
ports:
- "${TRIGGER_PORT:-8030}:3030"
environment:
- TZ=${TZ:-UTC}
# Secrets (required)
- SESSION_SECRET=${SESSION_SECRET}
- MAGIC_LINK_SECRET=${MAGIC_LINK_SECRET}
- ENCRYPTION_KEY=${ENCRYPTION_KEY}
- MANAGED_WORKER_SECRET=${MANAGED_WORKER_SECRET:-managed-secret}
# Domains & ports
- REMIX_APP_PORT=3030
- APP_ORIGIN=${APP_ORIGIN:-http://localhost:8030}
- LOGIN_ORIGIN=${LOGIN_ORIGIN:-http://localhost:8030}
- API_ORIGIN=${API_ORIGIN:-http://localhost:8030}
- STREAM_ORIGIN=${STREAM_ORIGIN:-http://localhost:8030}
- ELECTRIC_ORIGIN=http://electric:3000
# Database
- DATABASE_URL=postgresql://${POSTGRES_USER:-trigger}:${POSTGRES_PASSWORD}@trigger-postgres:5432/${POSTGRES_DB:-trigger}?schema=public
- DIRECT_URL=postgresql://${POSTGRES_USER:-trigger}:${POSTGRES_PASSWORD}@trigger-postgres:5432/${POSTGRES_DB:-trigger}?schema=public
- DATABASE_CONNECTION_LIMIT=${DATABASE_CONNECTION_LIMIT:-10}
# Redis
- REDIS_HOST=trigger-redis
- REDIS_PORT=6379
- REDIS_TLS_DISABLED=true
# ClickHouse
- CLICKHOUSE_URL=http://trigger-clickhouse:8123
- CLICKHOUSE_USER=${CLICKHOUSE_USER:-default}
- CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password}
- CLICKHOUSE_DATABASE=${CLICKHOUSE_DATABASE:-default}
# Object storage (MinIO)
- OBJECT_STORE_BASE_URL=http://trigger-minio:9000
- OBJECT_STORE_ACCESS_KEY_ID=${MINIO_ROOT_USER:-admin}
- OBJECT_STORE_SECRET_ACCESS_KEY=${MINIO_ROOT_PASSWORD:-very-safe-password}
- PACKET_BUCKET_NAME=${PACKET_BUCKET_NAME:-packets}
# Registry
- DEPLOY_REGISTRY_HOST=${REGISTRY_HOST:-trigger-registry:5000}
- DEPLOY_REGISTRY_NAMESPACE=trigger
# Authentication
- WHITELISTED_EMAILS=${WHITELISTED_EMAILS:-}
- AUTH_GITHUB_CLIENT_ID=${AUTH_GITHUB_CLIENT_ID:-}
- AUTH_GITHUB_CLIENT_SECRET=${AUTH_GITHUB_CLIENT_SECRET:-}
# Email (optional)
- EMAIL_TRANSPORT=${EMAIL_TRANSPORT:-}
- FROM_EMAIL=${FROM_EMAIL:-}
- REPLY_TO_EMAIL=${REPLY_TO_EMAIL:-}
- RESEND_API_KEY=${RESEND_API_KEY:-}
- SMTP_HOST=${SMTP_HOST:-}
- SMTP_PORT=${SMTP_PORT:-587}
- SMTP_SECURE=${SMTP_SECURE:-false}
- SMTP_USER=${SMTP_USER:-}
- SMTP_PASSWORD=${SMTP_PASSWORD:-}
# Telemetry
- TRIGGER_TELEMETRY_DISABLED=${TRIGGER_TELEMETRY_DISABLED:-}
# Bootstrap
- TRIGGER_BOOTSTRAP_ENABLED=${TRIGGER_BOOTSTRAP_ENABLED:-true}
- TRIGGER_BOOTSTRAP_WORKER_GROUP_NAME=${TRIGGER_BOOTSTRAP_WORKER_GROUP_NAME:-bootstrap}
- TRIGGER_BOOTSTRAP_WORKER_TOKEN_PATH=/home/node/shared/worker_token
volumes:
- trigger_shared:/home/node/shared
depends_on:
trigger-postgres:
condition: service_healthy
trigger-redis:
condition: service_healthy
trigger-clickhouse:
condition: service_healthy
trigger-minio:
condition: service_healthy
electric:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3030/healthcheck"]
interval: 30s
timeout: 10s
retries: 5
start_period: 60s
deploy:
resources:
limits:
cpus: ${WEBAPP_CPU_LIMIT:-3.0}
memory: ${WEBAPP_MEMORY_LIMIT:-6G}
reservations:
cpus: ${WEBAPP_CPU_RESERVATION:-1.0}
memory: ${WEBAPP_MEMORY_RESERVATION:-2G}
trigger-postgres:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}postgres:${POSTGRES_VERSION:-17.2-alpine3.21}
command:
- postgres
- -c
- wal_level=logical
- -c
- max_replication_slots=10
- -c
- max_wal_senders=10
environment:
- TZ=${TZ:-UTC}
- POSTGRES_USER=${POSTGRES_USER:-trigger}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_DB=${POSTGRES_DB:-trigger}
volumes:
- trigger_postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-trigger} -d ${POSTGRES_DB:-trigger}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
deploy:
resources:
limits:
cpus: ${POSTGRES_CPU_LIMIT:-2.0}
memory: ${POSTGRES_MEMORY_LIMIT:-4G}
reservations:
cpus: ${POSTGRES_CPU_RESERVATION:-0.5}
memory: ${POSTGRES_MEMORY_RESERVATION:-1G}
trigger-redis:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}redis:${REDIS_VERSION:-7.4.3-alpine3.21}
command: redis-server --appendonly yes
volumes:
- trigger_redis_data:/data
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
deploy:
resources:
limits:
cpus: ${REDIS_CPU_LIMIT:-1.0}
memory: ${REDIS_MEMORY_LIMIT:-2G}
reservations:
cpus: ${REDIS_CPU_RESERVATION:-0.25}
memory: ${REDIS_MEMORY_RESERVATION:-512M}
trigger-clickhouse:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}clickhouse/clickhouse-server:${CLICKHOUSE_VERSION:-25.3}
environment:
- TZ=${TZ:-UTC}
- CLICKHOUSE_DB=${CLICKHOUSE_DATABASE:-default}
- CLICKHOUSE_USER=${CLICKHOUSE_USER:-default}
- CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password}
- CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1
volumes:
- trigger_clickhouse_data:/var/lib/clickhouse
- trigger_clickhouse_logs:/var/log/clickhouse-server
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "-O-", "http://localhost:8123/ping"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
deploy:
resources:
limits:
cpus: ${CLICKHOUSE_CPU_LIMIT:-2.0}
memory: ${CLICKHOUSE_MEMORY_LIMIT:-4G}
reservations:
cpus: ${CLICKHOUSE_CPU_RESERVATION:-0.5}
memory: ${CLICKHOUSE_MEMORY_RESERVATION:-1G}
trigger-minio:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}minio/minio:${MINIO_VERSION:-RELEASE.2025-04-22T22-12-26Z}
command: server /data --console-address ":9001"
ports:
- "${MINIO_API_PORT:-9000}:9000"
- "${MINIO_CONSOLE_PORT:-9001}:9001"
environment:
- TZ=${TZ:-UTC}
- MINIO_ROOT_USER=${MINIO_ROOT_USER:-admin}
- MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-very-safe-password}
volumes:
- trigger_minio_data:/data
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
deploy:
resources:
limits:
cpus: ${MINIO_CPU_LIMIT:-1.0}
memory: ${MINIO_MEMORY_LIMIT:-2G}
reservations:
cpus: ${MINIO_CPU_RESERVATION:-0.25}
memory: ${MINIO_MEMORY_RESERVATION:-512M}
# MinIO bucket initialization
trigger-minio-init:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}minio/mc:${MC_VERSION:-RELEASE.2025-04-16T18-13-26Z}
entrypoint: |
/bin/sh -c '
sleep 5
mc alias set myminio http://trigger-minio:9000 ${MINIO_ROOT_USER:-admin} ${MINIO_ROOT_PASSWORD:-very-safe-password}
mc mb myminio/${PACKET_BUCKET_NAME:-packets} --ignore-existing
echo "MinIO bucket initialized"
exit 0
'
depends_on:
trigger-minio:
condition: service_healthy
restart: "no"
electric:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}electricsql/electric:${ELECTRIC_VERSION:-1.0.0}
environment:
- TZ=${TZ:-UTC}
- DATABASE_URL=postgresql://${POSTGRES_USER:-trigger}:${POSTGRES_PASSWORD}@trigger-postgres:5432/${POSTGRES_DB:-trigger}
depends_on:
trigger-postgres:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/v1/health"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
deploy:
resources:
limits:
cpus: ${ELECTRIC_CPU_LIMIT:-1.0}
memory: ${ELECTRIC_MEMORY_LIMIT:-1G}
reservations:
cpus: ${ELECTRIC_CPU_RESERVATION:-0.25}
memory: ${ELECTRIC_MEMORY_RESERVATION:-256M}
# Initialize registry htpasswd file on first run
trigger-registry-init:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}httpd:2-alpine
entrypoint: |
/bin/sh -c '
if [ ! -f /auth/htpasswd ]; then
echo "Generating htpasswd file..."
htpasswd -nbB "${REGISTRY_USER:-registry-user}" "${REGISTRY_PASSWORD:-very-secure-indeed}" > /auth/htpasswd
echo "htpasswd file created successfully"
else
echo "htpasswd file already exists, skipping..."
fi
'
environment:
- REGISTRY_USER=${REGISTRY_USER:-registry-user}
- REGISTRY_PASSWORD=${REGISTRY_PASSWORD:-very-secure-indeed}
volumes:
- trigger_registry_auth:/auth
restart: "no"
trigger-registry:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}registry:${REGISTRY_IMAGE_VERSION:-3}
ports:
- "${REGISTRY_PORT:-5000}:5000"
environment:
- TZ=${TZ:-UTC}
- REGISTRY_AUTH=htpasswd
- REGISTRY_AUTH_HTPASSWD_REALM=Registry
- REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd
- REGISTRY_STORAGE_DELETE_ENABLED=true
volumes:
- trigger_registry_data:/var/lib/registry
- trigger_registry_auth:/auth:ro
depends_on:
trigger-registry-init:
condition: service_completed_successfully
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5000/v2/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
deploy:
resources:
limits:
cpus: ${REGISTRY_CPU_LIMIT:-0.5}
memory: ${REGISTRY_MEMORY_LIMIT:-512M}
reservations:
cpus: ${REGISTRY_CPU_RESERVATION:-0.1}
memory: ${REGISTRY_MEMORY_RESERVATION:-128M}
# =============================================================================
# Worker Services (Supervisor Stack)
# =============================================================================
supervisor:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}ghcr.io/triggerdotdev/supervisor:${TRIGGER_IMAGE_TAG:-v4.2.0}
environment:
- TZ=${TZ:-UTC}
# Required settings
- TRIGGER_API_URL=http://webapp:3030
- TRIGGER_WORKER_TOKEN=${TRIGGER_WORKER_TOKEN:-file:///home/node/shared/worker_token}
- MANAGED_WORKER_SECRET=${MANAGED_WORKER_SECRET:-managed-secret}
- OTEL_EXPORTER_OTLP_ENDPOINT=http://webapp:3030/otel
# Worker instance
- TRIGGER_WORKER_INSTANCE_NAME=${TRIGGER_WORKER_INSTANCE_NAME:-supervisor-1}
- TRIGGER_WORKER_HEARTBEAT_INTERVAL_SECONDS=${TRIGGER_WORKER_HEARTBEAT_INTERVAL_SECONDS:-30}
# Workload API settings
- TRIGGER_WORKLOAD_API_ENABLED=true
- TRIGGER_WORKLOAD_API_PROTOCOL=http
- TRIGGER_WORKLOAD_API_PORT_INTERNAL=8020
- TRIGGER_WORKLOAD_API_PORT_EXTERNAL=8020
# Docker settings
- DOCKER_RUNNER_NETWORKS=trigger-dev_default
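      # Assumes the compose project is named trigger-dev (the directory name);
      # adjust if you pass --project-name, since Docker prefixes the network with it.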
- DOCKER_ENFORCE_MACHINE_PRESETS=${DOCKER_ENFORCE_MACHINE_PRESETS:-true}
- DOCKER_AUTOREMOVE_EXITED_CONTAINERS=${DOCKER_AUTOREMOVE_EXITED_CONTAINERS:-true}
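      # Assumption: the upstream self-hosting compose also points the supervisor at the
      # proxy via DOCKER_HOST=tcp://docker-socket-proxy:2375; without it the supervisor
      # looks for a local Docker socket, which is not mounted here. Verify against the guide.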
volumes:
- trigger_shared:/home/node/shared:ro
depends_on:
webapp:
condition: service_healthy
docker-socket-proxy:
condition: service_started
deploy:
resources:
limits:
cpus: ${SUPERVISOR_CPU_LIMIT:-2.0}
memory: ${SUPERVISOR_MEMORY_LIMIT:-4G}
reservations:
cpus: ${SUPERVISOR_CPU_RESERVATION:-0.5}
memory: ${SUPERVISOR_MEMORY_RESERVATION:-1G}
docker-socket-proxy:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}tecnativa/docker-socket-proxy:${DOCKER_SOCKET_PROXY_VERSION:-0.3.0}
privileged: true
environment:
- TZ=${TZ:-UTC}
# Allowed API endpoints
- CONTAINERS=1
- IMAGES=1
- NETWORKS=1
- VOLUMES=1
- AUTH=1
- POST=1
- BUILD=0
- COMMIT=0
- CONFIGS=0
- DISTRIBUTION=1
- EXEC=0
- GRPC=0
- INFO=1
- NODES=0
- PING=1
- PLUGINS=0
- SECRETS=0
- SERVICES=0
- SESSION=0
- SWARM=0
- SYSTEM=0
- TASKS=0
- VERSION=1
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
deploy:
resources:
limits:
cpus: ${DOCKER_PROXY_CPU_LIMIT:-0.5}
memory: ${DOCKER_PROXY_MEMORY_LIMIT:-256M}
reservations:
cpus: ${DOCKER_PROXY_CPU_RESERVATION:-0.1}
memory: ${DOCKER_PROXY_MEMORY_RESERVATION:-64M}
volumes:
trigger_shared:
trigger_postgres_data:
trigger_redis_data:
trigger_clickhouse_data:
trigger_clickhouse_logs:
trigger_minio_data:
trigger_registry_data:
trigger_registry_auth: