feat: add more Agent services & easytier

This commit is contained in:
Summer Shen
2026-04-19 12:26:54 +08:00
parent 0e948befac
commit 0b5ba69cb0
30 changed files with 1775 additions and 0 deletions
+7
View File
@@ -55,6 +55,7 @@ These services require building custom Docker images from source.
| [Apache Pulsar](./src/pulsar) | 4.0.7 |
| [Apache RocketMQ](./src/rocketmq) | 5.3.1 |
| [Agentgateway](./src/agentgateway) | 0.11.2 |
| [AnythingLLM](./src/anythingllm) | latest |
| [Bifrost Gateway](./src/bifrost-gateway) | v1.4.17 |
| [Bolt.diy](./apps/bolt-diy) | latest |
| [Budibase](./src/budibase) | 3.23.0 |
@@ -73,6 +74,7 @@ These services require building custom Docker images from source.
| [Doris](./src/doris) | 3.0.0 |
| [DuckDB](./src/duckdb) | v1.1.3 |
| [Easy Dataset](./apps/easy-dataset) | 1.5.1 |
| [EasyTier](./src/easytier) | v2.6.0 |
| [Elasticsearch](./src/elasticsearch) | 9.3.0 |
| [etcd](./src/etcd) | 3.6.0 |
| [FalkorDB](./src/falkordb) | v4.14.11 |
@@ -101,12 +103,15 @@ These services require building custom Docker images from source.
| [Kong](./src/kong) | 3.8.0 |
| [Langflow](./apps/langflow) | latest |
| [Langfuse](./apps/langfuse) | 3.115.0 |
| [Letta](./src/letta) | 0.16.7 |
| [LibreChat](./apps/librechat) | v0.8.4 |
| [LibreOffice](./src/libreoffice) | latest |
| [libSQL Server](./src/libsql) | latest |
| [LiteLLM](./src/litellm) | main-stable |
| [llama-swap](./src/llama-swap) | cpu |
| [llama.cpp](./src/llama.cpp) | server |
| [LMDeploy](./src/lmdeploy) | v0.11.1 |
| [LobeChat](./src/lobe-chat) | 1.143.3 |
| [Logstash](./src/logstash) | 8.16.1 |
| [MariaDB Galera Cluster](./src/mariadb-galera) | 11.7.2 |
| [Mattermost](./apps/mattermost) | 11.3 |
@@ -159,6 +164,7 @@ These services require building custom Docker images from source.
| [PyTorch](./src/pytorch) | 2.6.0 |
| [Qdrant](./src/qdrant) | 1.15.4 |
| [RabbitMQ](./src/rabbitmq) | 4.2.3 |
| [RAGFlow](./apps/ragflow) | v0.24.0 |
| [Ray](./src/ray) | 2.42.1 |
| [Redpanda](./src/redpanda) | v24.3.1 |
| [Redis Cluster](./src/redis-cluster) | 8.2.1 |
@@ -171,6 +177,7 @@ These services require building custom Docker images from source.
| [Shannon](./apps/shannon) | v0.3.1 |
| [SigNoz](./src/signoz) | 0.55.0 |
| [Sim](./apps/sim) | latest |
| [Skyvern](./apps/skyvern) | v1.0.31 |
| [Stable Diffusion WebUI](./apps/stable-diffusion-webui-docker) | latest |
| [Stirling-PDF](./apps/stirling-pdf) | latest |
| [Temporal](./src/temporal) | 1.24.2 |
+7
View File
@@ -55,6 +55,7 @@ docker compose exec redis redis-cli ping
| [Apache Pulsar](./src/pulsar) | 4.0.7 |
| [Apache RocketMQ](./src/rocketmq) | 5.3.1 |
| [Agentgateway](./src/agentgateway) | 0.11.2 |
| [AnythingLLM](./src/anythingllm) | latest |
| [Bifrost Gateway](./src/bifrost-gateway) | v1.4.17 |
| [Bolt.diy](./apps/bolt-diy) | latest |
| [Budibase](./src/budibase) | 3.23.0 |
@@ -73,6 +74,7 @@ docker compose exec redis redis-cli ping
| [Doris](./src/doris) | 3.0.0 |
| [DuckDB](./src/duckdb) | v1.1.3 |
| [Easy Dataset](./apps/easy-dataset) | 1.5.1 |
| [EasyTier](./src/easytier) | v2.6.0 |
| [Elasticsearch](./src/elasticsearch) | 9.3.0 |
| [etcd](./src/etcd) | 3.6.0 |
| [FalkorDB](./src/falkordb) | v4.14.11 |
@@ -101,12 +103,15 @@ docker compose exec redis redis-cli ping
| [Kong](./src/kong) | 3.8.0 |
| [Langflow](./apps/langflow) | latest |
| [Langfuse](./apps/langfuse) | 3.115.0 |
| [Letta](./src/letta) | 0.16.7 |
| [LibreChat](./apps/librechat) | v0.8.4 |
| [LibreOffice](./src/libreoffice) | latest |
| [libSQL Server](./src/libsql) | latest |
| [LiteLLM](./src/litellm) | main-stable |
| [llama-swap](./src/llama-swap) | cpu |
| [llama.cpp](./src/llama.cpp) | server |
| [LMDeploy](./src/lmdeploy) | v0.11.1 |
| [LobeChat](./src/lobe-chat) | 1.143.3 |
| [Logstash](./src/logstash) | 8.16.1 |
| [MariaDB Galera Cluster](./src/mariadb-galera) | 11.7.2 |
| [Mattermost](./apps/mattermost) | 11.3 |
@@ -159,6 +164,7 @@ docker compose exec redis redis-cli ping
| [PyTorch](./src/pytorch) | 2.6.0 |
| [Qdrant](./src/qdrant) | 1.15.4 |
| [RabbitMQ](./src/rabbitmq) | 4.2.3 |
| [RAGFlow](./apps/ragflow) | v0.24.0 |
| [Ray](./src/ray) | 2.42.1 |
| [Redpanda](./src/redpanda) | v24.3.1 |
| [Redis Cluster](./src/redis-cluster) | 8.2.1 |
@@ -171,6 +177,7 @@ docker compose exec redis redis-cli ping
| [Shannon](./apps/shannon) | v0.3.1 |
| [SigNoz](./src/signoz) | 0.55.0 |
| [Sim](./apps/sim) | latest |
| [Skyvern](./apps/skyvern) | v1.0.31 |
| [Stable Diffusion WebUI](./apps/stable-diffusion-webui-docker) | latest |
| [Stirling-PDF](./apps/stirling-pdf) | latest |
| [Temporal](./src/temporal) | 1.24.2 |
+50
View File
@@ -0,0 +1,50 @@
# Global Registry Prefix (optional)
# GLOBAL_REGISTRY=
# Service Versions
LIBRECHAT_VERSION=v0.8.4
MONGODB_VERSION=8.0
MEILISEARCH_VERSION=v1.12.8
# Timezone
TZ=UTC
# Host port for the LibreChat web UI
LIBRECHAT_PORT_OVERRIDE=3080
# Security Secrets (CHANGEME: generate with: openssl rand -hex 32)
JWT_SECRET=changeme_jwt_secret_please_change_CHANGEME
JWT_REFRESH_SECRET=changeme_jwt_refresh_secret_CHANGEME
MEILI_MASTER_KEY=changeme_meili_master_key_CHANGEME
# Encryption Keys
# CREDS_KEY must be exactly 32 characters
CREDS_KEY=changeme_creds_key_32_chars_only
# CREDS_IV must be exactly 16 characters
CREDS_IV=changeme_iv_16ch
# Registration
ALLOW_REGISTRATION=true
ALLOW_SOCIAL_LOGIN=false
# LLM Provider API Keys (optional; configure via UI or here)
# OPENAI_API_KEY=sk-...
# ANTHROPIC_API_KEY=sk-ant-...
# Resource Limits - LibreChat
LIBRECHAT_CPU_LIMIT=2
LIBRECHAT_MEMORY_LIMIT=2G
LIBRECHAT_CPU_RESERVATION=0.5
LIBRECHAT_MEMORY_RESERVATION=512M
# Resource Limits - MongoDB
MONGODB_CPU_LIMIT=1
MONGODB_MEMORY_LIMIT=1G
MONGODB_CPU_RESERVATION=0.25
MONGODB_MEMORY_RESERVATION=256M
# Resource Limits - Meilisearch
MEILISEARCH_CPU_LIMIT=0.5
MEILISEARCH_MEMORY_LIMIT=512M
MEILISEARCH_CPU_RESERVATION=0.1
MEILISEARCH_MEMORY_RESERVATION=128M
+82
View File
@@ -0,0 +1,82 @@
# LibreChat
[English](./README.md) | [中文](./README.zh.md)
Quick start: <https://docs.librechat.ai>.
This service deploys LibreChat, an open-source AI chat platform that supports OpenAI, Anthropic, Google, Ollama, and many other providers in a single unified interface with conversation history, file uploads, code execution, and multi-user support.
## Services
- **librechat**: The LibreChat web application (Node.js).
- **mongodb**: MongoDB database for conversation and user data.
- **meilisearch**: Full-text search engine for message indexing.
## Quick Start
1. Copy `.env.example` to `.env`:
```bash
cp .env.example .env
```
2. Update the secrets in `.env` (generate with `openssl rand -hex 32`):
```
JWT_SECRET, JWT_REFRESH_SECRET, MEILI_MASTER_KEY, CREDS_KEY, CREDS_IV
```
3. Start the services:
```bash
docker compose up -d
```
4. Open `http://localhost:3080` and register the first user account.
## Core Environment Variables
| Variable | Description | Default |
| --------------------- | -------------------------------------------------------- | ---------------------------- |
| `LIBRECHAT_VERSION` | Image version | `v0.8.4` |
| `LIBRECHAT_PORT_OVERRIDE` | Host port for the web UI | `3080` |
| `JWT_SECRET` | JWT signing secret (min 32 chars) — **CHANGEME** | placeholder |
| `JWT_REFRESH_SECRET` | JWT refresh signing secret — **CHANGEME** | placeholder |
| `MEILI_MASTER_KEY` | Meilisearch master key — **CHANGEME** | placeholder |
| `CREDS_KEY` | Encryption key for stored credentials (exactly 32 chars) | placeholder |
| `CREDS_IV` | Encryption IV (exactly 16 chars) | placeholder |
| `ALLOW_REGISTRATION` | Allow new user registration | `true` |
| `OPENAI_API_KEY` | OpenAI API key (optional; can also configure in UI) | *(empty)* |
| `ANTHROPIC_API_KEY` | Anthropic API key (optional) | *(empty)* |
## Volumes
- `librechat_images`: User-uploaded images served by the web UI.
- `librechat_logs`: Application log files.
- `librechat_mongo_data`: MongoDB data persistence.
- `librechat_meilisearch_data`: Meilisearch index data.
## Ports
- **3080**: LibreChat web UI
## Security Notes
- Generate all secrets before any external exposure: `openssl rand -hex 32`
- `CREDS_KEY` and `CREDS_IV` encrypt stored API keys — losing them makes stored credentials unrecoverable.
- Set `ALLOW_REGISTRATION=false` after creating admin accounts to lock down signups.
## Resource Requirements
| Service | CPU Limit | Memory Limit |
| ------------ | --------- | ------------ |
| librechat | 2 | 2 GB |
| mongodb | 1 | 1 GB |
| meilisearch | 0.5 | 512 MB |
Total recommended: **4+ GB RAM**.
## Documentation
- [LibreChat Docs](https://docs.librechat.ai)
- [GitHub](https://github.com/danny-avila/LibreChat)
+82
View File
@@ -0,0 +1,82 @@
# LibreChat
[English](./README.md) | [中文](./README.zh.md)
快速开始:<https://docs.librechat.ai>。
此服务用于部署 LibreChat,一个开源 AI 对话平台,在单一统一界面中支持 OpenAI、Anthropic、Google、Ollama 等众多提供商,具备对话历史、文件上传、代码执行和多用户支持。
## 服务
- **librechat**:LibreChat Web 应用(Node.js)。
- **mongodb**:用于存储对话和用户数据的 MongoDB 数据库。
- **meilisearch**:用于消息索引的全文搜索引擎。
## 快速开始
1. 将 `.env.example` 复制为 `.env`
```bash
cp .env.example .env
```
2. 更新 `.env` 中的密钥(使用 `openssl rand -hex 32` 生成):
```
JWT_SECRET、JWT_REFRESH_SECRET、MEILI_MASTER_KEY、CREDS_KEY、CREDS_IV
```
3. 启动服务:
```bash
docker compose up -d
```
4. 打开 `http://localhost:3080`,注册第一个用户账号。
## 核心环境变量
| 变量 | 说明 | 默认值 |
| ------------------------ | ------------------------------------------------------- | -------- |
| `LIBRECHAT_VERSION` | 镜像版本 | `v0.8.4` |
| `LIBRECHAT_PORT_OVERRIDE`| Web UI 宿主机端口 | `3080` |
| `JWT_SECRET` | JWT 签名密钥(至少 32 字符)——**请修改** | 占位符 |
| `JWT_REFRESH_SECRET` | JWT 刷新签名密钥——**请修改** | 占位符 |
| `MEILI_MASTER_KEY` | Meilisearch 主密钥——**请修改** | 占位符 |
| `CREDS_KEY` | 存储凭证的加密密钥(恰好 32 字符) | 占位符 |
| `CREDS_IV` | 加密 IV(恰好 16 字符) | 占位符 |
| `ALLOW_REGISTRATION` | 允许新用户注册 | `true` |
| `OPENAI_API_KEY` | OpenAI API Key(可选;也可在 UI 中配置) | *(空)* |
| `ANTHROPIC_API_KEY` | Anthropic API Key(可选) | *(空)* |
## 数据卷
- `librechat_images`:用户上传的图片,由 Web UI 提供服务。
- `librechat_logs`:应用日志文件。
- `librechat_mongo_data`:MongoDB 数据持久化。
- `librechat_meilisearch_data`:Meilisearch 索引数据。
## 端口
- **3080**:LibreChat Web UI
## 安全说明
- 在对外暴露之前,请生成所有密钥:`openssl rand -hex 32`
- `CREDS_KEY` 和 `CREDS_IV` 用于加密存储的 API Key——丢失后存储的凭证将无法恢复。
- 创建管理员账号后,将 `ALLOW_REGISTRATION` 设为 `false` 以禁止新用户注册。
## 资源需求
| 服务 | CPU 限制 | 内存限制 |
| ----------- | -------- | -------- |
| librechat | 2 | 2 GB |
| mongodb | 1 | 1 GB |
| meilisearch | 0.5 | 512 MB |
推荐总计:**4+ GB RAM**。
## 文档
- [LibreChat 文档](https://docs.librechat.ai)
- [GitHub](https://github.com/danny-avila/LibreChat)
+108
View File
@@ -0,0 +1,108 @@
# Make sure to change the secret placeholders before exposing this stack externally.
# Fields marked with CHANGEME must be updated for any non-local deployment.
# Shared defaults merged into every service: restart policy plus bounded
# json-file logging (3 files x 100 MB) so container logs cannot grow unbounded.
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: '3'
services:
  # LibreChat web application (Node.js). Waits for MongoDB and Meilisearch
  # to report healthy before starting.
  librechat:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}librechat/librechat:${LIBRECHAT_VERSION:-v0.8.4}
    depends_on:
      mongodb:
        condition: service_healthy
      meilisearch:
        condition: service_healthy
    ports:
      # Host port is overridable; the app listens on 3080 inside the container.
      - '${LIBRECHAT_PORT_OVERRIDE:-3080}:3080'
    volumes:
      # User-uploaded images served by the web UI.
      - librechat_images:/app/client/public/images
      # Application log files.
      - librechat_logs:/app/api/logs
    environment:
      - TZ=${TZ:-UTC}
      # Internal service DNS names on the compose network; MongoDB runs without auth.
      - MONGO_URI=mongodb://mongodb:27017/LibreChat
      - MEILI_HOST=http://meilisearch:7700
      - MEILI_MASTER_KEY=${MEILI_MASTER_KEY:-changeme_meili_master_key_CHANGEME}
      - JWT_SECRET=${JWT_SECRET:-changeme_jwt_secret_please_change_CHANGEME}
      - JWT_REFRESH_SECRET=${JWT_REFRESH_SECRET:-changeme_jwt_refresh_secret_CHANGEME}
      # CREDS_KEY must be exactly 32 characters and CREDS_IV exactly 16
      # (see .env.example); they encrypt credentials stored by the app.
      - CREDS_KEY=${CREDS_KEY:-changeme_creds_key_32_chars_only}
      - CREDS_IV=${CREDS_IV:-changeme_iv_16ch}
      - ALLOW_REGISTRATION=${ALLOW_REGISTRATION:-true}
      - ALLOW_SOCIAL_LOGIN=${ALLOW_SOCIAL_LOGIN:-false}
      # Optional LLM provider keys; may also be configured through the UI.
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
    healthcheck:
      # Probes /health with node's built-in http module so no extra tooling
      # (curl/wget) is required inside the image.
      test:
        - CMD
        - node
        - -e
        - "require('http').get('http://localhost:3080/health',res=>process.exit(res.statusCode===200?0:1)).on('error',()=>process.exit(1))"
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 40s
    deploy:
      resources:
        limits:
          cpus: ${LIBRECHAT_CPU_LIMIT:-2}
          memory: ${LIBRECHAT_MEMORY_LIMIT:-2G}
        reservations:
          cpus: ${LIBRECHAT_CPU_RESERVATION:-0.5}
          memory: ${LIBRECHAT_MEMORY_RESERVATION:-512M}
  # MongoDB backing store for conversations and users
  # (not exposed on the host; reachable only on the compose network).
  mongodb:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}mongo:${MONGODB_VERSION:-8.0}
    volumes:
      - librechat_mongo_data:/data/db
    environment:
      - TZ=${TZ:-UTC}
    healthcheck:
      test: [CMD, mongosh, --eval, "db.adminCommand('ping')"]
      interval: 10s
      timeout: 10s
      retries: 5
      start_period: 20s
    deploy:
      resources:
        limits:
          cpus: ${MONGODB_CPU_LIMIT:-1}
          memory: ${MONGODB_MEMORY_LIMIT:-1G}
        reservations:
          cpus: ${MONGODB_CPU_RESERVATION:-0.25}
          memory: ${MONGODB_MEMORY_RESERVATION:-256M}
  # Meilisearch full-text index for message search; must share the same
  # MEILI_MASTER_KEY as the librechat service.
  meilisearch:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}getmeili/meilisearch:${MEILISEARCH_VERSION:-v1.12.8}
    volumes:
      - librechat_meilisearch_data:/meili_data
    environment:
      - TZ=${TZ:-UTC}
      - MEILI_MASTER_KEY=${MEILI_MASTER_KEY:-changeme_meili_master_key_CHANGEME}
      - MEILI_NO_ANALYTICS=true
    healthcheck:
      test: [CMD-SHELL, "curl -sf http://localhost:7700/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 5
      start_period: 10s
    deploy:
      resources:
        limits:
          cpus: ${MEILISEARCH_CPU_LIMIT:-0.5}
          memory: ${MEILISEARCH_MEMORY_LIMIT:-512M}
        reservations:
          cpus: ${MEILISEARCH_CPU_RESERVATION:-0.1}
          memory: ${MEILISEARCH_MEMORY_RESERVATION:-128M}
# Named volumes persist data across container recreation.
volumes:
  librechat_images:
  librechat_logs:
  librechat_mongo_data:
  librechat_meilisearch_data:
+55
View File
@@ -0,0 +1,55 @@
# Global Registry Prefix (optional)
# GLOBAL_REGISTRY=
# Service Versions
RAGFLOW_VERSION=v0.24.0
ELASTICSEARCH_VERSION=8.11.3
MYSQL_VERSION=8.0.39
REDIS_VERSION=7
MINIO_VERSION=RELEASE.2025-01-20T14-49-07Z
# Timezone
TZ=UTC
# Host port for the RAGFlow web UI (Nginx reverse proxy)
RAGFLOW_PORT_OVERRIDE=80
# MinIO web console port
MINIO_CONSOLE_PORT_OVERRIDE=9001
# Secrets (CHANGEME: use strong random values in production)
SECRET_KEY=changeme_secret_key_CHANGEME
MYSQL_PASSWORD=ragflow
REDIS_PASSWORD=redispassword
MINIO_USER=minioadmin
MINIO_PASSWORD=minioadmin
# Resource Limits - RAGFlow
RAGFLOW_CPU_LIMIT=4
RAGFLOW_MEMORY_LIMIT=4G
RAGFLOW_CPU_RESERVATION=1
RAGFLOW_MEMORY_RESERVATION=2G
# Resource Limits - Elasticsearch
ELASTICSEARCH_CPU_LIMIT=2
ELASTICSEARCH_MEMORY_LIMIT=2G
ELASTICSEARCH_CPU_RESERVATION=0.5
ELASTICSEARCH_MEMORY_RESERVATION=1G
# Resource Limits - MySQL
MYSQL_CPU_LIMIT=1
MYSQL_MEMORY_LIMIT=1G
MYSQL_CPU_RESERVATION=0.25
MYSQL_MEMORY_RESERVATION=256M
# Resource Limits - Redis
REDIS_CPU_LIMIT=0.5
REDIS_MEMORY_LIMIT=512M
REDIS_CPU_RESERVATION=0.1
REDIS_MEMORY_RESERVATION=128M
# Resource Limits - MinIO
MINIO_CPU_LIMIT=1
MINIO_MEMORY_LIMIT=1G
MINIO_CPU_RESERVATION=0.25
MINIO_MEMORY_RESERVATION=256M
+84
View File
@@ -0,0 +1,84 @@
# RAGFlow
[English](./README.md) | [中文](./README.zh.md)
Quick start: <https://ragflow.io/docs>.
This service deploys RAGFlow, an open-source Retrieval-Augmented Generation engine based on deep document understanding. It provides intelligent question answering over complex documents (PDFs, Word, PowerPoint, etc.) with accurate citation and citation tracing.
> **Platform note**: This stack is **x86-64 (amd64) only**. ARM64 is not supported by the official image.
>
> **Resource note**: Elasticsearch alone requires ~2 GB RAM. Provision at least **8 GB RAM** total before starting.
## Services
- **ragflow**: The RAGFlow web application and API server (Nginx on port 80, API on port 9380).
- **es01**: Elasticsearch single-node cluster for vector and full-text search.
- **mysql**: MySQL 8 database for metadata and workflow state.
- **redis**: Redis for task queues and caching.
- **minio**: S3-compatible object storage for document and chunk storage.
## Quick Start
1. Copy `.env.example` to `.env`:
```bash
cp .env.example .env
```
2. Update the secrets in `.env`:
```
SECRET_KEY, MYSQL_PASSWORD, REDIS_PASSWORD, MINIO_PASSWORD
```
3. Start the services (initial startup may take 2–5 minutes):
```bash
docker compose up -d
```
4. Open `http://localhost` and register the first admin account.
## Core Environment Variables
| Variable | Description | Default |
| ---------------------- | -------------------------------------------------------- | -------------------------------- |
| `RAGFLOW_VERSION` | RAGFlow image version | `v0.24.0` |
| `RAGFLOW_PORT_OVERRIDE`| Host port for the web UI | `80` |
| `SECRET_KEY` | Application secret key — **CHANGEME** | placeholder |
| `MYSQL_PASSWORD` | MySQL root password (also used by RAGFlow) | `ragflow` |
| `REDIS_PASSWORD` | Redis authentication password | `redispassword` |
| `MINIO_USER` | MinIO root user | `minioadmin` |
| `MINIO_PASSWORD` | MinIO root password | `minioadmin` |
| `MINIO_CONSOLE_PORT_OVERRIDE` | MinIO web console host port | `9001` |
## Volumes
- `ragflow_logs`: RAGFlow application logs.
- `ragflow_es_data`: Elasticsearch index data.
- `ragflow_mysql_data`: MySQL database files.
- `ragflow_redis_data`: Redis persistence.
- `ragflow_minio_data`: Object storage for documents and embeddings.
## Ports
- **80**: RAGFlow web UI and API (via Nginx)
- **9001**: MinIO web console
## Resource Requirements
| Service | CPU Limit | Memory Limit |
| ------------- | --------- | ------------ |
| ragflow | 4 | 4 GB |
| elasticsearch | 2 | 2 GB |
| mysql | 1 | 1 GB |
| redis | 0.5 | 512 MB |
| minio | 1 | 1 GB |
Total recommended: **8+ GB RAM**, **4+ CPU cores**.
## Documentation
- [RAGFlow Docs](https://ragflow.io/docs)
- [GitHub](https://github.com/infiniflow/ragflow)
+84
View File
@@ -0,0 +1,84 @@
# RAGFlow
[English](./README.md) | [中文](./README.zh.md)
快速开始:<https://ragflow.io/docs>。
此服务用于部署 RAGFlow,一个基于深度文档理解的开源检索增强生成引擎。它能对复杂文档(PDF、Word、PowerPoint 等)进行智能问答,并提供精准的引用和引文追踪。
> **平台说明**:此 Stack 仅支持 **x86-64(amd64)**,官方镜像不支持 ARM64。
>
> **资源说明**:仅 Elasticsearch 就需要约 2 GB RAM,启动前请确保系统至少有 **8 GB RAM**。
## 服务
- **ragflow**:RAGFlow Web 应用和 API 服务器(Nginx 监听 80 端口,API 监听 9380 端口)。
- **es01**:单节点 Elasticsearch 集群,用于向量和全文检索。
- **mysql**:MySQL 8 数据库,用于元数据和工作流状态存储。
- **redis**:Redis,用于任务队列和缓存。
- **minio**:S3 兼容对象存储,用于文档和分块存储。
## 快速开始
1. 将 `.env.example` 复制为 `.env`
```bash
cp .env.example .env
```
2. 更新 `.env` 中的密钥:
```
SECRET_KEY、MYSQL_PASSWORD、REDIS_PASSWORD、MINIO_PASSWORD
```
3. 启动服务(首次启动可能需要 2~5 分钟):
```bash
docker compose up -d
```
4. 打开 `http://localhost`,注册第一个管理员账号。
## 核心环境变量
| 变量 | 说明 | 默认值 |
| ----------------------------- | ------------------------------------------ | ------------- |
| `RAGFLOW_VERSION` | RAGFlow 镜像版本 | `v0.24.0` |
| `RAGFLOW_PORT_OVERRIDE` | Web UI 宿主机端口 | `80` |
| `SECRET_KEY` | 应用密钥——**请修改** | 占位符 |
| `MYSQL_PASSWORD` | MySQL root 密码(也供 RAGFlow 使用) | `ragflow` |
| `REDIS_PASSWORD` | Redis 认证密码 | `redispassword` |
| `MINIO_USER` | MinIO root 用户名 | `minioadmin` |
| `MINIO_PASSWORD` | MinIO root 密码 | `minioadmin` |
| `MINIO_CONSOLE_PORT_OVERRIDE` | MinIO Web 控制台宿主机端口 | `9001` |
## 数据卷
- `ragflow_logs`:RAGFlow 应用日志。
- `ragflow_es_data`:Elasticsearch 索引数据。
- `ragflow_mysql_data`:MySQL 数据库文件。
- `ragflow_redis_data`:Redis 持久化数据。
- `ragflow_minio_data`:文档和嵌入向量的对象存储。
## 端口
- **80**:RAGFlow Web UI 和 API(通过 Nginx)
- **9001**:MinIO Web 控制台
## 资源需求
| 服务 | CPU 限制 | 内存限制 |
| ------------- | -------- | -------- |
| ragflow | 4 | 4 GB |
| elasticsearch | 2 | 2 GB |
| mysql | 1 | 1 GB |
| redis | 0.5 | 512 MB |
| minio | 1 | 1 GB |
推荐总计:**8+ GB RAM**、**4+ CPU 核心**。
## 文档
- [RAGFlow 文档](https://ragflow.io/docs)
- [GitHub](https://github.com/infiniflow/ragflow)
+157
View File
@@ -0,0 +1,157 @@
# RAGFlow requires substantial system resources.
# Elasticsearch alone needs ~2 GB RAM. Total recommended: 8+ GB RAM.
# This stack is x86-64 (amd64) only; ARM64 is not supported.
# Shared defaults merged into every service: restart policy plus bounded
# json-file logging (3 files x 100 MB).
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: '3'
services:
  # RAGFlow application + API server; starts only after every backing
  # service (MySQL, Redis, MinIO, Elasticsearch) reports healthy.
  ragflow:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}infiniflow/ragflow:${RAGFLOW_VERSION:-v0.24.0}
    depends_on:
      mysql:
        condition: service_healthy
      redis:
        condition: service_healthy
      minio:
        condition: service_healthy
      es01:
        condition: service_healthy
    ports:
      # Nginx inside the image serves the web UI and proxies the API on port 80.
      - '${RAGFLOW_PORT_OVERRIDE:-80}:80'
    volumes:
      - ragflow_logs:/ragflow/logs
    environment:
      - TZ=${TZ:-UTC}
      # Credentials must match the values configured on the backing services below.
      - MYSQL_PASSWORD=${MYSQL_PASSWORD:-ragflow}
      - MINIO_USER=${MINIO_USER:-minioadmin}
      - MINIO_PASSWORD=${MINIO_PASSWORD:-minioadmin}
      - REDIS_PASSWORD=${REDIS_PASSWORD:-redispassword}
      - SECRET_KEY=${SECRET_KEY:-changeme_secret_key_CHANGEME}
    healthcheck:
      test: [CMD-SHELL, "curl -sf http://localhost/ > /dev/null 2>&1 || exit 1"]
      interval: 30s
      timeout: 15s
      retries: 10
      # First start is slow (migrations, model downloads); give it a long grace period.
      start_period: 120s
    deploy:
      resources:
        limits:
          cpus: ${RAGFLOW_CPU_LIMIT:-4}
          memory: ${RAGFLOW_MEMORY_LIMIT:-4G}
        reservations:
          cpus: ${RAGFLOW_CPU_RESERVATION:-1}
          memory: ${RAGFLOW_MEMORY_RESERVATION:-2G}
  # Single-node Elasticsearch for vector and full-text search
  # (security disabled; internal compose network only).
  es01:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}elasticsearch:${ELASTICSEARCH_VERSION:-8.11.3}
    environment:
      - TZ=${TZ:-UTC}
      - discovery.type=single-node
      - xpack.security.enabled=false
      # JVM heap bounds; keep below the 2G container memory limit.
      - ES_JAVA_OPTS=-Xms512m -Xmx1g
    volumes:
      - ragflow_es_data:/usr/share/elasticsearch/data
    healthcheck:
      # Healthy when the cluster reports green or yellow (yellow is expected
      # for a single-node cluster with unassigned replicas).
      test: [CMD-SHELL, "curl -sf http://localhost:9200/_cluster/health | grep -qE '\"status\":\"(green|yellow)\"' || exit 1"]
      interval: 15s
      timeout: 10s
      retries: 10
      start_period: 60s
    deploy:
      resources:
        limits:
          cpus: ${ELASTICSEARCH_CPU_LIMIT:-2}
          memory: ${ELASTICSEARCH_MEMORY_LIMIT:-2G}
        reservations:
          cpus: ${ELASTICSEARCH_CPU_RESERVATION:-0.5}
          memory: ${ELASTICSEARCH_MEMORY_RESERVATION:-1G}
  # MySQL metadata store; RAGFlow connects as root with MYSQL_PASSWORD.
  mysql:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}mysql:${MYSQL_VERSION:-8.0.39}
    environment:
      - TZ=${TZ:-UTC}
      - MYSQL_ROOT_PASSWORD=${MYSQL_PASSWORD:-ragflow}
      - MYSQL_DATABASE=rag_flow
    volumes:
      - ragflow_mysql_data:/var/lib/mysql
    healthcheck:
      test: [CMD, mysqladmin, ping, -h, localhost]
      interval: 10s
      timeout: 10s
      retries: 5
      start_period: 30s
    deploy:
      resources:
        limits:
          cpus: ${MYSQL_CPU_LIMIT:-1}
          memory: ${MYSQL_MEMORY_LIMIT:-1G}
        reservations:
          cpus: ${MYSQL_CPU_RESERVATION:-0.25}
          memory: ${MYSQL_MEMORY_RESERVATION:-256M}
  # Redis task queue / cache. noeviction prevents silent loss of queued tasks
  # under memory pressure.
  redis:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}redis:${REDIS_VERSION:-7}
    command: >
      --requirepass ${REDIS_PASSWORD:-redispassword}
      --maxmemory-policy noeviction
    environment:
      # Exposed to the container so the healthcheck below can authenticate.
      - REDIS_PASSWORD=${REDIS_PASSWORD:-redispassword}
    volumes:
      - ragflow_redis_data:/data
    healthcheck:
      # $$ escapes $ for compose, so the container shell expands REDIS_PASSWORD.
      test: [CMD-SHELL, "redis-cli -a $$REDIS_PASSWORD ping | grep -q PONG"]
      interval: 5s
      timeout: 10s
      retries: 10
    deploy:
      resources:
        limits:
          cpus: ${REDIS_CPU_LIMIT:-0.5}
          memory: ${REDIS_MEMORY_LIMIT:-512M}
        reservations:
          cpus: ${REDIS_CPU_RESERVATION:-0.1}
          memory: ${REDIS_MEMORY_RESERVATION:-128M}
  # MinIO S3-compatible object storage for documents and chunks.
  # API on 9000 (internal only); web console published on 9001.
  minio:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}minio/minio:${MINIO_VERSION:-RELEASE.2025-01-20T14-49-07Z}
    command: server /data --console-address ':9001'
    environment:
      - TZ=${TZ:-UTC}
      - MINIO_ROOT_USER=${MINIO_USER:-minioadmin}
      - MINIO_ROOT_PASSWORD=${MINIO_PASSWORD:-minioadmin}
    volumes:
      - ragflow_minio_data:/data
    ports:
      - '${MINIO_CONSOLE_PORT_OVERRIDE:-9001}:9001'
    healthcheck:
      test: [CMD-SHELL, "curl -sf http://localhost:9000/minio/health/live || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 10s
    deploy:
      resources:
        limits:
          cpus: ${MINIO_CPU_LIMIT:-1}
          memory: ${MINIO_MEMORY_LIMIT:-1G}
        reservations:
          cpus: ${MINIO_CPU_RESERVATION:-0.25}
          memory: ${MINIO_MEMORY_RESERVATION:-256M}
# Named volumes persist data across container recreation.
volumes:
  ragflow_logs:
  ragflow_es_data:
  ragflow_mysql_data:
  ragflow_redis_data:
  ragflow_minio_data:
+48
View File
@@ -0,0 +1,48 @@
# Global Registry Prefix (optional)
# GLOBAL_REGISTRY=
# Service Versions
SKYVERN_VERSION=v1.0.31
POSTGRES_VERSION=15
# Timezone
TZ=UTC
# Host ports
SKYVERN_PORT_OVERRIDE=8000
SKYVERN_UI_PORT_OVERRIDE=8080
# Skyvern API Key (CHANGEME: set a strong random key for the REST API)
SKYVERN_API_KEY=changeme_skyvern_api_key_CHANGEME
# Browser type: chromium-headless (default), chromium, or chrome
BROWSER_TYPE=chromium-headless
# LLM Provider API Keys (at least one is required for task automation)
# OPENAI_API_KEY=sk-...
# ANTHROPIC_API_KEY=sk-ant-...
# PostgreSQL password
POSTGRES_PASSWORD=skyvern
# UI → API connection (must be the address reachable from the user's browser)
VITE_API_BASE_URL=http://localhost:8000
VITE_WSS_BASE_URL=ws://localhost:8000
# Resource Limits - Skyvern backend (includes Playwright + Chromium)
SKYVERN_CPU_LIMIT=2
SKYVERN_MEMORY_LIMIT=4G
SKYVERN_CPU_RESERVATION=0.5
SKYVERN_MEMORY_RESERVATION=1G
# Resource Limits - Skyvern UI
SKYVERN_UI_CPU_LIMIT=0.5
SKYVERN_UI_MEMORY_LIMIT=256M
SKYVERN_UI_CPU_RESERVATION=0.1
SKYVERN_UI_MEMORY_RESERVATION=64M
# Resource Limits - PostgreSQL
POSTGRES_CPU_LIMIT=1
POSTGRES_MEMORY_LIMIT=1G
POSTGRES_CPU_RESERVATION=0.25
POSTGRES_MEMORY_RESERVATION=256M
+84
View File
@@ -0,0 +1,84 @@
# Skyvern
[English](./README.md) | [中文](./README.zh.md)
Quick start: <https://docs.skyvern.com>.
This service deploys Skyvern, an AI-powered browser automation platform that uses LLMs and computer vision to execute tasks in web browsers. It can fill forms, navigate websites, and complete multi-step workflows without custom scripts.
## Services
- **skyvern**: The Skyvern API server with embedded Playwright + Chromium.
- **skyvern-ui**: React-based web UI for task management and browser session viewing.
- **postgres**: PostgreSQL database for task history and state.
## Quick Start
1. Copy `.env.example` to `.env`:
```bash
cp .env.example .env
```
2. Set your LLM API key and change the Skyvern API key in `.env`:
```
SKYVERN_API_KEY=your-strong-api-key
OPENAI_API_KEY=sk-...
```
3. Start the services:
```bash
docker compose up -d
```
4. Open `http://localhost:8080` for the web UI, or send tasks to the API at `http://localhost:8000`.
## Core Environment Variables
| Variable | Description | Default |
| ----------------------- | -------------------------------------------------------------------- | -------------------- |
| `SKYVERN_VERSION` | Image version (applies to both skyvern and skyvern-ui) | `v1.0.31` |
| `SKYVERN_PORT_OVERRIDE` | Host port for the API | `8000` |
| `SKYVERN_UI_PORT_OVERRIDE` | Host port for the web UI | `8080` |
| `SKYVERN_API_KEY` | API key for authenticating requests to the Skyvern server — **CHANGEME** | placeholder |
| `BROWSER_TYPE` | Browser type: `chromium-headless`, `chromium`, or `chrome` | `chromium-headless` |
| `OPENAI_API_KEY` | OpenAI API key (recommended for best results) | *(empty)* |
| `ANTHROPIC_API_KEY` | Anthropic API key (alternative to OpenAI) | *(empty)* |
| `POSTGRES_PASSWORD` | PostgreSQL password | `skyvern` |
| `VITE_API_BASE_URL` | Skyvern API URL as seen from the user's browser | `http://localhost:8000` |
| `VITE_WSS_BASE_URL` | WebSocket URL for live session streaming | `ws://localhost:8000` |
## Volumes
- `skyvern_artifacts`: Downloaded files and task artifacts.
- `skyvern_videos`: Browser session recordings.
- `skyvern_har`: HTTP Archive (HAR) files for debugging.
- `skyvern_postgres_data`: PostgreSQL data persistence.
## Ports
- **8000**: Skyvern REST API
- **8080**: Skyvern web UI
## Resource Requirements
| Service | CPU Limit | Memory Limit |
| ---------- | --------- | ------------ |
| skyvern | 2 | 4 GB |
| skyvern-ui | 0.5 | 256 MB |
| postgres | 1 | 1 GB |
The `skyvern` service includes Playwright and Chromium. Allocate **4+ GB RAM** and **2+ CPU cores** for reliable browser automation.
## Notes
- Database migrations run automatically on startup via Alembic.
- If deploying behind a reverse proxy, update `VITE_API_BASE_URL` and `VITE_WSS_BASE_URL` to your public domain.
- The `SKYVERN_API_KEY` must be included in API requests as the `x-api-key` header.
## Documentation
- [Skyvern Docs](https://docs.skyvern.com)
- [GitHub](https://github.com/Skyvern-AI/skyvern)
+84
View File
@@ -0,0 +1,84 @@
# Skyvern
[English](./README.md) | [中文](./README.zh.md)
快速开始:<https://docs.skyvern.com>。
此服务用于部署 Skyvern,一个由 AI 驱动的浏览器自动化平台,使用 LLM 和计算机视觉在 Web 浏览器中执行任务。无需编写自定义脚本,即可填写表单、导航网站和完成多步骤工作流。
## 服务
- **skyvern**:集成了 Playwright + Chromium 的 Skyvern API 服务器。
- **skyvern-ui**:用于任务管理和浏览器会话查看的 React Web UI。
- **postgres**:PostgreSQL 数据库,用于存储任务历史和状态。
## 快速开始
1. 将 `.env.example` 复制为 `.env`
```bash
cp .env.example .env
```
2. 在 `.env` 中设置 LLM API Key 并更改 Skyvern API Key
```
SKYVERN_API_KEY=your-strong-api-key
OPENAI_API_KEY=sk-...
```
3. 启动服务:
```bash
docker compose up -d
```
4. 打开 `http://localhost:8080` 访问 Web UI,或通过 `http://localhost:8000` 向 API 发送任务。
## 核心环境变量
| 变量 | 说明 | 默认值 |
| -------------------------- | ------------------------------------------------------- | ------------------------ |
| `SKYVERN_VERSION`          | 镜像版本(同时适用于 skyvern 和 skyvern-ui)              | `v1.0.31`                |
| `SKYVERN_PORT_OVERRIDE` | API 宿主机端口 | `8000` |
| `SKYVERN_UI_PORT_OVERRIDE` | Web UI 宿主机端口 | `8080` |
| `SKYVERN_API_KEY` | 请求 Skyvern 服务器的认证 API Key——**请修改** | 占位符 |
| `BROWSER_TYPE`             | 浏览器类型:`chromium-headless`、`chromium`、`chrome`    | `chromium-headless`      |
| `OPENAI_API_KEY` | OpenAI API Key(推荐,效果最佳) | *(空)* |
| `ANTHROPIC_API_KEY`        | Anthropic API Key(OpenAI 的替代方案)                   | *(空)*                  |
| `POSTGRES_PASSWORD` | PostgreSQL 密码 | `skyvern` |
| `VITE_API_BASE_URL` | 从用户浏览器访问的 Skyvern API URL | `http://localhost:8000` |
| `VITE_WSS_BASE_URL` | 实时会话流的 WebSocket URL | `ws://localhost:8000` |
## 数据卷
- `skyvern_artifacts`:下载的文件和任务产物。
- `skyvern_videos`:浏览器会话录像。
- `skyvern_har`:用于调试的 HTTP 存档(HAR)文件。
- `skyvern_postgres_data`:PostgreSQL 数据持久化。
## 端口
- **8000**:Skyvern REST API
- **8080**:Skyvern Web UI
## 资源需求
| 服务 | CPU 限制 | 内存限制 |
| ---------- | -------- | -------- |
| skyvern | 2 | 4 GB |
| skyvern-ui | 0.5 | 256 MB |
| postgres | 1 | 1 GB |
`skyvern` 服务包含 Playwright 和 Chromium,需分配 **4+ GB RAM** 和 **2+ CPU 核心**以保证浏览器自动化的稳定运行。
## 说明
- 数据库迁移通过 Alembic 在启动时自动运行。
- 如果部署在反向代理后,请将 `VITE_API_BASE_URL``VITE_WSS_BASE_URL` 更新为你的公网域名。
- API 请求中必须在 `x-api-key` 请求头中包含 `SKYVERN_API_KEY`
## 文档
- [Skyvern 文档](https://docs.skyvern.com)
- [GitHub](https://github.com/Skyvern-AI/skyvern)
+110
View File
@@ -0,0 +1,110 @@
# Change SKYVERN_API_KEY before exposing this stack externally.
# Fields marked with CHANGEME must be updated for any non-local deployment.
# Shared defaults merged into every service: restart policy plus bounded
# json-file logging (3 files x 100 MB).
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: '3'
services:
  # Skyvern API server with embedded Playwright + Chromium; waits for
  # PostgreSQL to report healthy (migrations run on startup).
  skyvern:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}skyvern/skyvern:${SKYVERN_VERSION:-v1.0.31}
    depends_on:
      postgres:
        condition: service_healthy
    ports:
      - '${SKYVERN_PORT_OVERRIDE:-8000}:8000'
    volumes:
      # Task artifacts, session recordings, and HAR capture files.
      - skyvern_artifacts:/data/artifacts
      - skyvern_videos:/data/videos
      - skyvern_har:/data/har
    environment:
      - TZ=${TZ:-UTC}
      # Must agree with the credentials set on the postgres service below.
      - DATABASE_STRING=postgresql+psycopg2://skyvern:${POSTGRES_PASSWORD:-skyvern}@postgres:5432/skyvern
      - SKYVERN_API_KEY=${SKYVERN_API_KEY:-changeme_skyvern_api_key_CHANGEME}
      - BROWSER_TYPE=${BROWSER_TYPE:-chromium-headless}
      # Paths below must match the volume mount points above.
      - VIDEO_PATH=/data/videos
      - HAR_PATH=/data/har
      - ARTIFACT_STORAGE_PATH=/data/artifacts
      # At least one LLM provider key is required for task automation.
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
    healthcheck:
      # Uses python3 from the image itself so no extra tooling is required.
      test:
        - CMD
        - python3
        - -c
        - "import urllib.request; urllib.request.urlopen('http://localhost:8000/api/v1/heartbeat')"
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 30s
    deploy:
      resources:
        limits:
          cpus: ${SKYVERN_CPU_LIMIT:-2}
          memory: ${SKYVERN_MEMORY_LIMIT:-4G}
        reservations:
          cpus: ${SKYVERN_CPU_RESERVATION:-0.5}
          memory: ${SKYVERN_MEMORY_RESERVATION:-1G}
  # React web UI; VITE_* URLs are baked for the *user's browser*, so they
  # must point at the externally reachable API address.
  skyvern-ui:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}skyvern/skyvern-ui:${SKYVERN_VERSION:-v1.0.31}
    depends_on:
      skyvern:
        condition: service_healthy
    ports:
      - '${SKYVERN_UI_PORT_OVERRIDE:-8080}:8080'
    environment:
      - TZ=${TZ:-UTC}
      - VITE_API_BASE_URL=${VITE_API_BASE_URL:-http://localhost:8000}
      - VITE_WSS_BASE_URL=${VITE_WSS_BASE_URL:-ws://localhost:8000}
    healthcheck:
      test: [CMD-SHELL, "curl -sf http://localhost:8080/ > /dev/null 2>&1 || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          cpus: ${SKYVERN_UI_CPU_LIMIT:-0.5}
          memory: ${SKYVERN_UI_MEMORY_LIMIT:-256M}
        reservations:
          cpus: ${SKYVERN_UI_CPU_RESERVATION:-0.1}
          memory: ${SKYVERN_UI_MEMORY_RESERVATION:-64M}
  # PostgreSQL task/state store (not exposed on the host).
  postgres:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}postgres:${POSTGRES_VERSION:-15}
    environment:
      - POSTGRES_USER=skyvern
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-skyvern}
      - POSTGRES_DB=skyvern
      # Honor the stack-wide TZ setting (was hard-coded to UTC, unlike every
      # other service in this stack); PGTZ sets the server session timezone.
      - TZ=${TZ:-UTC}
      - PGTZ=${TZ:-UTC}
    volumes:
      - skyvern_postgres_data:/var/lib/postgresql/data
    healthcheck:
      # Quoted so the space-containing command is one CMD-SHELL scalar.
      test: [CMD-SHELL, "pg_isready -U skyvern"]
      interval: 5s
      timeout: 5s
      retries: 10
    deploy:
      resources:
        limits:
          cpus: ${POSTGRES_CPU_LIMIT:-1}
          memory: ${POSTGRES_MEMORY_LIMIT:-1G}
        reservations:
          cpus: ${POSTGRES_CPU_RESERVATION:-0.25}
          memory: ${POSTGRES_MEMORY_RESERVATION:-256M}
# Named volumes persist data across container recreation.
volumes:
  skyvern_artifacts:
  skyvern_videos:
  skyvern_har:
  skyvern_postgres_data:
+22
View File
@@ -0,0 +1,22 @@
# Global Registry Prefix (optional)
# GLOBAL_REGISTRY=
# AnythingLLM Image Version
# No stable semantic version tags exist; 'latest' tracks the current release.
ANYTHINGLLM_VERSION=latest
# Timezone
TZ=UTC
# Host port for the AnythingLLM web UI
ANYTHINGLLM_PORT_OVERRIDE=3001
# UID/GID for file ownership inside the container
ANYTHINGLLM_UID=1000
ANYTHINGLLM_GID=1000
# Resource Limits
ANYTHINGLLM_CPU_LIMIT=2
ANYTHINGLLM_MEMORY_LIMIT=2G
ANYTHINGLLM_CPU_RESERVATION=0.5
ANYTHINGLLM_MEMORY_RESERVATION=512M
+49
View File
@@ -0,0 +1,49 @@
# AnythingLLM
[English](./README.md) | [中文](./README.zh.md)
Quick start: <https://docs.anythingllm.com>.
This service deploys AnythingLLM, an all-in-one AI application that lets you chat with documents, use multiple LLM providers, and build custom AI agents — with a full RAG pipeline built in.
## Services
- `anythingllm`: The AnythingLLM web application.
## Quick Start
```bash
docker compose up -d
```
Open `http://localhost:3001` and complete the setup wizard to connect your LLM provider.
## Configuration
All LLM providers, vector databases, and agent settings are configured through the web UI after startup. No API keys are required in `.env` unless you want to pre-seed them via environment variables.
| Variable | Description | Default |
| ----------------------------- | ----------------------------------------------- | -------- |
| `ANYTHINGLLM_VERSION` | Image version (`latest` — no stable tags exist) | `latest` |
| `TZ` | Container timezone | `UTC` |
| `ANYTHINGLLM_PORT_OVERRIDE` | Host port for the web UI | `3001` |
| `ANYTHINGLLM_UID` | UID for volume file ownership | `1000` |
| `ANYTHINGLLM_GID` | GID for volume file ownership | `1000` |
| `ANYTHINGLLM_CPU_LIMIT` | CPU limit | `2` |
| `ANYTHINGLLM_MEMORY_LIMIT` | Memory limit | `2G` |
| `ANYTHINGLLM_CPU_RESERVATION` | CPU reservation | `0.5` |
| `ANYTHINGLLM_MEMORY_RESERVATION` | Memory reservation | `512M` |
## Volumes
- `anythingllm_storage`: Persists all application data, uploaded documents, embeddings, and settings.
## Ports
- **3001**: Web UI
## Notes
- The `mintplexlabs/anythingllm` image does not publish stable semantic version tags; `latest` is the only reliable tag.
- Supports OpenAI, Anthropic, Ollama, LM Studio, and many other LLM backends — all configured from the UI.
- The health check uses the `/api/ping` endpoint.
+49
View File
@@ -0,0 +1,49 @@
# AnythingLLM
[English](./README.md) | [中文](./README.zh.md)
快速开始:<https://docs.anythingllm.com>。
此服务用于部署 AnythingLLM,一款集文档问答、多 LLM 提供商接入和自定义 AI Agent 于一体的全能 AI 应用,内置完整的 RAG 流水线。
## 服务
- `anythingllm`:AnythingLLM Web 应用。
## 快速开始
```bash
docker compose up -d
```
打开 `http://localhost:3001`,按照设置向导连接你的 LLM 提供商。
## 配置
所有 LLM 提供商、向量数据库和 Agent 设置均通过启动后的 Web UI 进行配置,无需在 `.env` 中预设 API Key(除非你希望通过环境变量预填充)。
| 变量 | 说明 | 默认值 |
| ----------------------------- | ----------------------------------- | -------- |
| `ANYTHINGLLM_VERSION` | 镜像版本(无语义化稳定标签,使用 `latest`) | `latest` |
| `TZ` | 容器时区 | `UTC` |
| `ANYTHINGLLM_PORT_OVERRIDE` | Web UI 的宿主机端口 | `3001` |
| `ANYTHINGLLM_UID` | 数据卷文件所有者 UID | `1000` |
| `ANYTHINGLLM_GID` | 数据卷文件所有者 GID | `1000` |
| `ANYTHINGLLM_CPU_LIMIT` | CPU 限制 | `2` |
| `ANYTHINGLLM_MEMORY_LIMIT` | 内存限制 | `2G` |
| `ANYTHINGLLM_CPU_RESERVATION` | CPU 预留 | `0.5` |
| `ANYTHINGLLM_MEMORY_RESERVATION` | 内存预留 | `512M` |
## 数据卷
- `anythingllm_storage`:持久化所有应用数据、上传的文档、嵌入向量和配置。
## 端口
- **3001**:Web UI
## 说明
- `mintplexlabs/anythingllm` 镜像未发布语义化稳定标签,`latest` 是唯一可靠的标签。
- 支持 OpenAI、Anthropic、Ollama、LM Studio 等众多 LLM 后端,均可在 UI 中配置。
- 健康检查使用 `/api/ping` 端点。
+42
View File
@@ -0,0 +1,42 @@
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: '3'

services:
  anythingllm:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}mintplexlabs/anythingllm:${ANYTHINGLLM_VERSION:-latest}
    environment:
      - TZ=${TZ:-UTC}
      # All uploads, embeddings, and settings live under this path (persisted below).
      - STORAGE_DIR=/app/server/storage
      # NOTE(review): assumed to control file ownership inside the storage volume —
      # confirm against the mintplexlabs/anythingllm image documentation.
      - UID=${ANYTHINGLLM_UID:-1000}
      - GID=${ANYTHINGLLM_GID:-1000}
    ports:
      - '${ANYTHINGLLM_PORT_OVERRIDE:-3001}:3001'
    volumes:
      - anythingllm_storage:/app/server/storage
    healthcheck:
      # Probe /api/ping using the Node runtime already shipped in the image.
      test:
        - CMD
        - node
        - -e
        - "require('http').get('http://localhost:3001/api/ping',res=>process.exit(res.statusCode===200?0:1)).on('error',()=>process.exit(1))"
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 30s
    deploy:
      resources:
        limits:
          cpus: ${ANYTHINGLLM_CPU_LIMIT:-2}
          memory: ${ANYTHINGLLM_MEMORY_LIMIT:-2G}
        reservations:
          cpus: ${ANYTHINGLLM_CPU_RESERVATION:-0.5}
          memory: ${ANYTHINGLLM_MEMORY_RESERVATION:-512M}

volumes:
  anythingllm_storage:
+30
View File
@@ -0,0 +1,30 @@
# EasyTier image version
EASYTIER_VERSION=v2.6.0
# Timezone
TZ=UTC
# Virtual network name (shared with all peers in the same network)
EASYTIER_NETWORK_NAME=easytier
# Virtual network secret — REQUIRED, change before deploying
# Generate a strong secret: openssl rand -hex 16
EASYTIER_NETWORK_SECRET=
# Virtual IPv4 address of this server node within the EasyTier network
EASYTIER_IPV4=10.144.144.1
# Host port for peer TCP connections
EASYTIER_TCP_PORT_OVERRIDE=11010
# Host port for peer UDP connections
EASYTIER_UDP_PORT_OVERRIDE=11010
# Host port for the management RPC portal (bound to 127.0.0.1 by default)
EASYTIER_RPC_PORT_OVERRIDE=15888
# Resource limits
EASYTIER_CPU_LIMIT=0.50
EASYTIER_MEMORY_LIMIT=128M
EASYTIER_CPU_RESERVATION=0.10
EASYTIER_MEMORY_RESERVATION=32M
+88
View File
@@ -0,0 +1,88 @@
# EasyTier
[English](./README.md) | [中文](./README.zh.md)
[EasyTier](https://github.com/EasyTier/EasyTier) is a mesh VPN networking tool that lets you build a private, encrypted overlay network across hosts that are behind NAT or firewalls. This stack deploys EasyTier as a **public relay server** — a stable entry point that peers can use for discovery and traffic relay when direct connections are not possible.
## Services
- `easytier`: EasyTier core node running in relay-only mode (`--no-tun`), without creating a local TUN interface.
## Ports
| Port | Protocol | Description |
| ---------- | -------- | ------------------------------------------------------------------ |
| `11010` | TCP | Peer connection listener — must be publicly reachable |
| `11010` | UDP | Peer connection listener — must be publicly reachable |
| `15888` | TCP | Management RPC portal (bound to `127.0.0.1` by default) |
## Environment Variables
| Variable | Description | Default |
| --------------------------- | --------------------------------------------------- | ---------------- |
| `EASYTIER_VERSION` | EasyTier image version | `v2.6.0` |
| `TZ` | Timezone | `UTC` |
| `EASYTIER_NETWORK_NAME` | Virtual network name shared by all peers | `easytier` |
| `EASYTIER_NETWORK_SECRET` | Network secret (password); **required** | `""` |
| `EASYTIER_IPV4` | Virtual IPv4 of this server node | `10.144.144.1` |
| `EASYTIER_TCP_PORT_OVERRIDE`| Host port for peer TCP listener | `11010` |
| `EASYTIER_UDP_PORT_OVERRIDE`| Host port for peer UDP listener | `11010` |
| `EASYTIER_RPC_PORT_OVERRIDE`| Host port for management RPC (localhost only) | `15888` |
| `EASYTIER_CPU_LIMIT` | CPU limit | `0.50` |
| `EASYTIER_MEMORY_LIMIT` | Memory limit | `128M` |
## Quick Start
1. Copy `.env.example` and set a strong network secret:
```bash
cp .env.example .env
```
Edit `.env`:
```env
EASYTIER_NETWORK_NAME=myvpn
EASYTIER_NETWORK_SECRET=<your-strong-secret>
```
Generate a secret with: `openssl rand -hex 16`
2. Start the server:
```bash
docker compose up -d
```
3. Verify the node is healthy:
```bash
docker compose exec easytier easytier-cli -p 127.0.0.1:15888 node info
```
4. On each peer machine, connect to this server:
```bash
easytier-core \
--network-name myvpn \
--network-secret <your-strong-secret> \
--peers tcp://<server-public-ip>:11010 \
--ipv4 10.144.144.2
```
## Storage
This stack does not use persistent volumes. Configuration is provided entirely via command-line flags derived from environment variables.
## Security Notes
- **`EASYTIER_NETWORK_SECRET` is required.** An empty secret leaves the network open to any peer that knows the network name. Always set a strong random value before exposing this server to the internet.
- The management RPC port (`15888`) is bound to `127.0.0.1` by default. Do not expose it publicly unless you have separate authentication in place.
- Ports `11010/tcp` and `11010/udp` must be open in your firewall / cloud security group for peers to reach this server.
- This stack runs in `--no-tun` relay mode. No kernel TUN device is created, so no elevated capabilities (`NET_ADMIN`) are required and `cap_drop: ALL` is applied.
- If you need this server node to also participate as a VPN peer (with a local virtual interface), remove `--no-tun` from `command` and add `cap_add: [NET_ADMIN]` to the service.
## Documentation
- [EasyTier GitHub](https://github.com/EasyTier/EasyTier)
- [EasyTier Documentation](https://www.easytier.top/guide/introduction.html)
+88
View File
@@ -0,0 +1,88 @@
# EasyTier
[English](./README.md) | [中文](./README.zh.md)
[EasyTier](https://github.com/EasyTier/EasyTier) 是一款网状 VPN 组网工具,可在 NAT 或防火墙后面的主机之间构建私有加密覆盖网络。本配置将 EasyTier 部署为**公共中继服务器**——作为稳定的入口节点,供各客户端节点在无法直连时进行发现和流量中转。
## 服务
- `easytier`:以中继模式(`--no-tun`)运行的 EasyTier 核心节点,不创建本地 TUN 网络接口。
## 端口
| 端口 | 协议 | 说明 |
| ------- | ---- | ------------------------------------------------- |
| `11010` | TCP | 节点连接监听端口——需公网可达 |
| `11010` | UDP | 节点连接监听端口——需公网可达 |
| `15888` | TCP | 管理 RPC 端口(默认仅绑定 `127.0.0.1`) |
## 环境变量
| 变量名 | 描述 | 默认值 |
| ---------------------------- | --------------------------------- | -------------- |
| `EASYTIER_VERSION` | EasyTier 镜像版本 | `v2.6.0` |
| `TZ` | 时区 | `UTC` |
| `EASYTIER_NETWORK_NAME` | 所有节点共享的虚拟网络名称 | `easytier` |
| `EASYTIER_NETWORK_SECRET` | 网络密钥(密码),**必须设置** | `""` |
| `EASYTIER_IPV4` | 本服务器节点在虚拟网络中的 IPv4 | `10.144.144.1` |
| `EASYTIER_TCP_PORT_OVERRIDE` | 节点 TCP 监听端口(宿主机映射) | `11010` |
| `EASYTIER_UDP_PORT_OVERRIDE` | 节点 UDP 监听端口(宿主机映射) | `11010` |
| `EASYTIER_RPC_PORT_OVERRIDE` | 管理 RPC 端口(仅本机可访问) | `15888` |
| `EASYTIER_CPU_LIMIT` | CPU 上限 | `0.50` |
| `EASYTIER_MEMORY_LIMIT` | 内存上限 | `128M` |
## 快速开始
1. 复制 `.env.example` 并设置强网络密钥:
```bash
cp .env.example .env
```
编辑 `.env`:
```env
EASYTIER_NETWORK_NAME=myvpn
EASYTIER_NETWORK_SECRET=<你的强密钥>
```
生成随机密钥:`openssl rand -hex 16`
2. 启动服务:
```bash
docker compose up -d
```
3. 验证节点状态:
```bash
docker compose exec easytier easytier-cli -p 127.0.0.1:15888 node info
```
4. 在各客户端机器上连接到此服务器:
```bash
easytier-core \
--network-name myvpn \
--network-secret <你的强密钥> \
--peers tcp://<服务器公网 IP>:11010 \
--ipv4 10.144.144.2
```
## 数据卷
本配置不使用持久化卷,所有配置均通过环境变量转换为命令行参数传入。
## 安全说明
- **`EASYTIER_NETWORK_SECRET` 为必填项。** 若密钥为空,任何知道网络名称的节点均可接入,请务必在公网暴露前设置强密钥。
- 管理 RPC 端口(`15888`)默认仅绑定 `127.0.0.1`,请勿在无额外认证保护的情况下对外暴露。
- 防火墙及云安全组需放行 `11010/tcp` 和 `11010/udp`,客户端节点才能连接到本服务器。
- 本配置以 `--no-tun` 中继模式运行,无需创建 TUN 设备,因此无需提升内核权限(`NET_ADMIN`),已应用 `cap_drop: ALL`。
- 如需服务器节点同时作为 VPN 网络中的普通成员(拥有本地虚拟网卡),请移除 `command` 中的 `--no-tun` 参数,并在服务中添加 `cap_add: [NET_ADMIN]`。
## 文档
- [EasyTier GitHub](https://github.com/EasyTier/EasyTier)
- [EasyTier 官方文档](https://www.easytier.top/guide/introduction.html)
+46
View File
@@ -0,0 +1,46 @@
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: '3'

services:
  easytier:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}easytier/easytier:${EASYTIER_VERSION:-v2.6.0}
    # Relay-only node: all runtime configuration is passed as CLI flags.
    command:
      - --network-name=${EASYTIER_NETWORK_NAME:-easytier}
      # Fail fast at `docker compose up` when the secret is unset. The README and
      # .env.example declare it REQUIRED: an empty secret lets any peer that knows
      # the network name join, so an empty default (`:-`) would silently deploy an
      # open relay. `${VAR:?msg}` aborts with the message instead.
      - --network-secret=${EASYTIER_NETWORK_SECRET:?EASYTIER_NETWORK_SECRET is required}
      - --ipv4=${EASYTIER_IPV4:-10.144.144.1}
      - --listeners
      - tcp://0.0.0.0:11010
      - udp://0.0.0.0:11010
      # RPC must bind 0.0.0.0 inside the container; host-side exposure is limited
      # to loopback by the port mapping below.
      - --rpc-portal=0.0.0.0:15888
      - --no-tun
    ports:
      # Peer listener ports — must be reachable from the public internet
      - '${EASYTIER_TCP_PORT_OVERRIDE:-11010}:11010/tcp'
      - '${EASYTIER_UDP_PORT_OVERRIDE:-11010}:11010/udp'
      # Management RPC — bind to localhost only by default for security
      - '127.0.0.1:${EASYTIER_RPC_PORT_OVERRIDE:-15888}:15888'
    environment:
      - TZ=${TZ:-UTC}
    # No TUN interface in server-relay mode; no special capabilities required
    cap_drop:
      - ALL
    deploy:
      resources:
        limits:
          cpus: ${EASYTIER_CPU_LIMIT:-0.50}
          memory: ${EASYTIER_MEMORY_LIMIT:-128M}
        reservations:
          cpus: ${EASYTIER_CPU_RESERVATION:-0.10}
          memory: ${EASYTIER_MEMORY_RESERVATION:-32M}
    healthcheck:
      # NOTE(review): assumes `-p` is easytier-cli's rpc-portal flag — confirm
      # against the CLI docs for the pinned version.
      test: ['CMD', 'easytier-cli', '-p', '127.0.0.1:15888', 'node', 'info']
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 15s
+23
View File
@@ -0,0 +1,23 @@
# Global Registry Prefix (optional)
# GLOBAL_REGISTRY=
# Letta Image Version
LETTA_VERSION=0.16.7
# Timezone
TZ=UTC
# Host port for the Letta REST API server
LETTA_PORT_OVERRIDE=8283
# LLM Provider API Keys (optional; at least one is required for agent functionality)
# OPENAI_API_KEY=sk-...
# ANTHROPIC_API_KEY=sk-ant-...
# GROQ_API_KEY=gsk_...
# OLLAMA_BASE_URL=http://host.docker.internal:11434
# Resource Limits
LETTA_CPU_LIMIT=1
LETTA_MEMORY_LIMIT=1G
LETTA_CPU_RESERVATION=0.25
LETTA_MEMORY_RESERVATION=256M
+49
View File
@@ -0,0 +1,49 @@
# Letta
[English](./README.md) | [中文](./README.zh.md)
Quick start: <https://docs.letta.com>.
This service deploys Letta (formerly MemGPT), a framework for building stateful AI agents with long-term memory, persistent state, and tool use. Letta exposes a REST API for creating and managing agents programmatically.
## Services
- `letta`: The Letta agent server.
## Quick Start
```bash
docker compose up -d
```
The Letta REST API will be available at `http://localhost:8283`. You can interact with it via the [Letta Python SDK](https://github.com/letta-ai/letta) or the [ADE web interface](https://app.letta.com).
To connect a local LLM (Ollama), set `OLLAMA_BASE_URL` in your `.env` file before starting.
## Configuration
| Variable | Description | Default |
| ---------------------- | -------------------------------------------------------- | -------- |
| `LETTA_VERSION` | Image version | `0.16.7` |
| `TZ` | Container timezone | `UTC` |
| `LETTA_PORT_OVERRIDE` | Host port for the REST API | `8283` |
| `OPENAI_API_KEY` | OpenAI API key (optional) | *(empty)*|
| `ANTHROPIC_API_KEY` | Anthropic API key (optional) | *(empty)*|
| `GROQ_API_KEY` | Groq API key (optional) | *(empty)*|
| `OLLAMA_BASE_URL` | Ollama base URL, e.g. `http://host.docker.internal:11434`| *(empty)*|
| `LETTA_CPU_LIMIT` | CPU limit | `1` |
| `LETTA_MEMORY_LIMIT` | Memory limit | `1G` |
| `LETTA_CPU_RESERVATION`| CPU reservation                                          | `0.25`   |
| `LETTA_MEMORY_RESERVATION` | Memory reservation                                   | `256M`   |
## Volumes
- `letta_data`: Persists agent state, memory, and configuration at `/root/.letta`.
## Ports
- **8283**: REST API
## Notes
- At least one LLM provider API key (or `OLLAMA_BASE_URL`) is required to create functioning agents.
- The health check uses the `/health` endpoint.
+49
View File
@@ -0,0 +1,49 @@
# Letta
[English](./README.md) | [中文](./README.zh.md)
快速开始:<https://docs.letta.com>。
此服务用于部署 Letta(前身为 MemGPT),一个用于构建具备长期记忆、持久状态和工具调用能力的有状态 AI Agent 框架。Letta 提供 REST API,支持以编程方式创建和管理 Agent。
## 服务
- `letta`:Letta Agent 服务器。
## 快速开始
```bash
docker compose up -d
```
Letta REST API 将在 `http://localhost:8283` 可用。你可以通过 [Letta Python SDK](https://github.com/letta-ai/letta) 或 [ADE Web 界面](https://app.letta.com) 与其交互。
如需连接本地 LLM(Ollama),请在启动前在 `.env` 文件中设置 `OLLAMA_BASE_URL`
## 配置
| 变量 | 说明 | 默认值 |
| ---------------------- | ----------------------------------------------------------- | -------- |
| `LETTA_VERSION` | 镜像版本 | `0.16.7` |
| `TZ` | 容器时区 | `UTC` |
| `LETTA_PORT_OVERRIDE` | REST API 的宿主机端口 | `8283` |
| `OPENAI_API_KEY` | OpenAI API Key(可选) | *(空)* |
| `ANTHROPIC_API_KEY` | Anthropic API Key(可选) | *(空)* |
| `GROQ_API_KEY` | Groq API Key(可选) | *(空)* |
| `OLLAMA_BASE_URL` | Ollama 基础 URL,例如 `http://host.docker.internal:11434` | *(空)* |
| `LETTA_CPU_LIMIT` | CPU 限制 | `1` |
| `LETTA_MEMORY_LIMIT` | 内存限制 | `1G` |
| `LETTA_CPU_RESERVATION`| CPU 预留                                                     | `0.25`   |
| `LETTA_MEMORY_RESERVATION` | 内存预留                                                 | `256M`   |
## 数据卷
- `letta_data`:在 `/root/.letta` 持久化 Agent 状态、记忆和配置。
## 端口
- **8283**:REST API
## 说明
- 创建可用的 Agent 至少需要一个 LLM 提供商的 API Key(或 `OLLAMA_BASE_URL`)。
- 健康检查使用 `/health` 端点。
+43
View File
@@ -0,0 +1,43 @@
# Shared service defaults, merged into each service via the YAML merge key.
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: '3'
services:
  letta:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}letta/letta:${LETTA_VERSION:-0.16.7}
    ports:
      # Letta REST API.
      - '${LETTA_PORT_OVERRIDE:-8283}:8283'
    volumes:
      # Agent state, memory, and configuration.
      - letta_data:/root/.letta
    environment:
      - TZ=${TZ:-UTC}
      # LLM provider credentials; all optional, but agents need at least one.
      # NOTE(review): these default to empty strings rather than being unset —
      # presumably the server treats "" as absent; confirm against Letta docs.
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - GROQ_API_KEY=${GROQ_API_KEY:-}
      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-}
    healthcheck:
      # Probe /health with the Python runtime shipped in the image; urlopen
      # raises (non-zero exit) on connection failure or HTTP error status.
      test:
        - CMD
        - python3
        - -c
        - "import urllib.request; urllib.request.urlopen('http://localhost:8283/health')"
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 20s
    deploy:
      resources:
        limits:
          cpus: ${LETTA_CPU_LIMIT:-1}
          memory: ${LETTA_MEMORY_LIMIT:-1G}
        reservations:
          cpus: ${LETTA_CPU_RESERVATION:-0.25}
          memory: ${LETTA_MEMORY_RESERVATION:-256M}
volumes:
  letta_data:
+26
View File
@@ -0,0 +1,26 @@
# Global Registry Prefix (optional)
# GLOBAL_REGISTRY=
# LobeChat Image Version
LOBE_CHAT_VERSION=1.143.3
# Timezone
TZ=UTC
# Host port for the LobeChat web UI
LOBE_CHAT_PORT_OVERRIDE=3210
# Optional access code to restrict access (leave empty to allow anonymous access)
# ACCESS_CODE=your-secret-code
# LLM Provider API Keys (at least one is required for chat to work)
# OPENAI_API_KEY=sk-...
# OPENAI_PROXY_URL=https://your-proxy/v1
# ANTHROPIC_API_KEY=sk-ant-...
# GOOGLE_API_KEY=AIza...
# Resource Limits
LOBE_CHAT_CPU_LIMIT=0.5
LOBE_CHAT_MEMORY_LIMIT=512M
LOBE_CHAT_CPU_RESERVATION=0.1
LOBE_CHAT_MEMORY_RESERVATION=128M
+45
View File
@@ -0,0 +1,45 @@
# LobeChat
[English](./README.md) | [中文](./README.zh.md)
Quick start: <https://lobehub.com/docs>.
This service deploys LobeChat in standalone (serverless) mode — a modern, high-performance AI chat interface that supports multiple LLM providers, vision models, and plugin extensibility. No database is required; all state is stored client-side.
## Services
- `lobe-chat`: The LobeChat web application.
## Quick Start
```bash
docker compose up -d
```
Open `http://localhost:3210`. Configure your LLM API keys in the settings panel (gear icon), or set them as environment variables before starting.
## Configuration
| Variable | Description | Default |
| ---------------------- | -------------------------------------------------------- | ---------- |
| `LOBE_CHAT_VERSION` | Image version | `1.143.3` |
| `TZ` | Container timezone | `UTC` |
| `LOBE_CHAT_PORT_OVERRIDE` | Host port for the web UI | `3210` |
| `ACCESS_CODE` | Optional password to restrict access (empty = open) | *(empty)* |
| `OPENAI_API_KEY` | OpenAI API key | *(empty)* |
| `OPENAI_PROXY_URL` | Custom OpenAI-compatible API base URL | *(empty)* |
| `ANTHROPIC_API_KEY` | Anthropic API key | *(empty)* |
| `GOOGLE_API_KEY` | Google Gemini API key | *(empty)* |
| `LOBE_CHAT_CPU_LIMIT` | CPU limit | `0.5` |
| `LOBE_CHAT_MEMORY_LIMIT` | Memory limit | `512M` |
## Ports
- **3210**: Web UI
## Notes
- This is the **standalone** (client-side) mode. No PostgreSQL, S3, or auth server is needed.
- Conversation history is stored in the browser; clearing browser data loses history.
- For multi-user deployments with persistent server-side data, see the [LobeChat database mode docs](https://lobehub.com/docs/self-hosting/server-database).
- The health check uses the `/api/health` endpoint.
+45
View File
@@ -0,0 +1,45 @@
# LobeChat
[English](./README.md) | [中文](./README.zh.md)
快速开始:<https://lobehub.com/docs>。
此服务以独立(无服务器)模式部署 LobeChat,这是一款现代高性能的 AI 对话界面,支持多 LLM 提供商、视觉模型和插件扩展。无需数据库,所有状态均存储在客户端。
## 服务
- `lobe-chat`:LobeChat Web 应用。
## 快速开始
```bash
docker compose up -d
```
打开 `http://localhost:3210`。在设置面板(齿轮图标)中配置 LLM API Key,或在启动前通过环境变量设置。
## 配置
| 变量 | 说明 | 默认值 |
| ------------------------- | ------------------------------------------ | --------- |
| `LOBE_CHAT_VERSION` | 镜像版本 | `1.143.3` |
| `TZ` | 容器时区 | `UTC` |
| `LOBE_CHAT_PORT_OVERRIDE` | Web UI 的宿主机端口 | `3210` |
| `ACCESS_CODE` | 可选访问密码(空则开放访问) | *(空)* |
| `OPENAI_API_KEY` | OpenAI API Key | *(空)* |
| `OPENAI_PROXY_URL` | 自定义 OpenAI 兼容 API 基础 URL | *(空)* |
| `ANTHROPIC_API_KEY` | Anthropic API Key | *(空)* |
| `GOOGLE_API_KEY` | Google Gemini API Key | *(空)* |
| `LOBE_CHAT_CPU_LIMIT` | CPU 限制 | `0.5` |
| `LOBE_CHAT_MEMORY_LIMIT` | 内存限制 | `512M` |
## 端口
- **3210**:Web UI
## 说明
- 此为**独立**(客户端)模式,无需 PostgreSQL、S3 或认证服务器。
- 对话历史存储在浏览器中,清除浏览器数据将丢失历史记录。
- 如需多用户部署及服务端持久化数据,请参阅 [LobeChat 数据库模式文档](https://lobehub.com/docs/self-hosting/server-database)。
- 健康检查使用 `/api/health` 端点。
+39
View File
@@ -0,0 +1,39 @@
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: '3'

services:
  lobe-chat:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}lobehub/lobe-chat:${LOBE_CHAT_VERSION:-1.143.3}
    environment:
      - TZ=${TZ:-UTC}
      # Optional shared password; empty means anonymous access.
      - ACCESS_CODE=${ACCESS_CODE:-}
      # LLM provider credentials — all optional here.
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - OPENAI_PROXY_URL=${OPENAI_PROXY_URL:-}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
    ports:
      - '${LOBE_CHAT_PORT_OVERRIDE:-3210}:3210'
    healthcheck:
      # Probe /api/health with the Node runtime shipped in the image.
      test:
        - CMD
        - node
        - -e
        - "require('http').get('http://localhost:3210/api/health',res=>process.exit(res.statusCode===200?0:1)).on('error',()=>process.exit(1))"
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 15s
    deploy:
      resources:
        limits:
          cpus: ${LOBE_CHAT_CPU_LIMIT:-0.5}
          memory: ${LOBE_CHAT_MEMORY_LIMIT:-512M}
        reservations:
          cpus: ${LOBE_CHAT_CPU_RESERVATION:-0.1}
          memory: ${LOBE_CHAT_MEMORY_RESERVATION:-128M}