refactor: ./apps/*

This commit is contained in:
Sun-ZhenXing
2026-01-01 18:32:10 +08:00
parent 9c25970445
commit 922068b8af
37 changed files with 274 additions and 241 deletions

View File

@@ -35,7 +35,8 @@ services:
cpus: ${APISIX_CPU_RESERVATION:-0.25}
memory: ${APISIX_MEMORY_RESERVATION:-256M}
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:9080/apisix/status || exit 1"]
test:
["CMD-SHELL", "curl -f http://localhost:9080/apisix/status || exit 1"]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -1,17 +0,0 @@
# Bolt.diy Configuration
# For more information, visit: https://github.com/stackblitz-labs/bolt.diy
# Container port override
# BOLT_DIY_PORT_OVERRIDE=5173
# Log level (trace, debug, info, warn, error)
# VITE_LOG_LEVEL=info
# Enable experimental features
# ENABLE_EXPERIMENTAL_FEATURES=false
# Bolt.diy version
# BOLT_DIY_VERSION=latest
# Timezone
# TZ=UTC

View File

@@ -1,57 +0,0 @@
# Bolt.diy
Bolt.diy is an AI-powered web IDE that enables you to build full-stack web applications directly in your browser. It combines the power of AI with a modern development environment to streamline your development workflow.
## Quick Start
```bash
docker compose up -d
```
Access Bolt.diy at [http://localhost:5173](http://localhost:5173)
## Features
- **AI-Powered Development**: Leverage AI to assist with code generation and development
- **Full-Stack Development**: Build complete web applications with frontend and backend capabilities
- **Real-time Preview**: See your changes in real-time as you develop
- **Built-in Terminal**: Execute commands directly within the IDE
- **Git Integration**: Manage your repositories within the IDE
## Configuration
### Environment Variables
| Variable | Default | Description |
| ------------------------------ | ------- | ------------------------------------------- |
| `BOLT_DIY_PORT_OVERRIDE` | 5173 | Host port for accessing Bolt.diy |
| `BOLT_DIY_VERSION` | latest | Docker image version |
| `VITE_LOG_LEVEL` | info | Log level (trace, debug, info, warn, error) |
| `ENABLE_EXPERIMENTAL_FEATURES` | false | Enable experimental features |
| `TZ` | UTC | Timezone |
### Port Mapping
- **5173**: Bolt.diy web interface
## Volume
The container uses in-memory storage for the development environment. For persistent storage, you can mount volumes as needed.
## Health Check
The service includes a health check that monitors the availability of the web interface.
## Resource Limits
- **CPU**: 2 cores (limit) / 0.5 cores (reservation)
- **Memory**: 2GB (limit) / 512MB (reservation)
## Documentation
- [Official Bolt.diy Repository](https://github.com/stackblitz-labs/bolt.diy)
- [Bolt.diy Documentation](https://docs.bolt.new/)
## License
Refer to the [Bolt.diy License](https://github.com/stackblitz-labs/bolt.diy/blob/main/LICENSE)

View File

@@ -1,57 +0,0 @@
# Bolt.diy
Bolt.diy 是一个由 AI 驱动的网页版 IDE让你可以直接在浏览器中构建全栈 web 应用程序。它将 AI 的强大功能与现代开发环境相结合,以简化你的开发工作流程。
## 快速开始
```bash
docker compose up -d
```
在 [http://localhost:5173](http://localhost:5173) 访问 Bolt.diy
## 功能特性
- **AI 驱动开发**:利用 AI 辅助代码生成和开发
- **全栈开发**:构建具有前端和后端功能的完整 web 应用程序
- **实时预览**:在开发时实时查看你的更改
- **内置终端**:直接在 IDE 中执行命令
- **Git 集成**:在 IDE 中管理你的代码库
## 配置
### 环境变量
| 变量 | 默认值 | 说明 |
| ------------------------------ | ------ | ------------------------------------------- |
| `BOLT_DIY_PORT_OVERRIDE` | 5173 | 访问 Bolt.diy 的主机端口 |
| `BOLT_DIY_VERSION` | latest | Docker 镜像版本 |
| `VITE_LOG_LEVEL` | info | 日志级别trace、debug、info、warn、error |
| `ENABLE_EXPERIMENTAL_FEATURES` | false | 启用实验性功能 |
| `TZ` | UTC | 时区 |
### 端口映射
- **5173**Bolt.diy web 界面
## 存储卷
容器为开发环境使用内存存储。如需持久化存储,可根据需要挂载卷。
## 健康检查
该服务包含一个健康检查,监控 web 界面的可用性。
## 资源限制
- **CPU**2 核心(上限)/ 0.5 核心(预留)
- **内存**2GB上限/ 512MB预留
## 文档
- [Bolt.diy 官方仓库](https://github.com/stackblitz-labs/bolt.diy)
- [Bolt.diy 文档](https://docs.bolt.new/)
## 许可证
参考 [Bolt.diy 许可证](https://github.com/stackblitz-labs/bolt.diy/blob/main/LICENSE)

View File

@@ -1,40 +0,0 @@
# Shared defaults merged into every service via the YAML merge key (<<:).
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      # Quoted: an unquoted 3 would parse as an integer, log options expect strings.
      max-file: "3"

services:
  bolt-diy:
    <<: *defaults
    # GLOBAL_REGISTRY is an optional mirror prefix; empty by default.
    image: ${GLOBAL_REGISTRY:-}stackblitz/bolt:${BOLT_DIY_VERSION:-latest}
    ports:
      # Quoted to avoid YAML's numeric parsing of "host:container" mappings.
      - "${BOLT_DIY_PORT_OVERRIDE:-5173}:5173"
    environment:
      - TZ=${TZ:-UTC}
      # Log level: trace, debug, info, warn, error.
      - VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-info}
      - ENABLE_EXPERIMENTAL_FEATURES=${ENABLE_EXPERIMENTAL_FEATURES:-false}
    deploy:
      resources:
        limits:
          cpus: ${BOLT_DIY_CPU_LIMIT:-2.00}
          memory: ${BOLT_DIY_MEMORY_LIMIT:-2G}
        reservations:
          cpus: ${BOLT_DIY_CPU_RESERVATION:-0.5}
          memory: ${BOLT_DIY_MEMORY_RESERVATION:-512M}
    healthcheck:
      # Spider request against the dev-server root; non-zero exit marks unhealthy.
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:5173/",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
      # Grace period so the first checks don't fail while the app boots.
      start_period: 10s

View File

@@ -1,13 +0,0 @@
# Easy Dataset Configuration
# Values here override the ${VAR:-default} fallbacks in docker-compose.yaml.
# Image version
# Find latest releases at: https://github.com/ConardLi/easy-dataset/releases
EASY_DATASET_VERSION=1.5.1
# Port configuration
# Override the host port for the web interface
EASY_DATASET_PORT_OVERRIDE=1717
# Timezone configuration
# Set your timezone (e.g., UTC, Asia/Shanghai, America/New_York)
TZ=UTC

View File

@@ -1,161 +0,0 @@
# Easy Dataset
[English](./README.md) | [中文](./README.zh.md)
This service deploys Easy Dataset, a powerful tool for creating fine-tuning datasets for Large Language Models (LLMs). It provides an intuitive interface for uploading domain-specific files, intelligently splitting content, generating questions, and producing high-quality training data for model fine-tuning.
## Services
- `easy-dataset`: The main Easy Dataset application server with built-in SQLite database.
## Environment Variables
| Variable Name | Description | Default Value |
| -------------------------- | ----------------------------------- | ------------- |
| EASY_DATASET_VERSION | Easy Dataset image version | `1.5.1` |
| EASY_DATASET_PORT_OVERRIDE | Host port mapping for web interface | `1717` |
| TZ | System timezone | `UTC` |
Please create a `.env` file and modify it as needed for your use case.
## Volumes
- `easy_dataset_db`: A named volume for storing the SQLite database and uploaded files.
- `easy_dataset_prisma`: (Optional) A named volume for Prisma database files if needed.
## Getting Started
### Quick Start (Recommended)
1. (Optional) Create a `.env` file to customize settings:
```env
EASY_DATASET_VERSION=1.5.1
EASY_DATASET_PORT_OVERRIDE=1717
TZ=Asia/Shanghai
```
2. Start the service:
```bash
docker compose up -d
```
3. Access Easy Dataset at `http://localhost:1717`
### With Prisma Database Mount (Advanced)
If you need to mount the Prisma database files:
1. Initialize the database first:
```bash
# Clone the repository and initialize database
git clone https://github.com/ConardLi/easy-dataset.git
cd easy-dataset
npm install
npm run db:push
```
2. Uncomment the Prisma volume mount in `docker-compose.yaml`:
```yaml
volumes:
- easy_dataset_db:/app/local-db
- easy_dataset_prisma:/app/prisma # Uncomment this line
```
3. Start the service:
```bash
docker compose up -d
```
## Features
- **Intelligent Document Processing**: Supports PDF, Markdown, DOCX, and more
- **Smart Text Splitting**: Multiple algorithms with customizable segmentation
- **Question Generation**: Automatically extracts relevant questions from text
- **Domain Labels**: Builds global domain labels with understanding capabilities
- **Answer Generation**: Uses LLM APIs to generate comprehensive answers and Chain of Thought (COT)
- **Flexible Editing**: Edit questions, answers, and datasets at any stage
- **Multiple Export Formats**: Alpaca, ShareGPT, multilingual-thinking (JSON/JSONL)
- **Wide Model Support**: Compatible with all LLM APIs following OpenAI format
## Usage Workflow
1. **Create a Project**: Set up a new project with LLM API configuration
2. **Upload Documents**: Add your domain-specific files (PDF, Markdown, etc.)
3. **Text Splitting**: Review and adjust automatically split text segments
4. **Generate Questions**: Batch construct questions from text blocks
5. **Create Datasets**: Generate answers using configured LLM
6. **Export**: Export datasets in your preferred format
## Default Credentials
Easy Dataset does not require authentication by default. Access control should be implemented at the infrastructure level (e.g., reverse proxy, firewall rules).
## Resource Limits
The service is configured with the following resource limits:
- **CPU**: 0.5-2.0 cores
- **Memory**: 1-4 GB
These limits can be adjusted in `docker-compose.yaml` based on your workload requirements.
## Security Considerations
- **Data Privacy**: All data processing happens locally
- **API Keys**: Store LLM API keys securely within the application
- **Access Control**: Implement network-level access restrictions as needed
- **Updates**: Regularly update to the latest version for security patches
## Documentation
- Official Documentation: [https://docs.easy-dataset.com/](https://docs.easy-dataset.com/)
- GitHub Repository: [https://github.com/ConardLi/easy-dataset](https://github.com/ConardLi/easy-dataset)
- Video Tutorial: [Bilibili](https://www.bilibili.com/video/BV1y8QpYGE57/)
- Research Paper: [arXiv:2507.04009](https://arxiv.org/abs/2507.04009v1)
## Troubleshooting
### Container Won't Start
- Check logs: `docker compose logs easy-dataset`
- Verify port 1717 is not already in use
- Ensure sufficient system resources
### Database Issues
- For SQLite issues, remove and recreate the volume:
```bash
docker compose down -v
docker compose up -d
```
### Permission Errors
- Ensure the container has write access to mounted volumes
- Check Docker volume permissions
## License
Easy Dataset is licensed under AGPL 3.0. See the [LICENSE](https://github.com/ConardLi/easy-dataset/blob/main/LICENSE) file for details.
## Citation
If this work is helpful, please cite:
```bibtex
@misc{miao2025easydataset,
title={Easy Dataset: A Unified and Extensible Framework for Synthesizing LLM Fine-Tuning Data from Unstructured Documents},
author={Ziyang Miao and Qiyu Sun and Jingyuan Wang and Yuchen Gong and Yaowei Zheng and Shiqi Li and Richong Zhang},
year={2025},
eprint={2507.04009},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2507.04009}
}
```

View File

@@ -1,145 +0,0 @@
# Easy Dataset
[English](./README.md) | [中文](./README.zh.md)
这个服务部署 Easy Dataset一个用于创建大语言模型LLM微调数据集的强大工具。它提供了直观的界面可以上传特定领域的文件、智能分割内容、生成问题并产生高质量的模型微调训练数据。
## 服务
- `easy-dataset`:主应用服务器,内置 SQLite 数据库。
## 环境变量
| 变量名 | 描述 | 默认值 |
| -------------------------- | ---------------------- | ------- |
| EASY_DATASET_VERSION | Easy Dataset 镜像版本 | `1.5.1` |
| EASY_DATASET_PORT_OVERRIDE | Web 界面的主机端口映射 | `1717` |
| TZ | 系统时区 | `UTC` |
请创建 `.env` 文件并根据您的使用场景进行修改。
## 数据卷
- `easy_dataset_db`:用于存储 SQLite 数据库和上传文件的命名卷。
- `easy_dataset_prisma`:(可选)如需要可用于 Prisma 数据库文件的命名卷。
## 快速开始
### 快速启动(推荐)
1. (可选)创建 `.env` 文件以自定义设置:
```env
EASY_DATASET_VERSION=1.5.1
EASY_DATASET_PORT_OVERRIDE=1717
TZ=Asia/Shanghai
```
2. 启动服务:
```bash
docker compose up -d
```
3. 访问 Easy Dataset`http://localhost:1717`
### 使用 Prisma 数据库挂载(高级)
如果需要挂载 Prisma 数据库文件:
1. 首先初始化数据库:
```bash
# 克隆仓库并初始化数据库
git clone https://github.com/ConardLi/easy-dataset.git
cd easy-dataset
npm install
npm run db:push
```
2. 在 `docker-compose.yaml` 中取消注释 Prisma 卷挂载:
```yaml
volumes:
- easy_dataset_db:/app/local-db
- easy_dataset_prisma:/app/prisma # 取消此行注释
```
3. 启动服务:
```bash
docker compose up -d
```
## 功能特性
- **智能文档处理**:支持 PDF、Markdown、DOCX 等多种格式
- **智能文本分割**:多种算法,可自定义分段
- **问题生成**:从文本中自动提取相关问题
- **领域标签**:构建全局领域标签,具有理解能力
- **答案生成**:使用 LLM API 生成全面的答案和思维链COT
- **灵活编辑**:在任何阶段编辑问题、答案和数据集
- **多种导出格式**Alpaca、ShareGPT、multilingual-thinkingJSON/JSONL
- **广泛的模型支持**:兼容所有遵循 OpenAI 格式的 LLM API
## 使用流程
1. **创建项目**:设置新项目并配置 LLM API
2. **上传文档**添加您的特定领域文件PDF、Markdown 等)
3. **文本分割**:查看并调整自动分割的文本段
4. **生成问题**:从文本块批量构造问题
5. **创建数据集**:使用配置的 LLM 生成答案
6. **导出**:以您喜欢的格式导出数据集
## 默认凭据
Easy Dataset 默认不需要身份验证。应在基础设施层面实现访问控制(例如反向代理、防火墙规则)。
## 资源限制
该服务配置了以下资源限制:
- **CPU**0.5-2.0 核心
- **内存**1-4 GB
可以根据您的工作负载需求在 `docker-compose.yaml` 中调整这些限制。
## 安全注意事项
- **数据隐私**:所有数据处理都在本地进行
- **API 密钥**:在应用程序内安全存储 LLM API 密钥
- **访问控制**:根据需要实施网络级访问限制
- **更新**:定期更新到最新版本以获取安全补丁
## 文档
- 官方文档:[https://docs.easy-dataset.com/](https://docs.easy-dataset.com/)
- GitHub 仓库:[https://github.com/ConardLi/easy-dataset](https://github.com/ConardLi/easy-dataset)
- 视频教程:[Bilibili](https://www.bilibili.com/video/BV1y8QpYGE57/)
- 研究论文:[arXiv:2507.04009](https://arxiv.org/abs/2507.04009v1)
## 故障排除
### 容器无法启动
- 查看日志:`docker compose logs easy-dataset`
- 验证端口 1717 未被占用
- 确保系统资源充足
### 数据库问题
- 如遇到 SQLite 问题,删除并重新创建卷:
```bash
docker compose down -v
docker compose up -d
```
### 权限错误
- 确保容器对挂载卷有写入权限
- 检查 Docker 卷权限
## 许可证
Easy Dataset 采用 AGPL 3.0 许可证。详见 [LICENSE](https://github.com/ConardLi/easy-dataset/blob/main/LICENSE) 文件。

View File

@@ -1,40 +0,0 @@
# Shared defaults merged into every service via the YAML merge key (<<:).
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      # Quoted: log options expect string values.
      max-file: "3"

services:
  easy-dataset:
    <<: *defaults
    # GHCR registry is overridable for mirrored pulls.
    image: ${GHCR_IO_REGISTRY:-ghcr.io}/conardli/easy-dataset:${EASY_DATASET_VERSION:-1.5.1}
    ports:
      # Quoted to avoid YAML's numeric parsing of "host:container" mappings.
      - "${EASY_DATASET_PORT_OVERRIDE:-1717}:1717"
    volumes:
      # SQLite database and uploaded files.
      - easy_dataset_db:/app/local-db
      # Uncomment the following line if you need to mount Prisma database files
      # Note: You need to run 'npm run db:push' first to initialize the database
      # - easy_dataset_prisma:/app/prisma
    environment:
      - TZ=${TZ:-UTC}
      - NODE_ENV=production
    deploy:
      resources:
        limits:
          cpus: ${EASY_DATASET_CPU_LIMIT:-2.0}
          memory: ${EASY_DATASET_MEMORY_LIMIT:-4G}
        reservations:
          cpus: ${EASY_DATASET_CPU_RESERVATION:-0.5}
          memory: ${EASY_DATASET_MEMORY_RESERVATION:-1G}
    healthcheck:
      # Spider request against the web UI; failure marks the container unhealthy.
      test:
        ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:1717"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s

volumes:
  easy_dataset_db:
  # easy_dataset_prisma:

View File

@@ -82,7 +82,11 @@ services:
cpus: ${KAFKA_CPU_RESERVATION:-0.50}
memory: ${KAFKA_MEMORY_RESERVATION:-1G}
healthcheck:
test: ["CMD-SHELL", "kafka-broker-api-versions --bootstrap-server localhost:9092"]
test:
[
"CMD-SHELL",
"kafka-broker-api-versions --bootstrap-server localhost:9092",
]
interval: 30s
timeout: 10s
retries: 5
@@ -92,6 +96,8 @@ services:
kafka-ui:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}provectuslabs/kafka-ui:${KAFKA_UI_VERSION:-latest}
profiles:
- ui
depends_on:
kafka:
condition: service_healthy
@@ -113,8 +119,6 @@ services:
reservations:
cpus: ${KAFKA_UI_CPU_RESERVATION:-0.10}
memory: ${KAFKA_UI_MEMORY_RESERVATION:-128M}
profiles:
- ui
volumes:
zookeeper_data:

View File

@@ -1,38 +0,0 @@
# n8n version
N8N_VERSION=1.114.0
# Timezone
# TZ sets the container clock; GENERIC_TIMEZONE is what n8n uses for scheduling.
TZ=UTC
GENERIC_TIMEZONE=UTC
# Port
N8N_PORT=5678
# Basic auth
# NOTE(review): the N8N_BASIC_AUTH_* variables may be deprecated in newer n8n
# releases in favor of built-in user management — confirm against the n8n docs
# for the pinned version above.
N8N_BASIC_AUTH_ACTIVE=true
N8N_BASIC_AUTH_USER=
N8N_BASIC_AUTH_PASSWORD=
# Host configuration
N8N_HOST=0.0.0.0
N8N_PROTOCOL=http
# Public URL used when registering webhooks with external services.
WEBHOOK_URL=http://localhost:5678/
# Database configuration (SQLite by default, PostgreSQL optional)
DB_TYPE=sqlite
DB_POSTGRESDB_DATABASE=n8n
DB_POSTGRESDB_HOST=n8n-db
DB_POSTGRESDB_PORT=5432
DB_POSTGRESDB_USER=n8n
DB_POSTGRESDB_PASSWORD=
# Execution mode
EXECUTIONS_MODE=regular
# Encryption key (generate with: openssl rand -base64 32)
# Keep this stable across restarts or stored credentials become unreadable.
N8N_ENCRYPTION_KEY=
# PostgreSQL configuration (if using PostgreSQL)
POSTGRES_USER=n8n
# NOTE(review): weak default password — change it for any non-local deployment.
POSTGRES_PASSWORD=n8n
POSTGRES_DB=n8n

View File

@@ -1,118 +0,0 @@
# n8n
[English](./README.md) | [中文](./README.zh.md)
This service deploys n8n, a fair-code workflow automation platform with native AI capabilities.
## Services
- `n8n`: The main n8n application server.
- `n8n-db`: PostgreSQL database for n8n (optional, uses SQLite by default).
## Profiles
- `default`: Runs n8n with SQLite (no external database required).
- `postgres`: Runs n8n with PostgreSQL database.
To use PostgreSQL, start with:
```bash
docker compose --profile postgres up -d
```
## Environment Variables
| Variable Name | Description | Default Value |
| ----------------------- | ------------------------------------------------ | ------------------------ |
| N8N_VERSION | n8n image version | `1.114.0` |
| N8N_PORT | Host port mapping for n8n web interface | `5678` |
| N8N_BASIC_AUTH_ACTIVE | Enable basic authentication | `true` |
| N8N_BASIC_AUTH_USER | Basic auth username (required if auth is active) | `""` |
| N8N_BASIC_AUTH_PASSWORD | Basic auth password (required if auth is active) | `""` |
| N8N_HOST | Host address | `0.0.0.0` |
| N8N_PROTOCOL | Protocol (http or https) | `http` |
| WEBHOOK_URL | Webhook URL for external access | `http://localhost:5678/` |
| GENERIC_TIMEZONE | Timezone for n8n | `UTC` |
| TZ | System timezone | `UTC` |
| DB_TYPE | Database type (sqlite or postgresdb) | `sqlite` |
| DB_POSTGRESDB_DATABASE | PostgreSQL database name | `n8n` |
| DB_POSTGRESDB_HOST | PostgreSQL host | `n8n-db` |
| DB_POSTGRESDB_PORT | PostgreSQL port | `5432` |
| DB_POSTGRESDB_USER | PostgreSQL username | `n8n` |
| DB_POSTGRESDB_PASSWORD  | PostgreSQL password (set this when using PostgreSQL) | `""` |
| POSTGRES_VERSION | PostgreSQL image version | `17.2-alpine3.21` |
| EXECUTIONS_MODE | Execution mode (regular or queue) | `regular` |
| N8N_ENCRYPTION_KEY | Encryption key for credentials | `""` |
Please create a `.env` file and modify it as needed for your use case.
## Volumes
- `n8n_data`: A volume for storing n8n data (workflows, credentials, etc.).
- `n8n_db_data`: A volume for storing PostgreSQL data (when using PostgreSQL profile).
## Getting Started
### SQLite (Default)
1. Create a `.env` file with authentication credentials:
```env
N8N_BASIC_AUTH_USER=admin
N8N_BASIC_AUTH_PASSWORD=your-secure-password
```
2. Start the service:
```bash
docker compose up -d
```
3. Access n8n at `http://localhost:5678`
### PostgreSQL
1. Create a `.env` file with authentication and database credentials:
```env
N8N_BASIC_AUTH_USER=admin
N8N_BASIC_AUTH_PASSWORD=your-secure-password
DB_TYPE=postgresdb
DB_POSTGRESDB_PASSWORD=your-db-password
```
2. Start the service with PostgreSQL profile:
```bash
docker compose --profile postgres up -d
```
3. Access n8n at `http://localhost:5678`
## Features
- **Visual Workflow Builder**: Create workflows with an intuitive drag-and-drop interface
- **400+ Integrations**: Connect to popular services and APIs
- **AI-Native**: Built-in LangChain support for AI workflows
- **Code When Needed**: Write JavaScript/Python or use visual nodes
- **Self-Hosted**: Full control over your data and deployments
- **Webhook Support**: Trigger workflows from external events
- **Scheduled Executions**: Run workflows on a schedule
## Documentation
For more information, visit the [official n8n documentation](https://docs.n8n.io/).
## Community Resources
- [n8n Community Forum](https://community.n8n.io/)
- [Workflow Templates](https://n8n.io/workflows)
- [Integration List](https://n8n.io/integrations)
## Security Notes
- Always set `N8N_BASIC_AUTH_USER` and `N8N_BASIC_AUTH_PASSWORD` in production
- Use HTTPS in production environments (set `N8N_PROTOCOL=https`)
- Consider setting `N8N_ENCRYPTION_KEY` for credential encryption
- Regularly backup the n8n data volume
- Keep n8n updated to the latest stable version

View File

@@ -1,118 +0,0 @@
# n8n
[English](./README.md) | [中文](./README.zh.md)
此服务部署 n8n,一个具有原生 AI 功能的公平代码工作流自动化平台。
## 服务
- `n8n`: n8n 主应用服务器。
- `n8n-db`: n8n 的 PostgreSQL 数据库(可选,默认使用 SQLite)。
## 配置文件
- `default`: 使用 SQLite 运行 n8n(不需要外部数据库)。
- `postgres`: 使用 PostgreSQL 数据库运行 n8n。
要使用 PostgreSQL,请使用以下命令启动:
```bash
docker compose --profile postgres up -d
```
## 环境变量
| 变量名 | 描述 | 默认值 |
| ----------------------- | -------------------------------- | ------------------------ |
| N8N_VERSION | n8n 镜像版本 | `1.114.0` |
| N8N_PORT | n8n Web 界面的主机端口映射 | `5678` |
| N8N_BASIC_AUTH_ACTIVE | 启用基本认证 | `true` |
| N8N_BASIC_AUTH_USER | 基本认证用户名(认证启用时必需) | `""` |
| N8N_BASIC_AUTH_PASSWORD | 基本认证密码(认证启用时必需) | `""` |
| N8N_HOST | 主机地址 | `0.0.0.0` |
| N8N_PROTOCOL | 协议(http 或 https) | `http` |
| WEBHOOK_URL | 外部访问的 Webhook URL | `http://localhost:5678/` |
| GENERIC_TIMEZONE | n8n 时区 | `UTC` |
| TZ | 系统时区 | `UTC` |
| DB_TYPE | 数据库类型(sqlite 或 postgresdb) | `sqlite` |
| DB_POSTGRESDB_DATABASE | PostgreSQL 数据库名 | `n8n` |
| DB_POSTGRESDB_HOST | PostgreSQL 主机 | `n8n-db` |
| DB_POSTGRESDB_PORT | PostgreSQL 端口 | `5432` |
| DB_POSTGRESDB_USER | PostgreSQL 用户名 | `n8n` |
| DB_POSTGRESDB_PASSWORD  | PostgreSQL 密码(使用 PostgreSQL 时需设置) | `""` |
| POSTGRES_VERSION | PostgreSQL 镜像版本 | `17.2-alpine3.21` |
| EXECUTIONS_MODE | 执行模式(regular 或 queue) | `regular` |
| N8N_ENCRYPTION_KEY | 凭据加密密钥 | `""` |
请创建 `.env` 文件并根据需要进行修改。
## 数据卷
- `n8n_data`: 用于存储 n8n 数据(工作流、凭据等)的卷。
- `n8n_db_data`: 用于存储 PostgreSQL 数据的卷(使用 PostgreSQL 配置文件时)。
## 快速开始
### SQLite(默认)
1. 创建包含认证凭据的 `.env` 文件:
```env
N8N_BASIC_AUTH_USER=admin
N8N_BASIC_AUTH_PASSWORD=your-secure-password
```
2. 启动服务:
```bash
docker compose up -d
```
3. 访问 `http://localhost:5678`
### PostgreSQL
1. 创建包含认证和数据库凭据的 `.env` 文件:
```env
N8N_BASIC_AUTH_USER=admin
N8N_BASIC_AUTH_PASSWORD=your-secure-password
DB_TYPE=postgresdb
DB_POSTGRESDB_PASSWORD=your-db-password
```
2. 使用 PostgreSQL 配置文件启动服务:
```bash
docker compose --profile postgres up -d
```
3. 访问 `http://localhost:5678`
## 功能特性
- **可视化工作流构建器**: 使用直观的拖放界面创建工作流
- **400+ 集成**: 连接到流行的服务和 API
- **原生 AI**: 内置 LangChain 支持用于 AI 工作流
- **按需编码**: 编写 JavaScript/Python 或使用可视化节点
- **自托管**: 完全控制您的数据和部署
- **Webhook 支持**: 通过外部事件触发工作流
- **定时执行**: 按计划运行工作流
## 文档
更多信息请访问 [n8n 官方文档](https://docs.n8n.io/)。
## 社区资源
- [n8n 社区论坛](https://community.n8n.io/)
- [工作流模板](https://n8n.io/workflows)
- [集成列表](https://n8n.io/integrations)
## 安全提示
- 在生产环境中始终设置 `N8N_BASIC_AUTH_USER` 和 `N8N_BASIC_AUTH_PASSWORD`
- 在生产环境中使用 HTTPS(设置 `N8N_PROTOCOL=https`)
- 考虑设置 `N8N_ENCRYPTION_KEY` 用于凭据加密
- 定期备份 n8n 数据卷
- 保持 n8n 更新到最新稳定版本

View File

@@ -1,78 +0,0 @@
# Shared defaults merged into every service via the YAML merge key (<<:).
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  n8n:
    <<: *defaults
    # No `profiles:` key here on purpose: a service assigned ANY profile
    # (even one named "default") is skipped by a plain `docker compose up`,
    # which would break the documented SQLite quick start.
    image: ${GLOBAL_REGISTRY:-}n8nio/n8n:${N8N_VERSION:-1.114.0}
    ports:
      # Quoted to avoid YAML's numeric parsing of "host:container" mappings.
      - "${N8N_PORT:-5678}:5678"
    volumes:
      # Workflows, credentials, and settings.
      - n8n_data:/home/node/.n8n
    environment:
      - TZ=${TZ:-UTC}
      - N8N_BASIC_AUTH_ACTIVE=${N8N_BASIC_AUTH_ACTIVE:-true}
      - N8N_BASIC_AUTH_USER=${N8N_BASIC_AUTH_USER:-}
      - N8N_BASIC_AUTH_PASSWORD=${N8N_BASIC_AUTH_PASSWORD:-}
      - N8N_HOST=${N8N_HOST:-0.0.0.0}
      - N8N_PORT=${N8N_PORT:-5678}
      - N8N_PROTOCOL=${N8N_PROTOCOL:-http}
      - WEBHOOK_URL=${WEBHOOK_URL:-http://localhost:5678/}
      - GENERIC_TIMEZONE=${GENERIC_TIMEZONE:-UTC}
      # Database configuration (optional, uses SQLite by default)
      - DB_TYPE=${DB_TYPE:-sqlite}
      - DB_POSTGRESDB_DATABASE=${DB_POSTGRESDB_DATABASE:-n8n}
      - DB_POSTGRESDB_HOST=${DB_POSTGRESDB_HOST:-n8n-db}
      - DB_POSTGRESDB_PORT=${DB_POSTGRESDB_PORT:-5432}
      - DB_POSTGRESDB_USER=${DB_POSTGRESDB_USER:-n8n}
      - DB_POSTGRESDB_PASSWORD=${DB_POSTGRESDB_PASSWORD:-}
      # Execution mode
      - EXECUTIONS_MODE=${EXECUTIONS_MODE:-regular}
      - N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY:-}
    depends_on:
      n8n-db:
        condition: service_healthy
        # `required: false` (Compose v2.20+) makes the dependency optional, so
        # n8n still starts standalone (SQLite) when the postgres profile — and
        # therefore the n8n-db service — is not enabled.
        required: false
    deploy:
      resources:
        limits:
          cpus: ${N8N_CPU_LIMIT:-2.0}
          memory: ${N8N_MEMORY_LIMIT:-2G}
        reservations:
          cpus: ${N8N_CPU_RESERVATION:-0.5}
          memory: ${N8N_MEMORY_RESERVATION:-512M}
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:5678/healthz",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s

  n8n-db:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}postgres:${POSTGRES_VERSION:-17.2-alpine3.21}
    # Only started with: docker compose --profile postgres up -d
    # (matches the README's documented "postgres" profile).
    profiles:
      - postgres
    environment:
      - TZ=${TZ:-UTC}
      - POSTGRES_USER=${POSTGRES_USER:-n8n}
      # The postgres image refuses to initialize with an empty password —
      # set POSTGRES_PASSWORD in .env before using this profile.
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-}
      - POSTGRES_DB=${POSTGRES_DB:-n8n}
    volumes:
      - n8n_db_data:/var/lib/postgresql/data
    # Required: n8n's depends_on uses `condition: service_healthy`, which
    # fails at startup unless this service defines a healthcheck.
    healthcheck:
      # $$ defers expansion to the container shell, reading the env vars above.
      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER:-n8n} -d $${POSTGRES_DB:-n8n}"]
      interval: 10s
      timeout: 5s
      retries: 5
    deploy:
      resources:
        limits:
          cpus: ${N8N_DB_CPU_LIMIT:-1.0}
          memory: ${N8N_DB_MEMORY_LIMIT:-1G}
        reservations:
          cpus: ${N8N_DB_CPU_RESERVATION:-0.5}
          memory: ${N8N_DB_MEMORY_RESERVATION:-512M}

volumes:
  n8n_data:
  n8n_db_data:

View File

@@ -1,7 +0,0 @@
# Timezone
# The only active variable for the opencoze-info placeholder container.
TZ=UTC
# Note: OpenCoze is a complex multi-service platform.
# This is a placeholder configuration.
# For full deployment, please refer to:
# https://github.com/coze-dev/coze-studio/tree/main/docker

View File

@@ -1,89 +0,0 @@
# OpenCoze
[English](./README.md) | [中文](./README.zh.md)
OpenCoze is a comprehensive AI application development platform based on Coze Studio.
## Important Notice
OpenCoze requires a complex multi-service architecture that includes:
- MySQL (database)
- Redis (caching)
- Elasticsearch (search engine)
- MinIO (object storage)
- etcd (distributed configuration)
- Milvus (vector database)
- NSQ (message queue)
- Coze Server (main application)
- Nginx (reverse proxy)
Due to the complexity of this setup, **we recommend using the official docker-compose configuration directly** from the Coze Studio repository.
## Official Deployment
1. Clone the official repository:
```bash
git clone https://github.com/coze-dev/coze-studio.git
cd coze-studio/docker
```
2. Follow the official deployment guide:
- [Official Documentation](https://github.com/coze-dev/coze-studio)
- [Docker Deployment Guide](https://github.com/coze-dev/coze-studio/tree/main/docker)
3. The official docker-compose includes all necessary services with proper configuration.
## System Requirements
- **Minimum Resources**:
- CPU: 8 cores
- RAM: 16GB
- Disk: 100GB SSD
- **Recommended Resources**:
- CPU: 16 cores
- RAM: 32GB
- Disk: 200GB SSD
## Key Features
- **AI Bot Builder**: Visual interface for creating AI-powered chatbots
- **Workflow Automation**: Design complex workflows with AI capabilities
- **Knowledge Base**: Manage and utilize knowledge bases for AI responses
- **Plugin System**: Extend functionality with custom plugins
- **Multi-model Support**: Integration with various LLM providers
- **Team Collaboration**: Multi-user workspace with permission management
## Getting Started
For detailed setup instructions, please refer to:
- [Official GitHub Repository](https://github.com/coze-dev/coze-studio)
- [Official Docker Compose](https://github.com/coze-dev/coze-studio/blob/main/docker/docker-compose.yml)
## Alternative: Cloud Version
If self-hosting is too complex, consider using the cloud version:
- [Coze Cloud](https://www.coze.com/) (Official cloud service)
## Security Notes
When deploying OpenCoze:
- Change all default passwords
- Use strong encryption keys
- Enable HTTPS with valid SSL certificates
- Implement proper firewall rules
- Regularly backup all data volumes
- Keep all services updated to the latest versions
- Monitor resource usage and performance
## Support
For issues and questions:
- [GitHub Issues](https://github.com/coze-dev/coze-studio/issues)
- [Official Documentation](https://github.com/coze-dev/coze-studio)

View File

@@ -1,89 +0,0 @@
# OpenCoze
[English](./README.md) | [中文](./README.zh.md)
OpenCoze 是一个基于 Coze Studio 的综合性 AI 应用开发平台。
## 重要提示
OpenCoze 需要一个复杂的多服务架构,包括:
- MySQL(数据库)
- Redis(缓存)
- Elasticsearch(搜索引擎)
- MinIO(对象存储)
- etcd(分布式配置)
- Milvus(向量数据库)
- NSQ(消息队列)
- Coze Server(主应用)
- Nginx(反向代理)
由于设置的复杂性,**我们建议直接使用 Coze Studio 仓库中的官方 docker-compose 配置**。
## 官方部署
1. 克隆官方仓库:
```bash
git clone https://github.com/coze-dev/coze-studio.git
cd coze-studio/docker
```
2. 遵循官方部署指南:
- [官方文档](https://github.com/coze-dev/coze-studio)
- [Docker 部署指南](https://github.com/coze-dev/coze-studio/tree/main/docker)
3. 官方 docker-compose 包含所有必需的服务及适当的配置。
## 系统要求
- **最低要求**:
- CPU: 8 核
- 内存: 16GB
- 磁盘: 100GB SSD
- **推荐配置**:
- CPU: 16 核
- 内存: 32GB
- 磁盘: 200GB SSD
## 主要功能
- **AI 机器人构建器**: 用于创建 AI 驱动的聊天机器人的可视化界面
- **工作流自动化**: 设计具有 AI 能力的复杂工作流
- **知识库**: 管理和利用知识库进行 AI 响应
- **插件系统**: 使用自定义插件扩展功能
- **多模型支持**: 与各种 LLM 提供商集成
- **团队协作**: 具有权限管理的多用户工作区
## 快速开始
详细的设置说明请参考:
- [官方 GitHub 仓库](https://github.com/coze-dev/coze-studio)
- [官方 Docker Compose](https://github.com/coze-dev/coze-studio/blob/main/docker/docker-compose.yml)
## 替代方案: 云版本
如果自托管过于复杂,可以考虑使用云版本:
- [Coze 云服务](https://www.coze.com/)(官方云服务)
## 安全提示
部署 OpenCoze 时:
- 更改所有默认密码
- 使用强加密密钥
- 启用带有有效 SSL 证书的 HTTPS
- 实施适当的防火墙规则
- 定期备份所有数据卷
- 保持所有服务更新到最新版本
- 监控资源使用和性能
## 支持
如有问题和疑问:
- [GitHub Issues](https://github.com/coze-dev/coze-studio/issues)
- [官方文档](https://github.com/coze-dev/coze-studio)

View File

@@ -1,28 +0,0 @@
# Shared defaults merged into services via the YAML merge key (<<:).
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  # Note: OpenCoze is a complex platform that requires multiple services.
  # This is a placeholder configuration. For full deployment, please refer to:
  # https://github.com/coze-dev/coze-studio/tree/main/docker
  opencoze-info:
    # Fix: the &defaults anchor was declared but never merged here, so the
    # restart policy and log rotation silently did not apply to this service.
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}alpine:latest
    environment:
      - TZ=${TZ:-UTC}
    # Prints deployment guidance, then blocks forever so the container stays up.
    command: >
      sh -c "echo 'OpenCoze requires a complex multi-service setup.' &&
      echo 'Please visit https://github.com/coze-dev/coze-studio for full deployment instructions.' &&
      echo 'The official docker-compose includes: MySQL, Redis, Elasticsearch, MinIO, etcd, Milvus, NSQ, and the Coze server.' &&
      echo 'For production deployment, consider using their official docker-compose.yml directly.' &&
      tail -f /dev/null"
    deploy:
      resources:
        limits:
          cpus: ${OPENCOZE_INFO_CPU_LIMIT:-0.1}
          memory: ${OPENCOZE_INFO_MEMORY_LIMIT:-64M}

View File

@@ -28,7 +28,7 @@ services:
- pingap
- --autoreload
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:80/pingap/"]
test: ["CMD-SHELL", "echo > /dev/tcp/localhost/80"]
interval: 30s
timeout: 10s
retries: 3

View File

@@ -24,10 +24,14 @@ Redpanda is a Kafka-compatible streaming data platform built for performance and
2. Start the services:
```bash
# Start only Redpanda (without console)
docker compose up -d
# Or start with Redpanda Console UI
docker compose --profile console up -d
```
3. Access Redpanda Console at <http://localhost:8080>
3. If started with console profile, access Redpanda Console at <http://localhost:8080>
4. Verify the cluster is healthy:
@@ -35,13 +39,21 @@ Redpanda is a Kafka-compatible streaming data platform built for performance and
docker compose exec redpanda rpk cluster health
```
## Profiles
This configuration supports the following Docker Compose profiles:
- **console**: Enables the Redpanda Console web UI for monitoring and management
- To start with console: `docker compose --profile console up -d`
- To start without console: `docker compose up -d`
## Service Endpoints
- **Kafka API** (external): `localhost:19092`
- **Schema Registry**: `localhost:18081`
- **HTTP Proxy**: `localhost:18082`
- **Admin API**: `localhost:19644`
- **Redpanda Console**: `http://localhost:8080`
- **Redpanda Console** (when console profile is enabled): `http://localhost:8080`
## Basic Usage

View File

@@ -24,10 +24,14 @@ Redpanda 是一个与 Kafka 兼容的流数据平台,专为性能和开发者
2. 启动服务:
```bash
# 仅启动 Redpanda不包含控制台
docker compose up -d
# 或者启动包含 Redpanda Console UI
docker compose --profile console up -d
```
3. 访问 Redpanda Console<http://localhost:8080>
3. 如果使用 console profile 启动,可访问 Redpanda Console<http://localhost:8080>
4. 验证集群健康状态:
@@ -35,13 +39,21 @@ Redpanda 是一个与 Kafka 兼容的流数据平台,专为性能和开发者
docker compose exec redpanda rpk cluster health
```
## Profiles
此配置支持以下 Docker Compose profiles
- **console**:启用 Redpanda Console Web UI用于监控和管理
- 启用 console`docker compose --profile console up -d`
- 不启用 console`docker compose up -d`
## 服务端点
- **Kafka API**(外部):`localhost:19092`
- **Schema Registry**`localhost:18081`
- **HTTP Proxy**`localhost:18082`
- **Admin API**`localhost:19644`
- **Redpanda Console**`http://localhost:8080`
- **Redpanda Console**(启用 console profile 时)`http://localhost:8080`
## 基本使用

View File

@@ -38,7 +38,8 @@ services:
environment:
- TZ=${TZ:-UTC}
healthcheck:
test: ["CMD-SHELL", "rpk cluster health | grep -E 'Healthy:.+true' || exit 1"]
test:
["CMD-SHELL", "rpk cluster health | grep -E 'Healthy:.+true' || exit 1"]
interval: 15s
timeout: 10s
retries: 5
@@ -55,6 +56,8 @@ services:
console:
<<: *defaults
image: ${GLOBAL_REGISTRY:-}redpandadata/console:${REDPANDA_CONSOLE_VERSION:-v3.3.2}
profiles:
- console
ports:
- "${REDPANDA_CONSOLE_PORT_OVERRIDE:-8080}:8080"
environment:
@@ -68,7 +71,8 @@ services:
redpanda:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:8080/admin/health"]
test:
["CMD", "wget", "--spider", "-q", "http://localhost:8080/admin/health"]
interval: 10s
timeout: 5s
retries: 3

View File

@@ -1,13 +0,0 @@
# Stable Diffusion WebUI version
SD_WEBUI_VERSION="latest"
# CLI arguments for WebUI
CLI_ARGS="--listen --api --skip-version-check"
# NVIDIA GPU configuration
NVIDIA_VISIBLE_DEVICES="all"
NVIDIA_DRIVER_CAPABILITIES="compute,utility"
GPU_COUNT=1
# Port overrides
SD_WEBUI_PORT_OVERRIDE=7860

View File

@@ -1,122 +0,0 @@
# Stable Diffusion WebUI Docker
[English](./README.md) | [中文](./README.zh.md)
This service deploys Stable Diffusion WebUI (SD.Next) for AI image generation.
## Services
- `stable-diffusion-webui`: Stable Diffusion WebUI with GPU support.
## Prerequisites
**NVIDIA GPU Required**: This service requires an NVIDIA GPU with CUDA support and the NVIDIA Container Toolkit installed.
### Install NVIDIA Container Toolkit
**Linux:**
```bash
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
sudo systemctl restart docker
```
**Windows (Docker Desktop):**
Ensure you have WSL2 with NVIDIA drivers installed and Docker Desktop configured to use WSL2 backend.
## Environment Variables
| Variable Name | Description | Default Value |
| -------------------------- | -------------------------- | ------------------------------------- |
| SD_WEBUI_VERSION | SD WebUI image version | `latest` |
| CLI_ARGS | Command-line arguments | `--listen --api --skip-version-check` |
| NVIDIA_VISIBLE_DEVICES | GPUs to use | `all` |
| NVIDIA_DRIVER_CAPABILITIES | Driver capabilities | `compute,utility` |
| GPU_COUNT | Number of GPUs to allocate | `1` |
| SD_WEBUI_PORT_OVERRIDE | WebUI port | `7860` |
Please modify the `.env` file as needed for your use case.
## Volumes
- `sd_webui_data`: Model files, extensions, and configuration.
- `sd_webui_output`: Generated images output directory.
## Usage
### Start the Service
```bash
docker compose up -d
```
### Access the WebUI
Open your browser and navigate to:
```text
http://localhost:7860
```
### Download Models
On first start, you need to download models. The WebUI will guide you through this process, or you can manually place models in the `/data/models` directory.
Common model locations:
- Stable Diffusion models: `/data/models/Stable-diffusion/`
- VAE models: `/data/models/VAE/`
- LoRA models: `/data/models/Lora/`
- Embeddings: `/data/models/embeddings/`
### Generate Images
1. Select a model from the dropdown
2. Enter your prompt
3. Adjust parameters (steps, CFG scale, sampler, etc.)
4. Click "Generate"
## Features
- **Text-to-Image**: Generate images from text prompts
- **Image-to-Image**: Transform existing images
- **Inpainting**: Edit specific parts of images
- **Upscaling**: Enhance image resolution
- **API Access**: RESTful API for automation
- **Extensions**: Support for custom extensions
- **Multiple Models**: Support for various SD models (1.5, 2.x, SDXL, etc.)
## Notes
- First startup may take time to download dependencies and models
- Recommended: 8GB+ VRAM for SD 1.5, 12GB+ for SDXL
- GPU is required; CPU-only mode is extremely slow
- Generated images are saved in the `sd_webui_output` volume
- Models can be large (2-7GB each); ensure adequate disk space
## API Usage
With `--api` flag enabled, access the API at:
```text
http://localhost:7860/docs
```
Example API call:
```bash
curl -X POST http://localhost:7860/sdapi/v1/txt2img \
-H "Content-Type: application/json" \
-d '{
"prompt": "a beautiful landscape",
"steps": 20
}'
```
## License
Stable Diffusion models have various licenses. Please check individual model licenses before use.

View File

@@ -1,122 +0,0 @@
# Stable Diffusion WebUI Docker
[English](./README.md) | [中文](./README.zh.md)
此服务用于部署 Stable Diffusion WebUI (SD.Next) 进行 AI 图像生成。
## 服务
- `stable-diffusion-webui`: 支持 GPU 的 Stable Diffusion WebUI。
## 先决条件
**需要 NVIDIA GPU**: 此服务需要支持 CUDA 的 NVIDIA GPU 和已安装的 NVIDIA Container Toolkit。
### 安装 NVIDIA Container Toolkit
**Linux:**
```bash
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
sudo systemctl restart docker
```
**Windows (Docker Desktop):**
确保已安装带有 NVIDIA 驱动程序的 WSL2并将 Docker Desktop 配置为使用 WSL2 后端。
## 环境变量
| 变量名 | 说明 | 默认值 |
| -------------------------- | ----------------- | ------------------------------------- |
| SD_WEBUI_VERSION | SD WebUI 镜像版本 | `latest` |
| CLI_ARGS | 命令行参数 | `--listen --api --skip-version-check` |
| NVIDIA_VISIBLE_DEVICES | 使用的 GPU | `all` |
| NVIDIA_DRIVER_CAPABILITIES | 驱动程序功能 | `compute,utility` |
| GPU_COUNT | 分配的 GPU 数量 | `1` |
| SD_WEBUI_PORT_OVERRIDE | WebUI 端口 | `7860` |
请根据实际需求修改 `.env` 文件。
## 卷
- `sd_webui_data`: 模型文件、扩展和配置。
- `sd_webui_output`: 生成的图像输出目录。
## 使用方法
### 启动服务
```bash
docker compose up -d
```
### 访问 WebUI
在浏览器中打开:
```text
http://localhost:7860
```
### 下载模型
首次启动时您需要下载模型。WebUI 会指导您完成此过程,或者您可以手动将模型放置在 `/data/models` 目录中。
常见模型位置:
- Stable Diffusion 模型: `/data/models/Stable-diffusion/`
- VAE 模型: `/data/models/VAE/`
- LoRA 模型: `/data/models/Lora/`
- 嵌入: `/data/models/embeddings/`
### 生成图像
1. 从下拉列表中选择模型
2. 输入您的提示词
3. 调整参数步数、CFG 比例、采样器等)
4. 点击"生成"
## 功能
- **文本到图像**: 从文本提示生成图像
- **图像到图像**: 转换现有图像
- **修复**: 编辑图像的特定部分
- **放大**: 增强图像分辨率
- **API 访问**: 用于自动化的 RESTful API
- **扩展**: 支持自定义扩展
- **多模型**: 支持各种 SD 模型1.5、2.x、SDXL 等)
## 注意事项
- 首次启动可能需要时间下载依赖项和模型
- 推荐: SD 1.5 需要 8GB+ 显存SDXL 需要 12GB+
- 需要 GPU纯 CPU 模式极其缓慢
- 生成的图像保存在 `sd_webui_output` 卷中
- 模型可能很大(每个 2-7GB确保有足够的磁盘空间
## API 使用
启用 `--api` 标志后,在以下地址访问 API:
```text
http://localhost:7860/docs
```
API 调用示例:
```bash
curl -X POST http://localhost:7860/sdapi/v1/txt2img \
-H "Content-Type: application/json" \
-d '{
"prompt": "a beautiful landscape",
"steps": 20
}'
```
## 许可证
Stable Diffusion 模型有各种许可证。使用前请检查各个模型的许可证。

View File

@@ -1,44 +0,0 @@
# Shared service defaults, merged into each service below via the YAML merge key.
x-defaults: &defaults
  restart: unless-stopped
  # Cap container log growth: up to 3 rotated json-file logs of 100 MB each.
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"
services:
  stable-diffusion-webui:
    <<: *defaults
    # Registry and tag are overridable via GHCR_IO_REGISTRY / SD_WEBUI_VERSION.
    image: ${GHCR_IO_REGISTRY:-ghcr.io}/absolutelyludicrous/sdnext:${SD_WEBUI_VERSION:-latest}
    ports:
      # Host port configurable; container always serves the WebUI on 7860.
      - "${SD_WEBUI_PORT_OVERRIDE:-7860}:7860"
    environment:
      TZ: ${TZ:-UTC}
      # Default flags expose the UI on all interfaces and enable the REST API.
      CLI_ARGS: ${CLI_ARGS:---listen --api --skip-version-check}
      # NVIDIA runtime hints; "all" exposes every GPU to the container.
      NVIDIA_VISIBLE_DEVICES: ${NVIDIA_VISIBLE_DEVICES:-all}
      NVIDIA_DRIVER_CAPABILITIES: ${NVIDIA_DRIVER_CAPABILITIES:-compute,utility}
    volumes:
      # Models/extensions/config and generated-image output, respectively.
      - sd_webui_data:/data
      - sd_webui_output:/output
    deploy:
      resources:
        limits:
          cpus: ${SD_WEBUI_CPU_LIMIT:-4.0}
          memory: ${SD_WEBUI_MEMORY_LIMIT:-16G}
        reservations:
          cpus: ${SD_WEBUI_CPU_RESERVATION:-2.0}
          memory: ${SD_WEBUI_MEMORY_RESERVATION:-8G}
          # Reserve NVIDIA GPU(s); requires the NVIDIA Container Toolkit on the host.
          devices:
            - driver: nvidia
              count: ${GPU_COUNT:-1}
              capabilities: [gpu]
    healthcheck:
      # Probe the WebUI root over HTTP from inside the container.
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:7860/"]
      interval: 30s
      timeout: 10s
      retries: 3
      # Long grace period: first start downloads dependencies/models.
      start_period: 120s
volumes:
  sd_webui_data:
  sd_webui_output:

View File

@@ -1,37 +0,0 @@
# Stirling-PDF version
STIRLING_VERSION="latest"
# Port override
PORT_OVERRIDE=8080
# Security settings
ENABLE_SECURITY="false"
ENABLE_LOGIN="false"
INITIAL_USERNAME="admin"
INITIAL_PASSWORD="admin"
# Advanced operations (requires more dependencies)
INSTALL_ADVANCED_OPS="false"
# Languages (comma-separated, e.g., en_GB,ar_AR,de_DE,fr_FR,es_ES)
LANGUAGES="en_GB"
# User and group IDs
PUID=1000
PGID=1000
UMASK="022"
# System settings
DEFAULT_LOCALE="en-US"
APP_NAME="Stirling-PDF"
HOME_DESCRIPTION=""
NAVBAR_NAME=""
# Maximum file size in MB
MAX_FILE_SIZE="2000"
# Metrics
METRICS_ENABLED="false"
# Google visibility
GOOGLE_VISIBILITY="false"

View File

@@ -1,67 +0,0 @@
# Stirling-PDF
[English](./README.md) | [中文](./README.zh.md)
This service deploys Stirling-PDF, a locally hosted web-based PDF manipulation tool.
## Services
- `stirling-pdf`: The Stirling-PDF service.
## Environment Variables
| Variable Name | Description | Default Value |
| -------------------- | ------------------------------------- | -------------- |
| STIRLING_VERSION | Stirling-PDF image version | `latest` |
| PORT_OVERRIDE | Host port mapping | `8080` |
| ENABLE_SECURITY | Enable security features | `false` |
| ENABLE_LOGIN | Enable login functionality | `false` |
| INITIAL_USERNAME | Initial admin username | `admin` |
| INITIAL_PASSWORD | Initial admin password | `admin` |
| INSTALL_ADVANCED_OPS | Install advanced operations | `false` |
| LANGUAGES | Supported languages (comma-separated) | `en_GB` |
| PUID | User ID to run the service | `1000` |
| PGID | Group ID to run the service | `1000` |
| UMASK | File creation mask | `022` |
| DEFAULT_LOCALE | Default system locale | `en-US` |
| APP_NAME | Application name | `Stirling-PDF` |
| HOME_DESCRIPTION | Home page description | `""` |
| NAVBAR_NAME | Navigation bar name | `""` |
| MAX_FILE_SIZE | Maximum file size in MB | `2000` |
| METRICS_ENABLED | Enable metrics collection | `false` |
| GOOGLE_VISIBILITY | Allow Google indexing | `false` |
Please modify the `.env` file as needed for your use case.
## Volumes
- `stirling_trainingData`: OCR training data for Tesseract.
- `stirling_configs`: Configuration files.
- `stirling_logs`: Application logs.
- `stirling_customFiles`: Custom files and templates.
## Features
Stirling-PDF supports 50+ PDF operations including:
- Merge, split, rotate PDFs
- Convert to/from PDF
- OCR (Optical Character Recognition)
- Add/remove watermarks
- Compress PDFs
- Encrypt/decrypt PDFs
- Sign PDFs
- Fill forms
- Extract images and text
- And much more!
## Security Notes
- By default, security is disabled for easy setup.
- For production use, set `ENABLE_SECURITY=true` and `ENABLE_LOGIN=true`.
- Change the default admin credentials before deployment.
- Consider using a reverse proxy with HTTPS for secure access.
## License
Stirling-PDF is licensed under the MIT License.

View File

@@ -1,67 +0,0 @@
# Stirling-PDF
[English](./README.md) | [中文](./README.zh.md)
此服务用于部署 Stirling-PDF一个本地托管的基于 Web 的 PDF 操作工具。
## 服务
- `stirling-pdf`: Stirling-PDF 服务。
## 环境变量
| 变量名 | 说明 | 默认值 |
| -------------------- | ---------------------- | -------------- |
| STIRLING_VERSION | Stirling-PDF 镜像版本 | `latest` |
| PORT_OVERRIDE | 主机端口映射 | `8080` |
| ENABLE_SECURITY | 启用安全功能 | `false` |
| ENABLE_LOGIN | 启用登录功能 | `false` |
| INITIAL_USERNAME | 初始管理员用户名 | `admin` |
| INITIAL_PASSWORD | 初始管理员密码 | `admin` |
| INSTALL_ADVANCED_OPS | 安装高级操作 | `false` |
| LANGUAGES | 支持的语言(逗号分隔) | `en_GB` |
| PUID | 运行服务的用户 ID | `1000` |
| PGID | 运行服务的组 ID | `1000` |
| UMASK | 文件创建掩码 | `022` |
| DEFAULT_LOCALE | 默认系统区域设置 | `en-US` |
| APP_NAME | 应用程序名称 | `Stirling-PDF` |
| HOME_DESCRIPTION | 主页描述 | `""` |
| NAVBAR_NAME | 导航栏名称 | `""` |
| MAX_FILE_SIZE | 最大文件大小MB | `2000` |
| METRICS_ENABLED | 启用指标收集 | `false` |
| GOOGLE_VISIBILITY | 允许 Google 索引 | `false` |
请根据实际需求修改 `.env` 文件。
## 卷
- `stirling_trainingData`: Tesseract 的 OCR 训练数据。
- `stirling_configs`: 配置文件。
- `stirling_logs`: 应用程序日志。
- `stirling_customFiles`: 自定义文件和模板。
## 功能
Stirling-PDF 支持 50 多种 PDF 操作,包括:
- 合并、拆分、旋转 PDF
- 与其他格式互相转换(转换为 PDF 或从 PDF 导出)
- OCR光学字符识别
- 添加/删除水印
- 压缩 PDF
- 加密/解密 PDF
- 签名 PDF
- 填写表单
- 提取图像和文本
- 以及更多功能!
## 安全说明
- 默认情况下,安全功能被禁用以便于设置。
- 对于生产环境,请设置 `ENABLE_SECURITY=true``ENABLE_LOGIN=true`
- 在部署前更改默认管理员凭据。
- 考虑使用反向代理和 HTTPS 以实现安全访问。
## 许可证
Stirling-PDF 使用 MIT 许可证授权。

View File

@@ -1,57 +0,0 @@
# Shared service defaults, merged into each service below via the YAML merge key.
x-defaults: &defaults
  restart: unless-stopped
  # Cap container log growth: up to 3 rotated json-file logs of 100 MB each.
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"
services:
  stirling-pdf:
    <<: *defaults
    # Registry and tag are overridable via GLOBAL_REGISTRY / STIRLING_VERSION.
    image: ${GLOBAL_REGISTRY:-}stirlingtools/stirling-pdf:${STIRLING_VERSION:-latest}
    ports:
      # Host port configurable; container always serves on 8080.
      - "${PORT_OVERRIDE:-8080}:8080"
    volumes:
      # Tesseract OCR training data, config, logs, and custom templates.
      - stirling_trainingData:/usr/share/tessdata
      - stirling_configs:/configs
      - stirling_logs:/logs
      - stirling_customFiles:/customFiles
    environment:
      - TZ=${TZ:-UTC}
      # Security and login are off by default for easy first-run setup;
      # enable both (and change the credentials below) for production.
      - DOCKER_ENABLE_SECURITY=${ENABLE_SECURITY:-false}
      - SECURITY_ENABLELOGIN=${ENABLE_LOGIN:-false}
      - SECURITY_INITIALLOGIN_USERNAME=${INITIAL_USERNAME:-admin}
      - SECURITY_INITIALLOGIN_PASSWORD=${INITIAL_PASSWORD:-admin}
      - INSTALL_BOOK_AND_ADVANCED_HTML_OPS=${INSTALL_ADVANCED_OPS:-false}
      # Comma-separated UI languages, e.g. en_GB,de_DE.
      - LANGS=${LANGUAGES:-en_GB}
      # Run-as user/group and file-creation mask inside the container.
      - PUID=${PUID:-1000}
      - PGID=${PGID:-1000}
      - UMASK=${UMASK:-022}
      - SYSTEM_DEFAULTLOCALE=${DEFAULT_LOCALE:-en-US}
      # UI branding overrides (empty values fall back to app defaults).
      - UI_APPNAME=${APP_NAME:-Stirling-PDF}
      - UI_HOMEDESCRIPTION=${HOME_DESCRIPTION:-}
      - UI_APPNAMENAVBAR=${NAVBAR_NAME:-}
      # Upload limit in MB.
      - SYSTEM_MAXFILESIZE=${MAX_FILE_SIZE:-2000}
      - METRICS_ENABLED=${METRICS_ENABLED:-false}
      - SYSTEM_GOOGLEVISIBILITY=${GOOGLE_VISIBILITY:-false}
    deploy:
      resources:
        limits:
          cpus: ${STIRLING_CPU_LIMIT:-2.0}
          memory: ${STIRLING_MEMORY_LIMIT:-4G}
        reservations:
          cpus: ${STIRLING_CPU_RESERVATION:-1.0}
          memory: ${STIRLING_MEMORY_RESERVATION:-2G}
    healthcheck:
      # Probe the app root over HTTP from inside the container.
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
volumes:
  stirling_trainingData:
  stirling_configs:
  stirling_logs:
  stirling_customFiles: