diff --git a/.vscode/settings.json b/.vscode/settings.json
index 8c0a364..06e7340 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -13,6 +13,12 @@
       "strings": "off"
     }
   },
+  "[yaml]": {
+    "editor.formatOnSave": true
+  },
+  "[dockercompose]": {
+    "editor.formatOnSave": true
+  },
   "files.eol": "\n",
   "cSpell.enabled": false
 }
diff --git a/README.md b/README.md
index 8e1a856..0b6e8e0 100644
--- a/README.md
+++ b/README.md
@@ -99,6 +99,7 @@ These services require building custom Docker images from source.
 | [Phoenix (Arize)](./src/phoenix) | 12.28.1-nonroot |
 | [Pingora Proxy Manager](./src/pingora-proxy-manager) | v1.0.3 |
 | [Open WebUI Rust](./src/open-webui-rust) | latest |
+| [OpenCode](./src/opencode) | 1.1.27 |
 | [OpenCoze](./apps/opencoze) | See Docs |
 | [OpenCut](./src/opencut) | latest |
 | [OpenList](./src/openlist) | latest |
diff --git a/README.zh.md b/README.zh.md
index 7808c0e..4b716e2 100644
--- a/README.zh.md
+++ b/README.zh.md
@@ -99,6 +99,7 @@ Compose Anything 通过提供一组高质量的 Docker Compose 配置文件,
 | [Phoenix (Arize)](./src/phoenix) | 12.28.1-nonroot |
 | [Pingora Proxy Manager](./src/pingora-proxy-manager) | v1.0.3 |
 | [Open WebUI Rust](./src/open-webui-rust) | latest |
+| [OpenCode](./src/opencode) | 1.1.27 |
 | [OpenCoze](./apps/opencoze) | See Docs |
 | [OpenCut](./src/opencut) | latest |
 | [OpenList](./src/openlist) | latest |
diff --git a/src/lmdeploy/README.md b/src/lmdeploy/README.md
index bc650e6..f378158 100644
--- a/src/lmdeploy/README.md
+++ b/src/lmdeploy/README.md
@@ -6,9 +6,11 @@
 
 1. (Optional) Configure the model and port in `.env`.
 2. Start the service:
+
    ```bash
    docker compose up -d
    ```
+
 3. Access the OpenAI compatible API at `http://localhost:23333/v1`.
 
 ## Configuration
diff --git a/src/lmdeploy/README.zh.md b/src/lmdeploy/README.zh.md
index cd92b52..91a1a46 100644
--- a/src/lmdeploy/README.zh.md
+++ b/src/lmdeploy/README.zh.md
@@ -6,9 +6,11 @@
 
 1. (可选)在 `.env` 中配置模型和端口。
 2. 启动服务:
+
    ```bash
    docker compose up -d
    ```
+
 3. 通过 `http://localhost:23333/v1` 访问与 OpenAI 兼容的 API。
 
 ## 配置项
diff --git a/src/opencode/.env.example b/src/opencode/.env.example
new file mode 100644
index 0000000..cdbcec9
--- /dev/null
+++ b/src/opencode/.env.example
@@ -0,0 +1,31 @@
+# OpenCode Version
+OPENCODE_VERSION=1.1.27
+
+# Host Port Override
+OPENCODE_PORT_OVERRIDE=4096
+
+# Project Directory to mount (absolute or relative path)
+# This is where OpenCode will perform coding tasks
+OPENCODE_PROJECT_DIR=./project
+
+# Timezone
+TZ=UTC
+
+# LLM Provider API Keys
+# You need at least one of these to use OpenCode
+ANTHROPIC_API_KEY=
+OPENAI_API_KEY=
+GEMINI_API_KEY=
+DEEPSEEK_API_KEY=
+GROQ_API_KEY=
+TOGETHER_API_KEY=
+MISTRAL_API_KEY=
+
+# Optional: Inline JSON config content
+# OPENCODE_CONFIG_CONTENT={"theme": "opencode", "autoupdate": false}
+
+# Resource Limits
+OPENCODE_CPU_LIMIT=1.0
+OPENCODE_MEMORY_LIMIT=2G
+OPENCODE_CPU_RESERVATION=0.25
+OPENCODE_MEMORY_RESERVATION=512M
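A minimal first-run sketch for the `.env.example` above (the key value is a placeholder; any one of the provider keys listed in the file works):

```bash
# from the repository root
cd src/opencode
cp .env.example .env
# edit .env and set at least one provider key, e.g.
#   ANTHROPIC_API_KEY=<your key here>   (placeholder)
docker compose up -d
docker compose logs -f opencode
```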
diff --git a/src/opencode/README.md b/src/opencode/README.md
new file mode 100644
index 0000000..32cc118
--- /dev/null
+++ b/src/opencode/README.md
@@ -0,0 +1,42 @@
+# OpenCode
+
+[English](./README.md) | [中文](./README.zh.md)
+
+[OpenCode](https://github.com/anomalyco/opencode) is an open-source AI coding agent built for the terminal and the web. It lets you use a variety of LLM providers to automate coding tasks in local or remote projects.
+
+## Usage
+
+1. Copy `.env.example` to `.env`.
+2. Set the API key for your preferred LLM provider in `.env` (e.g., `ANTHROPIC_API_KEY`).
+3. Set `OPENCODE_PROJECT_DIR` to the path of the project you want the agent to work on.
+4. Start the service:
+
+   ```bash
+   docker compose up -d
+   ```
+
+5. Access the web interface at `http://localhost:4096`.
+
+## Configuration
+
+- `OPENCODE_VERSION`: The version of the OpenCode image (default: `1.1.27`).
+- `OPENCODE_PORT_OVERRIDE`: The host port on which the web interface is exposed (default: `4096`).
+- `OPENCODE_PROJECT_DIR`: Path to the project codebase the agent is given access to.
+- `ANTHROPIC_API_KEY`: API key for Anthropic Claude models.
+- `OPENAI_API_KEY`: API key for OpenAI models.
+- `GEMINI_API_KEY`: API key for Google Gemini models.
+- `DEEPSEEK_API_KEY`: API key for DeepSeek models.
+
+## Volumes
+
+- `opencode_data`: Stores configuration, session data, and cache.
+- The directory in `OPENCODE_PROJECT_DIR` is mounted at `/app` inside the container.
+
+## Resources
+
+Default limits:
+
+- CPU: 1.0
+- Memory: 2G
+
+You can override these in your `.env` file using `OPENCODE_CPU_LIMIT` and `OPENCODE_MEMORY_LIMIT`.
diff --git a/src/opencode/README.zh.md b/src/opencode/README.zh.md
new file mode 100644
index 0000000..f40ceae
--- /dev/null
+++ b/src/opencode/README.zh.md
@@ -0,0 +1,42 @@
+# OpenCode
+
+[English](./README.md) | [中文](./README.zh.md)
+
+[OpenCode](https://github.com/anomalyco/opencode) 是一个为终端和 Web 构建的开源 AI 编程助手。它允许你使用多种大语言模型(LLM)提供商来自动执行本地或远程项目中的编码任务。
+
+## 使用方法
+
+1. 将 `.env.example` 复制为 `.env`。
+2. 在 `.env` 中设置你偏好的 LLM 提供商 API 密钥(例如 `ANTHROPIC_API_KEY`)。
+3. 将 `OPENCODE_PROJECT_DIR` 设置为你希望助手工作的项目路径。
+4. 启动服务:
+
+   ```bash
+   docker compose up -d
+   ```
+
+5. 在浏览器中访问 `http://localhost:4096` 进入 Web 界面。
+
+## 配置项
+
+- `OPENCODE_VERSION`:OpenCode 镜像版本(默认为 `1.1.27`)。
+- `OPENCODE_PORT_OVERRIDE`:映射到宿主机的 Web 端口(默认为 `4096`)。
+- `OPENCODE_PROJECT_DIR`:助手有权访问的项目代码库路径。
+- `ANTHROPIC_API_KEY`:Anthropic Claude 模型的 API 密钥。
+- `OPENAI_API_KEY`:OpenAI 模型的 API 密钥。
+- `GEMINI_API_KEY`:Google Gemini 模型的 API 密钥。
+- `DEEPSEEK_API_KEY`:DeepSeek 模型的 API 密钥。
+
+## 数据卷
+
+- `opencode_data`:用于存储配置、会话数据和缓存。
+- 将目标项目目录挂载到容器内的 `/app` 路径。
+
+## 资源限制
+
+默认限制:
+
+- CPU:1.0
+- 内存:2G
+
+你可以通过 `.env` 文件中的 `OPENCODE_CPU_LIMIT` 和 `OPENCODE_MEMORY_LIMIT` 来覆盖这些默认值。
diff --git a/src/opencode/docker-compose.yaml b/src/opencode/docker-compose.yaml
new file mode 100644
index 0000000..6e0f1b0
--- /dev/null
+++ b/src/opencode/docker-compose.yaml
@@ -0,0 +1,54 @@
+x-defaults: &defaults
+  restart: unless-stopped
+  logging:
+    driver: json-file
+    options:
+      max-size: 100m
+      max-file: "3"
+
+services:
+  opencode:
+    <<: *defaults
+    image: ${GLOBAL_REGISTRY:-}ghcr.io/anomalyco/opencode:${OPENCODE_VERSION:-1.1.27}
+    command: web --hostname 0.0.0.0 --port 4096
+    ports:
+      - "${OPENCODE_PORT_OVERRIDE:-4096}:4096"
+    volumes:
+      - opencode_data:/root/.opencode
+      - ${OPENCODE_PROJECT_DIR:-./project}:/app
+    environment:
+      - TZ=${TZ:-UTC}
+      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
+      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
+      - GEMINI_API_KEY=${GEMINI_API_KEY:-}
+      - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
+      - GROQ_API_KEY=${GROQ_API_KEY:-}
+      - TOGETHER_API_KEY=${TOGETHER_API_KEY:-}
+      - MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
+      - OPENCODE_CONFIG_CONTENT=${OPENCODE_CONFIG_CONTENT:-}
+    working_dir: /app
+    healthcheck:
+      test:
+        [
+          "CMD",
+          "wget",
+          "--quiet",
+          "--tries=1",
+          "--spider",
+          "http://localhost:4096/",
+        ]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 30s
+    deploy:
+      resources:
+        limits:
+          cpus: ${OPENCODE_CPU_LIMIT:-1.0}
+          memory: ${OPENCODE_MEMORY_LIMIT:-2G}
+        reservations:
+          cpus: ${OPENCODE_CPU_RESERVATION:-0.25}
+          memory: ${OPENCODE_MEMORY_RESERVATION:-512M}
+
+volumes:
+  opencode_data:
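Once the stack is up, the healthcheck defined above can be observed from the host. A sketch, assuming Compose v2's default `<project>-<service>-<index>` container naming with the project name taken from the `src/opencode` directory:

```bash
# service status, including health, as reported by Compose
docker compose ps opencode

# the same probe the healthcheck runs, issued from the host
curl -fsS -o /dev/null -w '%{http_code}\n' http://localhost:4096/

# raw health state from the Docker engine (container name is an assumption)
docker inspect --format '{{.State.Health.Status}}' opencode-opencode-1
```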
diff --git a/src/openlist/docker-compose.yaml b/src/openlist/docker-compose.yaml
index 8297ef5..11663ed 100644
--- a/src/openlist/docker-compose.yaml
+++ b/src/openlist/docker-compose.yaml
@@ -28,7 +28,15 @@ services:
           cpus: ${OPENLIST_CPU_RESERVATION:-0.25}
           memory: ${OPENLIST_MEMORY_RESERVATION:-256M}
     healthcheck:
-      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5244/"]
+      test:
+        [
+          "CMD",
+          "wget",
+          "--no-verbose",
+          "--tries=1",
+          "--spider",
+          "http://localhost:5244/",
+        ]
       interval: 30s
       timeout: 10s
      retries: 3
diff --git a/src/opensearch/docker-compose.yaml b/src/opensearch/docker-compose.yaml
index 64ffa7c..9c25c3a 100644
--- a/src/opensearch/docker-compose.yaml
+++ b/src/opensearch/docker-compose.yaml
@@ -40,7 +40,8 @@ services:
           cpus: ${OPENSEARCH_CPU_RESERVATION:-1.0}
           memory: ${OPENSEARCH_MEMORY_RESERVATION:-1G}
     healthcheck:
-      test: ["CMD-SHELL", "curl -f http://localhost:9200/_cluster/health || exit 1"]
+      test:
+        ["CMD-SHELL", "curl -f http://localhost:9200/_cluster/health || exit 1"]
       interval: 30s
       timeout: 10s
       retries: 3
@@ -67,7 +68,15 @@ services:
           cpus: ${OPENSEARCH_DASHBOARDS_CPU_RESERVATION:-0.5}
           memory: ${OPENSEARCH_DASHBOARDS_MEMORY_RESERVATION:-512M}
     healthcheck:
-      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5601/api/status"]
+      test:
+        [
+          "CMD",
+          "wget",
+          "--no-verbose",
+          "--tries=1",
+          "--spider",
+          "http://localhost:5601/api/status",
+        ]
       interval: 30s
       timeout: 10s
       retries: 3
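The openlist and opensearch hunks are formatting-only: a YAML flow sequence parses identically whether it sits on one line or is wrapped across several, so the healthcheck commands themselves are unchanged. A quick way to confirm, assuming Compose v2 (the `:-` defaults mean no `.env` is needed just to render the config):

```bash
# render the effective configuration and inspect the resulting healthcheck
cd src/openlist
docker compose config | grep -A 3 'healthcheck:'
```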