Compare commits

13 Commits: 10313b35e9...main

| SHA1 |
| ---------- |
| 28ed2462af |
| e2ac465417 |
| 31bcf0c435 |
| aeddac52bf |
| 32742dc535 |
| 2a010843d1 |
| 1c528c0e64 |
| 10aa0a4e85 |
| 55318da0b2 |
| d1a12bc96b |
| bba4b43ed6 |
| e263ddf084 |
| 3044032943 |
.vscode/extensions.json (vendored): 3 changes
@@ -1,6 +1,7 @@
 {
   "recommendations": [
     "yzhang.markdown-all-in-one",
-    "DavidAnson.vscode-markdownlint"
+    "DavidAnson.vscode-markdownlint",
+    "redhat.vscode-yaml"
   ]
 }
.vscode/settings.json (vendored): 6 changes
@@ -13,6 +13,12 @@
       "strings": "off"
     }
   },
+  "[yaml]": {
+    "editor.formatOnSave": true
+  },
+  "[dockercompose]": {
+    "editor.formatOnSave": true
+  },
   "files.eol": "\n",
   "cSpell.enabled": false
 }
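A note on these two changes together: `editor.formatOnSave` only takes effect when a formatter is registered for the language mode, which is presumably why this settings change pairs with the `redhat.vscode-yaml` recommendation added to `extensions.json` above; that extension is the usual formatter provider for plain YAML and, typically, Compose files as well.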
README.md: 251 changes
@@ -12,129 +12,140 @@ These services require building custom Docker images from source.
 | [goose](./builds/goose) | 1.18.0 |
 | [IOPaint](./builds/io-paint) | 1.6.0 |
 | [K3s inside DinD](./builds/k3s-inside-dind) | 0.2.2 |
-| [MinerU vLLM](./builds/mineru) | 2.7.1 |
+| [MinerU vLLM](./builds/mineru) | 2.7.2 |

 ## Supported Services

-| Service | Version |
-| -------------------------------------------------------------- | ------------------- |
-| [Apache APISIX](./src/apisix) | 3.13.0 |
-| [Apache Cassandra](./src/cassandra) | 5.0.2 |
-| [Apache Flink](./src/flink) | 1.20.0 |
-| [Apache HBase](./src/hbase) | 2.6 |
-| [Apache HTTP Server](./src/apache) | 2.4.62 |
-| [Apache Kafka](./src/kafka) | 7.8.0 |
-| [Apache Pulsar](./src/pulsar) | 4.0.7 |
-| [Apache RocketMQ](./src/rocketmq) | 5.3.1 |
-| [Bifrost Gateway](./src/bifrost-gateway) | v1.3.63 |
-| [Bolt.diy](./apps/bolt-diy) | latest |
-| [Budibase](./src/budibase) | 3.23.0 |
-| [BuildingAI](./apps/buildingai) | latest |
-| [Bytebot](./src/bytebot) | edge |
-| [Clash](./src/clash) | 1.18.0 |
-| [ClickHouse](./src/clickhouse) | 24.11.1 |
-| [Conductor](./src/conductor) | latest |
-| [Dify](./apps/dify) | 0.18.2 |
-| [DNSMasq](./src/dnsmasq) | 2.91 |
-| [Dockge](./src/dockge) | 1 |
-| [Docker Registry](./src/docker-registry) | 3.0.0 |
-| [Doris](./src/doris) | 3.0.0 |
-| [DuckDB](./src/duckdb) | v1.1.3 |
-| [Easy Dataset](./apps/easy-dataset) | 1.5.1 |
-| [Elasticsearch](./src/elasticsearch) | 8.16.1 |
-| [etcd](./src/etcd) | 3.6.0 |
-| [Firecrawl](./src/firecrawl) | latest |
-| [frpc](./src/frpc) | 0.65.0 |
-| [frps](./src/frps) | 0.65.0 |
-| [Gitea Runner](./src/gitea-runner) | 0.2.13 |
-| [Gitea](./src/gitea) | 1.24.6 |
-| [GitLab Runner](./src/gitlab-runner) | 17.10.1 |
-| [GitLab](./src/gitlab) | 17.10.4-ce.0 |
-| [GPUStack](./src/gpustack) | v0.5.3 |
-| [Grafana](./src/grafana) | 12.1.1 |
-| [Grafana Loki](./src/loki) | 3.3.2 |
-| [Grafana Tempo](./src/tempo) | 2.7.2 |
-| [Halo](./src/halo) | 2.21.9 |
-| [Harbor](./src/harbor) | v2.12.0 |
-| [HashiCorp Consul](./src/consul) | 1.20.3 |
-| [Jenkins](./src/jenkins) | 2.486-lts |
-| [JODConverter](./src/jodconverter) | latest |
-| [Kestra](./src/kestra) | latest-full |
-| [Kibana](./src/kibana) | 8.16.1 |
-| [Kodbox](./src/kodbox) | 1.62 |
-| [Kong](./src/kong) | 3.8.0 |
-| [Langflow](./apps/langflow) | latest |
-| [Langfuse](./apps/langfuse) | 3.115.0 |
-| [LibreOffice](./src/libreoffice) | latest |
-| [libSQL Server](./src/libsql) | latest |
-| [LiteLLM](./src/litellm) | main-stable |
-| [Logstash](./src/logstash) | 8.16.1 |
-| [MariaDB Galera Cluster](./src/mariadb-galera) | 11.7.2 |
-| [Memos](./src/memos) | 0.25.3 |
-| [Milvus Standalone Embed](./src/milvus-standalone-embed) | v2.6.7 |
-| [Milvus Standalone](./src/milvus-standalone) | v2.6.7 |
-| [Minecraft Bedrock Server](./src/minecraft-bedrock-server) | latest |
-| [MinIO](./src/minio) | 0.20251015 |
-| [MLflow](./src/mlflow) | v2.20.2 |
-| [MongoDB ReplicaSet Single](./src/mongodb-replicaset-single) | 8.2.3 |
-| [MongoDB ReplicaSet](./src/mongodb-replicaset) | 8.2.3 |
-| [MongoDB Standalone](./src/mongodb-standalone) | 8.2.3 |
-| [MySQL](./src/mysql) | 9.4.0 |
-| [n8n](./apps/n8n) | 1.114.0 |
-| [Nacos](./src/nacos) | v3.1.0 |
-| [NebulaGraph](./src/nebulagraph) | v3.8.0 |
-| [NexaSDK](./src/nexa-sdk) | v0.2.62 |
-| [Neo4j](./src/neo4j) | 5.27.4 |
-| [Netdata](./src/netdata) | latest |
-| [Nginx](./src/nginx) | 1.29.1 |
-| [Node Exporter](./src/node-exporter) | v1.8.2 |
-| [OceanBase](./src/oceanbase) | 4.3.3 |
-| [Odoo](./src/odoo) | 19.0 |
-| [Ollama](./src/ollama) | 0.12.0 |
-| [Open WebUI](./src/open-webui) | main |
-| [Phoenix (Arize)](./src/phoenix) | 12.28.1-nonroot |
-| [Pingora Proxy Manager](./src/pingora-proxy-manager) | v1.0.3 |
-| [Open WebUI Rust](./src/open-webui-rust) | latest |
-| [OpenCoze](./apps/opencoze) | See Docs |
-| [OpenCut](./src/opencut) | latest |
-| [OpenList](./src/openlist) | latest |
-| [OpenLIT](./apps/openlit) | latest |
-| [OpenObserve](./apps/openobserve) | v0.50.0 |
-| [OpenSearch](./src/opensearch) | 2.19.0 |
-| [OpenTelemetry Collector](./src/otel-collector) | 0.115.1 |
-| [Overleaf](./src/overleaf) | 5.2.1 |
-| [PocketBase](./src/pocketbase) | 0.30.0 |
-| [Podman](./src/podman) | v5.7.1 |
-| [Portainer](./src/portainer) | 2.27.3-alpine |
-| [Portkey AI Gateway](./src/portkey-gateway) | latest |
-| [PostgreSQL](./src/postgres) | 17.6 |
-| [Prometheus](./src/prometheus) | 3.5.0 |
-| [PyTorch](./src/pytorch) | 2.6.0 |
-| [Qdrant](./src/qdrant) | 1.15.4 |
-| [RabbitMQ](./src/rabbitmq) | 4.1.4 |
-| [Ray](./src/ray) | 2.42.1 |
-| [Redpanda](./src/redpanda) | v24.3.1 |
-| [Redis Cluster](./src/redis-cluster) | 8.2.1 |
-| [Redis](./src/redis) | 8.2.1 |
-| [Renovate](./src/renovate) | 42.52.5-full |
-| [Restate Cluster](./src/restate-cluster) | 1.5.3 |
-| [Restate](./src/restate) | 1.5.3 |
-| [SearXNG](./src/searxng) | 2025.1.20-1ce14ef99 |
-| [SigNoz](./src/signoz) | 0.55.0 |
-| [Sim](./apps/sim) | latest |
-| [Stable Diffusion WebUI](./apps/stable-diffusion-webui-docker) | latest |
-| [Stirling-PDF](./apps/stirling-pdf) | latest |
-| [Temporal](./src/temporal) | 1.24.2 |
-| [TiDB](./src/tidb) | v8.5.0 |
-| [TiKV](./src/tikv) | v8.5.0 |
-| [Trigger.dev](./src/trigger-dev) | v4.2.0 |
-| [TrailBase](./src/trailbase) | 0.22.4 |
-| [Valkey Cluster](./src/valkey-cluster) | 8.0 |
-| [Valkey](./src/valkey) | 8.0 |
-| [Verdaccio](./src/verdaccio) | 6.1.2 |
-| [vLLM](./src/vllm) | v0.13.0 |
-| [Windmill](./src/windmill) | main |
-| [ZooKeeper](./src/zookeeper) | 3.9.3 |
+| Service | Version |
+| -------------------------------------------------------------- | -------------------- |
+| [Apache APISIX](./src/apisix) | 3.13.0 |
+| [Apache Cassandra](./src/cassandra) | 5.0.2 |
+| [Apache Flink](./src/flink) | 1.20.0 |
+| [Apache HBase](./src/hbase) | 2.6 |
+| [Apache HTTP Server](./src/apache) | 2.4.62 |
+| [Apache Kafka](./src/kafka) | 7.8.0 |
+| [Apache Pulsar](./src/pulsar) | 4.0.7 |
+| [Apache RocketMQ](./src/rocketmq) | 5.3.1 |
+| [Bifrost Gateway](./src/bifrost-gateway) | v1.3.63 |
+| [Bolt.diy](./apps/bolt-diy) | latest |
+| [Budibase](./src/budibase) | 3.23.0 |
+| [BuildingAI](./apps/buildingai) | latest |
+| [Bytebot](./src/bytebot) | edge |
+| [Clash](./src/clash) | 1.18.0 |
+| [ClickHouse](./src/clickhouse) | 24.11.1 |
+| [Conductor](./src/conductor) | latest |
+| [DeepTutor](./apps/deeptutor) | latest |
+| [Dify](./apps/dify) | 0.18.2 |
+| [DNSMasq](./src/dnsmasq) | 2.91 |
+| [Dockge](./src/dockge) | 1 |
+| [Docker Android Emulator](./src/docker-android) | api-33 |
+| [Docker Registry](./src/docker-registry) | 3.0.0 |
+| [Doris](./src/doris) | 3.0.0 |
+| [DuckDB](./src/duckdb) | v1.1.3 |
+| [Easy Dataset](./apps/easy-dataset) | 1.5.1 |
+| [Elasticsearch](./src/elasticsearch) | 8.16.1 |
+| [etcd](./src/etcd) | 3.6.0 |
+| [FalkorDB](./src/falkordb) | v4.14.11 |
+| [Firecrawl](./src/firecrawl) | latest |
+| [Flowise](./src/flowise) | 3.0.12 |
+| [frpc](./src/frpc) | 0.65.0 |
+| [frps](./src/frps) | 0.65.0 |
+| [Gitea Runner](./src/gitea-runner) | 0.2.13 |
+| [Gitea](./src/gitea) | 1.25.4-rootless |
+| [GitLab Runner](./src/gitlab-runner) | 17.10.1 |
+| [GitLab](./src/gitlab) | 17.10.4-ce.0 |
+| [GPUStack](./src/gpustack) | v0.5.3 |
+| [Grafana](./src/grafana) | 12.1.1 |
+| [Grafana Loki](./src/loki) | 3.3.2 |
+| [Grafana Tempo](./src/tempo) | 2.7.2 |
+| [Halo](./src/halo) | 2.21.9 |
+| [Harbor](./src/harbor) | v2.12.0 |
+| [HashiCorp Consul](./src/consul) | 1.20.3 |
+| [InfluxDB](./src/influxdb) | 2.8.0 |
+| [Jenkins](./src/jenkins) | 2.486-lts |
+| [JODConverter](./src/jodconverter) | latest |
+| [Kestra](./src/kestra) | latest-full |
+| [Kibana](./src/kibana) | 8.16.1 |
+| [Kodbox](./src/kodbox) | 1.62 |
+| [Kong](./src/kong) | 3.8.0 |
+| [Langflow](./apps/langflow) | latest |
+| [Langfuse](./apps/langfuse) | 3.115.0 |
+| [LibreOffice](./src/libreoffice) | latest |
+| [libSQL Server](./src/libsql) | latest |
+| [LiteLLM](./src/litellm) | main-stable |
+| [llama.cpp](./src/llama.cpp) | server |
+| [LMDeploy](./src/lmdeploy) | v0.11.1 |
+| [Logstash](./src/logstash) | 8.16.1 |
+| [MariaDB Galera Cluster](./src/mariadb-galera) | 11.7.2 |
+| [Memos](./src/memos) | 0.25.3 |
+| [Milvus Standalone Embed](./src/milvus-standalone-embed) | v2.6.7 |
+| [Milvus Standalone](./src/milvus-standalone) | v2.6.7 |
+| [Minecraft Bedrock Server](./src/minecraft-bedrock-server) | latest |
+| [MinIO](./src/minio) | 0.20251015 |
+| [MLflow](./src/mlflow) | v2.20.2 |
+| [MoltBot](./apps/moltbot) | main |
+| [MongoDB ReplicaSet Single](./src/mongodb-replicaset-single) | 8.2.3 |
+| [MongoDB ReplicaSet](./src/mongodb-replicaset) | 8.2.3 |
+| [MongoDB Standalone](./src/mongodb-standalone) | 8.2.3 |
+| [MySQL](./src/mysql) | 9.4.0 |
+| [n8n](./apps/n8n) | 1.114.0 |
+| [Nacos](./src/nacos) | v3.1.0 |
+| [NebulaGraph](./src/nebulagraph) | v3.8.0 |
+| [NexaSDK](./src/nexa-sdk) | v0.2.62 |
+| [Neo4j](./src/neo4j) | 5.27.4 |
+| [Netdata](./src/netdata) | latest |
+| [Nginx](./src/nginx) | 1.29.1 |
+| [Node Exporter](./src/node-exporter) | v1.8.2 |
+| [OceanBase](./src/oceanbase) | 4.3.3 |
+| [Odoo](./src/odoo) | 19.0 |
+| [Ollama](./src/ollama) | 0.12.0 |
+| [Open WebUI](./src/open-webui) | main |
+| [Phoenix (Arize)](./src/phoenix) | 12.31.2-nonroot |
+| [Pingora Proxy Manager](./src/pingora-proxy-manager) | v1.0.3 |
+| [Open WebUI Rust](./src/open-webui-rust) | latest |
+| [OpenCode](./src/opencode) | 1.1.27 |
+| [OpenCoze](./apps/opencoze) | See Docs |
+| [OpenCut](./src/opencut) | latest |
+| [OpenList](./src/openlist) | latest |
+| [OpenLIT](./apps/openlit) | latest |
+| [OpenObserve](./apps/openobserve) | v0.50.0 (enterprise) |
+| [OpenSearch](./src/opensearch) | 2.19.0 |
+| [OpenTelemetry Collector](./src/otel-collector) | 0.115.1 |
+| [Overleaf](./src/overleaf) | 5.2.1 |
+| [PocketBase](./src/pocketbase) | 0.30.0 |
+| [Podman](./src/podman) | v5.7.1 |
+| [Pogocache](./src/pogocache) | 1.3.1 |
+| [Portainer](./src/portainer) | 2.27.3-alpine |
+| [Portkey AI Gateway](./src/portkey-gateway) | latest |
+| [PostgreSQL](./src/postgres) | 17.6 |
+| [Prometheus](./src/prometheus) | 3.5.0 |
+| [PyTorch](./src/pytorch) | 2.6.0 |
+| [Qdrant](./src/qdrant) | 1.15.4 |
+| [RabbitMQ](./src/rabbitmq) | 4.1.4 |
+| [Ray](./src/ray) | 2.42.1 |
+| [Redpanda](./src/redpanda) | v24.3.1 |
+| [Redis Cluster](./src/redis-cluster) | 8.2.1 |
+| [Redis](./src/redis) | 8.2.1 |
+| [Renovate](./src/renovate) | 42.85.4-full |
+| [Restate Cluster](./src/restate-cluster) | 1.5.3 |
+| [Restate](./src/restate) | 1.5.3 |
+| [SearXNG](./src/searxng) | 2025.1.20-1ce14ef99 |
+| [Selenium](./src/selenium) | 144.0-20260120 |
+| [SigNoz](./src/signoz) | 0.55.0 |
+| [Sim](./apps/sim) | latest |
+| [Stable Diffusion WebUI](./apps/stable-diffusion-webui-docker) | latest |
+| [Stirling-PDF](./apps/stirling-pdf) | latest |
+| [Temporal](./src/temporal) | 1.24.2 |
+| [TiDB](./src/tidb) | v8.5.0 |
+| [TiKV](./src/tikv) | v8.5.0 |
+| [Trigger.dev](./src/trigger-dev) | v4.2.0 |
+| [TrailBase](./src/trailbase) | 0.22.4 |
+| [Valkey Cluster](./src/valkey-cluster) | 8.0 |
+| [Valkey](./src/valkey) | 8.0 |
+| [Verdaccio](./src/verdaccio) | 6.1.2 |
+| [vLLM](./src/vllm) | v0.13.0 |
+| [Windmill](./src/windmill) | main |
+| [ZooKeeper](./src/zookeeper) | 3.9.3 |

 ## MCP Servers
README.zh.md: 251 changes
@@ -12,129 +12,140 @@ Compose Anything 通过提供一组高质量的 Docker Compose 配置文件,
 | [goose](./builds/goose) | 1.18.0 |
 | [IOPaint](./builds/io-paint) | 1.6.0 |
 | [K3s inside DinD](./builds/k3s-inside-dind) | 0.2.2 |
-| [MinerU vLLM](./builds/mineru) | 2.7.1 |
+| [MinerU vLLM](./builds/mineru) | 2.7.2 |

 ## 已经支持的服务

-| 服务 | 版本 |
-| -------------------------------------------------------------- | ------------------- |
-| [Apache APISIX](./src/apisix) | 3.13.0 |
-| [Apache Cassandra](./src/cassandra) | 5.0.2 |
-| [Apache Flink](./src/flink) | 1.20.0 |
-| [Apache HBase](./src/hbase) | 2.6 |
-| [Apache HTTP Server](./src/apache) | 2.4.62 |
-| [Apache Kafka](./src/kafka) | 7.8.0 |
-| [Apache Pulsar](./src/pulsar) | 4.0.7 |
-| [Apache RocketMQ](./src/rocketmq) | 5.3.1 |
-| [Bifrost Gateway](./src/bifrost-gateway) | v1.3.63 |
-| [Bolt.diy](./apps/bolt-diy) | latest |
-| [Budibase](./src/budibase) | 3.23.0 |
-| [BuildingAI](./apps/buildingai) | latest |
-| [Bytebot](./src/bytebot) | edge |
-| [Clash](./src/clash) | 1.18.0 |
-| [ClickHouse](./src/clickhouse) | 24.11.1 |
-| [Conductor](./src/conductor) | latest |
-| [Dify](./apps/dify) | 0.18.2 |
-| [DNSMasq](./src/dnsmasq) | 2.91 |
-| [Dockge](./src/dockge) | 1 |
-| [Docker Registry](./src/docker-registry) | 3.0.0 |
-| [Doris](./src/doris) | 3.0.0 |
-| [DuckDB](./src/duckdb) | v1.1.3 |
-| [Easy Dataset](./apps/easy-dataset) | 1.5.1 |
-| [Elasticsearch](./src/elasticsearch) | 8.16.1 |
-| [etcd](./src/etcd) | 3.6.0 |
-| [Firecrawl](./src/firecrawl) | latest |
-| [frpc](./src/frpc) | 0.65.0 |
-| [frps](./src/frps) | 0.65.0 |
-| [Gitea Runner](./src/gitea-runner) | 0.2.13 |
-| [Gitea](./src/gitea) | 1.24.6 |
-| [GitLab Runner](./src/gitlab-runner) | 17.10.1 |
-| [GitLab](./src/gitlab) | 17.10.4-ce.0 |
-| [GPUStack](./src/gpustack) | v0.5.3 |
-| [Grafana](./src/grafana) | 12.1.1 |
-| [Grafana Loki](./src/loki) | 3.3.2 |
-| [Grafana Tempo](./src/tempo) | 2.7.2 |
-| [Halo](./src/halo) | 2.21.9 |
-| [Harbor](./src/harbor) | v2.12.0 |
-| [HashiCorp Consul](./src/consul) | 1.20.3 |
-| [Jenkins](./src/jenkins) | 2.486-lts |
-| [JODConverter](./src/jodconverter) | latest |
-| [Kestra](./src/kestra) | latest-full |
-| [Kibana](./src/kibana) | 8.16.1 |
-| [Kodbox](./src/kodbox) | 1.62 |
-| [Kong](./src/kong) | 3.8.0 |
-| [Langflow](./apps/langflow) | latest |
-| [Langfuse](./apps/langfuse) | 3.115.0 |
-| [LibreOffice](./src/libreoffice) | latest |
-| [libSQL Server](./src/libsql) | latest |
-| [LiteLLM](./src/litellm) | main-stable |
-| [Logstash](./src/logstash) | 8.16.1 |
-| [MariaDB Galera Cluster](./src/mariadb-galera) | 11.7.2 |
-| [Memos](./src/memos) | 0.25.3 |
-| [Milvus Standalone Embed](./src/milvus-standalone-embed) | v2.6.7 |
-| [Milvus Standalone](./src/milvus-standalone) | v2.6.7 |
-| [Minecraft Bedrock Server](./src/minecraft-bedrock-server) | latest |
-| [MinIO](./src/minio) | 0.20251015 |
-| [MLflow](./src/mlflow) | v2.20.2 |
-| [MongoDB ReplicaSet Single](./src/mongodb-replicaset-single) | 8.2.3 |
-| [MongoDB ReplicaSet](./src/mongodb-replicaset) | 8.2.3 |
-| [MongoDB Standalone](./src/mongodb-standalone) | 8.2.3 |
-| [MySQL](./src/mysql) | 9.4.0 |
-| [n8n](./apps/n8n) | 1.114.0 |
-| [Nacos](./src/nacos) | v3.1.0 |
-| [NebulaGraph](./src/nebulagraph) | v3.8.0 |
-| [NexaSDK](./src/nexa-sdk) | v0.2.62 |
-| [Neo4j](./src/neo4j) | 5.27.4 |
-| [Netdata](./src/netdata) | latest |
-| [Nginx](./src/nginx) | 1.29.1 |
-| [Node Exporter](./src/node-exporter) | v1.8.2 |
-| [OceanBase](./src/oceanbase) | 4.3.3 |
-| [Odoo](./src/odoo) | 19.0 |
-| [Ollama](./src/ollama) | 0.12.0 |
-| [Open WebUI](./src/open-webui) | main |
-| [Phoenix (Arize)](./src/phoenix) | 12.28.1-nonroot |
-| [Pingora Proxy Manager](./src/pingora-proxy-manager) | v1.0.3 |
-| [Open WebUI Rust](./src/open-webui-rust) | latest |
-| [OpenCoze](./apps/opencoze) | See Docs |
-| [OpenCut](./src/opencut) | latest |
-| [OpenList](./src/openlist) | latest |
-| [OpenLIT](./apps/openlit) | latest |
-| [OpenObserve](./apps/openobserve) | v0.50.0 |
-| [OpenSearch](./src/opensearch) | 2.19.0 |
-| [OpenTelemetry Collector](./src/otel-collector) | 0.115.1 |
-| [Overleaf](./src/overleaf) | 5.2.1 |
-| [PocketBase](./src/pocketbase) | 0.30.0 |
-| [Podman](./src/podman) | v5.7.1 |
-| [Portainer](./src/portainer) | 2.27.3-alpine |
-| [Portkey AI Gateway](./src/portkey-gateway) | latest |
-| [PostgreSQL](./src/postgres) | 17.6 |
-| [Prometheus](./src/prometheus) | 3.5.0 |
-| [PyTorch](./src/pytorch) | 2.6.0 |
-| [Qdrant](./src/qdrant) | 1.15.4 |
-| [RabbitMQ](./src/rabbitmq) | 4.1.4 |
-| [Ray](./src/ray) | 2.42.1 |
-| [Redpanda](./src/redpanda) | v24.3.1 |
-| [Redis Cluster](./src/redis-cluster) | 8.2.1 |
-| [Redis](./src/redis) | 8.2.1 |
-| [Renovate](./src/renovate) | 42.52.5-full |
-| [Restate Cluster](./src/restate-cluster) | 1.5.3 |
-| [Restate](./src/restate) | 1.5.3 |
-| [SearXNG](./src/searxng) | 2025.1.20-1ce14ef99 |
-| [SigNoz](./src/signoz) | 0.55.0 |
-| [Sim](./apps/sim) | latest |
-| [Stable Diffusion WebUI](./apps/stable-diffusion-webui-docker) | latest |
-| [Stirling-PDF](./apps/stirling-pdf) | latest |
-| [Temporal](./src/temporal) | 1.24.2 |
-| [TiDB](./src/tidb) | v8.5.0 |
-| [TiKV](./src/tikv) | v8.5.0 |
-| [Trigger.dev](./src/trigger-dev) | v4.2.0 |
-| [TrailBase](./src/trailbase) | 0.22.4 |
-| [Valkey Cluster](./src/valkey-cluster) | 8.0 |
-| [Valkey](./src/valkey) | 8.0 |
-| [Verdaccio](./src/verdaccio) | 6.1.2 |
-| [vLLM](./src/vllm) | v0.13.0 |
-| [Windmill](./src/windmill) | main |
-| [ZooKeeper](./src/zookeeper) | 3.9.3 |
+| 服务 | 版本 |
+| -------------------------------------------------------------- | --------------------- |
+| [Apache APISIX](./src/apisix) | 3.13.0 |
+| [Apache Cassandra](./src/cassandra) | 5.0.2 |
+| [Apache Flink](./src/flink) | 1.20.0 |
+| [Apache HBase](./src/hbase) | 2.6 |
+| [Apache HTTP Server](./src/apache) | 2.4.62 |
+| [Apache Kafka](./src/kafka) | 7.8.0 |
+| [Apache Pulsar](./src/pulsar) | 4.0.7 |
+| [Apache RocketMQ](./src/rocketmq) | 5.3.1 |
+| [Bifrost Gateway](./src/bifrost-gateway) | v1.3.63 |
+| [Bolt.diy](./apps/bolt-diy) | latest |
+| [Budibase](./src/budibase) | 3.23.0 |
+| [BuildingAI](./apps/buildingai) | latest |
+| [Bytebot](./src/bytebot) | edge |
+| [Clash](./src/clash) | 1.18.0 |
+| [ClickHouse](./src/clickhouse) | 24.11.1 |
+| [Conductor](./src/conductor) | latest |
+| [DeepTutor](./apps/deeptutor) | latest |
+| [Dify](./apps/dify) | 0.18.2 |
+| [DNSMasq](./src/dnsmasq) | 2.91 |
+| [Dockge](./src/dockge) | 1 |
+| [Docker Android Emulator](./src/docker-android) | api-33 |
+| [Docker Registry](./src/docker-registry) | 3.0.0 |
+| [Doris](./src/doris) | 3.0.0 |
+| [DuckDB](./src/duckdb) | v1.1.3 |
+| [Easy Dataset](./apps/easy-dataset) | 1.5.1 |
+| [Elasticsearch](./src/elasticsearch) | 8.16.1 |
+| [etcd](./src/etcd) | 3.6.0 |
+| [FalkorDB](./src/falkordb) | v4.14.11 |
+| [Firecrawl](./src/firecrawl) | latest |
+| [Flowise](./src/flowise) | 3.0.12 |
+| [frpc](./src/frpc) | 0.65.0 |
+| [frps](./src/frps) | 0.65.0 |
+| [Gitea Runner](./src/gitea-runner) | 0.2.13 |
+| [Gitea](./src/gitea) | 1.25.4-rootless |
+| [GitLab Runner](./src/gitlab-runner) | 17.10.1 |
+| [GitLab](./src/gitlab) | 17.10.4-ce.0 |
+| [GPUStack](./src/gpustack) | v0.5.3 |
+| [Grafana](./src/grafana) | 12.1.1 |
+| [Grafana Loki](./src/loki) | 3.3.2 |
+| [Grafana Tempo](./src/tempo) | 2.7.2 |
+| [Halo](./src/halo) | 2.21.9 |
+| [Harbor](./src/harbor) | v2.12.0 |
+| [HashiCorp Consul](./src/consul) | 1.20.3 |
+| [InfluxDB](./src/influxdb) | 2.8.0 |
+| [Jenkins](./src/jenkins) | 2.486-lts |
+| [JODConverter](./src/jodconverter) | latest |
+| [Kestra](./src/kestra) | latest-full |
+| [Kibana](./src/kibana) | 8.16.1 |
+| [Kodbox](./src/kodbox) | 1.62 |
+| [Kong](./src/kong) | 3.8.0 |
+| [Langflow](./apps/langflow) | latest |
+| [Langfuse](./apps/langfuse) | 3.115.0 |
+| [LibreOffice](./src/libreoffice) | latest |
+| [libSQL Server](./src/libsql) | latest |
+| [LiteLLM](./src/litellm) | main-stable |
+| [llama.cpp](./src/llama.cpp) | server |
+| [LMDeploy](./src/lmdeploy) | v0.11.1 |
+| [Logstash](./src/logstash) | 8.16.1 |
+| [MariaDB Galera Cluster](./src/mariadb-galera) | 11.7.2 |
+| [Memos](./src/memos) | 0.25.3 |
+| [Milvus Standalone Embed](./src/milvus-standalone-embed) | v2.6.7 |
+| [Milvus Standalone](./src/milvus-standalone) | v2.6.7 |
+| [Minecraft Bedrock Server](./src/minecraft-bedrock-server) | latest |
+| [MinIO](./src/minio) | 0.20251015 |
+| [MLflow](./src/mlflow) | v2.20.2 |
+| [MoltBot](./apps/moltbot) | main |
+| [MongoDB ReplicaSet Single](./src/mongodb-replicaset-single) | 8.2.3 |
+| [MongoDB ReplicaSet](./src/mongodb-replicaset) | 8.2.3 |
+| [MongoDB Standalone](./src/mongodb-standalone) | 8.2.3 |
+| [MySQL](./src/mysql) | 9.4.0 |
+| [n8n](./apps/n8n) | 1.114.0 |
+| [Nacos](./src/nacos) | v3.1.0 |
+| [NebulaGraph](./src/nebulagraph) | v3.8.0 |
+| [NexaSDK](./src/nexa-sdk) | v0.2.62 |
+| [Neo4j](./src/neo4j) | 5.27.4 |
+| [Netdata](./src/netdata) | latest |
+| [Nginx](./src/nginx) | 1.29.1 |
+| [Node Exporter](./src/node-exporter) | v1.8.2 |
+| [OceanBase](./src/oceanbase) | 4.3.3 |
+| [Odoo](./src/odoo) | 19.0 |
+| [Ollama](./src/ollama) | 0.12.0 |
+| [Open WebUI](./src/open-webui) | main |
+| [Phoenix (Arize)](./src/phoenix) | 12.31.2-nonroot |
+| [Pingora Proxy Manager](./src/pingora-proxy-manager) | v1.0.3 |
+| [Open WebUI Rust](./src/open-webui-rust) | latest |
+| [OpenCode](./src/opencode) | 1.1.27 |
+| [OpenCoze](./apps/opencoze) | See Docs |
+| [OpenCut](./src/opencut) | latest |
+| [OpenList](./src/openlist) | latest |
+| [OpenLIT](./apps/openlit) | latest |
+| [OpenObserve](./apps/openobserve) | v0.50.0(enterprise) |
+| [OpenSearch](./src/opensearch) | 2.19.0 |
+| [OpenTelemetry Collector](./src/otel-collector) | 0.115.1 |
+| [Overleaf](./src/overleaf) | 5.2.1 |
+| [PocketBase](./src/pocketbase) | 0.30.0 |
+| [Podman](./src/podman) | v5.7.1 |
+| [Pogocache](./src/pogocache) | 1.3.1 |
+| [Portainer](./src/portainer) | 2.27.3-alpine |
+| [Portkey AI Gateway](./src/portkey-gateway) | latest |
+| [PostgreSQL](./src/postgres) | 17.6 |
+| [Prometheus](./src/prometheus) | 3.5.0 |
+| [PyTorch](./src/pytorch) | 2.6.0 |
+| [Qdrant](./src/qdrant) | 1.15.4 |
+| [RabbitMQ](./src/rabbitmq) | 4.1.4 |
+| [Ray](./src/ray) | 2.42.1 |
+| [Redpanda](./src/redpanda) | v24.3.1 |
+| [Redis Cluster](./src/redis-cluster) | 8.2.1 |
+| [Redis](./src/redis) | 8.2.1 |
+| [Renovate](./src/renovate) | 42.85.4-full |
+| [Restate Cluster](./src/restate-cluster) | 1.5.3 |
+| [Restate](./src/restate) | 1.5.3 |
+| [SearXNG](./src/searxng) | 2025.1.20-1ce14ef99 |
+| [Selenium](./src/selenium) | 144.0-20260120 |
+| [SigNoz](./src/signoz) | 0.55.0 |
+| [Sim](./apps/sim) | latest |
+| [Stable Diffusion WebUI](./apps/stable-diffusion-webui-docker) | latest |
+| [Stirling-PDF](./apps/stirling-pdf) | latest |
+| [Temporal](./src/temporal) | 1.24.2 |
+| [TiDB](./src/tidb) | v8.5.0 |
+| [TiKV](./src/tikv) | v8.5.0 |
+| [Trigger.dev](./src/trigger-dev) | v4.2.0 |
+| [TrailBase](./src/trailbase) | 0.22.4 |
+| [Valkey Cluster](./src/valkey-cluster) | 8.0 |
+| [Valkey](./src/valkey) | 8.0 |
+| [Verdaccio](./src/verdaccio) | 6.1.2 |
+| [vLLM](./src/vllm) | v0.13.0 |
+| [Windmill](./src/windmill) | main |
+| [ZooKeeper](./src/zookeeper) | 3.9.3 |

 ## MCP 服务器
apps/deeptutor/.env.example (new file): 97 lines
# DeepTutor Configuration
# Copy this file to .env and fill in your API keys

#! ==================================================
#! General Settings
#! ==================================================

# Timezone (default: UTC)
TZ=UTC

# User and Group ID for file permissions (default: 1000)
# Adjust if your host user has a different UID/GID
PUID=1000
PGID=1000

# Global registry prefix (optional)
# Example: registry.example.com/ or leave empty for Docker Hub/GHCR
GLOBAL_REGISTRY=

#! ==================================================
#! DeepTutor Version
#! ==================================================

# Image version (default: latest)
# Available tags: latest, v0.5.x
# See: https://github.com/HKUDS/DeepTutor/pkgs/container/deeptutor
DEEPTUTOR_VERSION=latest

#! ==================================================
#! Port Configuration
#! ==================================================

# Backend port (internal: 8001)
BACKEND_PORT=8001
# Host port override for backend
DEEPTUTOR_BACKEND_PORT_OVERRIDE=8001

# Frontend port (internal: 3782)
FRONTEND_PORT=3782
# Host port override for frontend
DEEPTUTOR_FRONTEND_PORT_OVERRIDE=3782

#! ==================================================
#! API Base URLs
#! ==================================================

# Internal API base URL (used by frontend to communicate with backend)
NEXT_PUBLIC_API_BASE=http://localhost:8001

# External API base URL (for cloud deployment, set to your public URL)
# Example: https://your-server.com:8001
# For local deployment, use the same as NEXT_PUBLIC_API_BASE
NEXT_PUBLIC_API_BASE_EXTERNAL=http://localhost:8001

#! ==================================================
#! LLM API Keys (Required)
#! ==================================================

# OpenAI API Key (Required)
# Get from: https://platform.openai.com/api-keys
OPENAI_API_KEY=sk-your-openai-api-key-here

# OpenAI Base URL (default: https://api.openai.com/v1)
# For OpenAI-compatible APIs (e.g., Azure OpenAI, custom endpoints)
OPENAI_BASE_URL=https://api.openai.com/v1

# Default LLM Model (default: gpt-4o)
# Options: gpt-4o, gpt-4-turbo, gpt-4, gpt-3.5-turbo, etc.
DEFAULT_MODEL=gpt-4o

#! ==================================================
#! Additional LLM API Keys (Optional)
#! ==================================================

# Anthropic API Key (Optional, for Claude models)
# Get from: https://console.anthropic.com/
ANTHROPIC_API_KEY=

# Perplexity API Key (Optional, for web search)
# Get from: https://www.perplexity.ai/settings/api
PERPLEXITY_API_KEY=

# DashScope API Key (Optional, for Alibaba Cloud models)
# Get from: https://dashscope.console.aliyun.com/
DASHSCOPE_API_KEY=

#! ==================================================
#! Resource Limits
#! ==================================================

# CPU limits (default: 4.00 cores limit, 1.00 cores reservation)
DEEPTUTOR_CPU_LIMIT=4.00
DEEPTUTOR_CPU_RESERVATION=1.00

# Memory limits (default: 8G limit, 2G reservation)
DEEPTUTOR_MEMORY_LIMIT=8G
DEEPTUTOR_MEMORY_RESERVATION=2G
apps/deeptutor/README.md (new file): 248 lines
# DeepTutor

[中文说明](README.zh.md) | English

## Overview

DeepTutor is an AI-powered personalized learning assistant that transforms any document into an interactive learning experience with multi-agent intelligence. It helps you solve problems, generate questions, conduct research, collaborate on writing, organize notes, and follow guided learning paths.

**Project:** <https://github.com/HKUDS/DeepTutor>
**License:** Apache-2.0
**Documentation:** <https://hkuds.github.io/DeepTutor/>

## Features

- **Problem Solving** — Detailed step-by-step solutions with visual diagrams
- **Question Generation** — Adaptive questions based on your knowledge level
- **Research Assistant** — Deep research with multi-agent collaboration
- **Co-Writer** — Interactive idea generation and writing assistance
- **Smart Notebook** — Organize and retrieve learning materials efficiently
- **Guided Learning** — Personalized learning paths and progress tracking
- **Multi-Agent System** — Specialized agents for different learning tasks
- **RAG Integration** — LightRAG and RAG-Anything for knowledge retrieval
- **Code Execution** — Built-in code playground for practice

## Quick Start

### Prerequisites

- Docker and Docker Compose
- OpenAI API key (required)
- Optional: Anthropic, Perplexity, or DashScope API keys

### Installation

1. **Clone this repository**

   ```bash
   git clone <your-compose-anything-repo>
   cd apps/deeptutor
   ```

2. **Configure environment**

   ```bash
   cp .env.example .env
   # Edit .env and add your API keys
   ```

   **Required configuration:**

   - `OPENAI_API_KEY` — Your OpenAI API key

   **Optional configuration:**

   - `ANTHROPIC_API_KEY` — For Claude models
   - `PERPLEXITY_API_KEY` — For web search
   - `DASHSCOPE_API_KEY` — For Alibaba Cloud models
   - Adjust ports if needed (default: 8001 for backend, 3782 for frontend)
   - Set `NEXT_PUBLIC_API_BASE_EXTERNAL` for cloud deployments

3. **Optional: Custom agent configuration**

   Create a `config/agents.yaml` file to customize agent behaviors (see the [documentation](https://hkuds.github.io/DeepTutor/guide/config.html) for details).

4. **Start the service**

   ```bash
   docker compose up -d
   ```

   The first run takes approximately 30-60 seconds to initialize.

5. **Access the application**

   - **Frontend:** <http://localhost:3782>
   - **Backend API:** <http://localhost:8001>
   - **API Documentation:** <http://localhost:8001/docs>

## Usage

### Create Knowledge Base

1. Navigate to <http://localhost:3782/knowledge>
2. Click "New Knowledge Base"
3. Upload documents (supports PDF, DOCX, TXT, Markdown, HTML, etc.)
4. Wait for processing to complete

### Learning Modes

- **Solve** — Get step-by-step solutions to problems
- **Question** — Generate practice questions based on your materials
- **Research** — Deep research with multi-agent collaboration
- **Co-Writer** — Interactive writing and idea generation
- **Notebook** — Organize and manage your learning materials
- **Guide** — Follow personalized learning paths

### Advanced Features

- **Code Execution** — Practice coding directly in the interface
- **Visual Diagrams** — Automatic diagram generation for complex concepts
- **Export** — Download your work as PDF or Markdown
- **Multi-language** — Support for multiple languages

## Configuration

### Environment Variables

Key environment variables (see [.env.example](.env.example) for all options):

| Variable | Default | Description |
| ------------------------ | ---------- | ------------------------- |
| `OPENAI_API_KEY` | (required) | Your OpenAI API key |
| `DEFAULT_MODEL` | `gpt-4o` | Default LLM model |
| `BACKEND_PORT` | `8001` | Backend server port |
| `FRONTEND_PORT` | `3782` | Frontend application port |
| `DEEPTUTOR_CPU_LIMIT` | `4.00` | CPU limit (cores) |
| `DEEPTUTOR_MEMORY_LIMIT` | `8G` | Memory limit |
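For a quick local sanity check, a minimal `.env` only needs the required key plus any host-port overrides; every variable below comes from `.env.example`, and the key value is a placeholder:

```env
# Minimal local setup (illustrative values; substitute your own key)
OPENAI_API_KEY=sk-your-openai-api-key-here
DEFAULT_MODEL=gpt-4o
# Only needed if the default host ports collide with other services
DEEPTUTOR_BACKEND_PORT_OVERRIDE=8001
DEEPTUTOR_FRONTEND_PORT_OVERRIDE=3782
```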
### Ports

- **8001** — Backend API server
- **3782** — Frontend web interface

### Volumes

- `deeptutor_data` — User data, knowledge bases, and learning materials
- `./config` — Custom agent configurations (optional)
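Since knowledge bases live in the `deeptutor_data` volume, it is worth archiving it before upgrades. A minimal sketch, following the same backup pattern MoltBot uses later in this changeset; depending on your Compose project name the volume may be prefixed (check with `docker volume ls`), and the archive name is arbitrary:

```bash
# Archive the data volume into the current directory
docker run --rm -v deeptutor_data:/data -v $(pwd):/backup alpine tar czf /backup/deeptutor-data-backup.tar.gz /data
```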
## Resource Requirements

**Minimum:**

- CPU: 1 core
- Memory: 2GB
- Disk: 2GB + space for knowledge bases

**Recommended:**

- CPU: 4 cores
- Memory: 8GB
- Disk: 10GB+

## Supported Models

DeepTutor supports multiple LLM providers:

- **OpenAI** — GPT-4, GPT-4 Turbo, GPT-3.5 Turbo
- **Anthropic** — Claude 3 (Opus, Sonnet, Haiku)
- **Perplexity** — For web search integration
- **DashScope** — Alibaba Cloud models
- **OpenAI-compatible APIs** — Any API compatible with the OpenAI format

## Troubleshooting

### Backend fails to start

- Verify `OPENAI_API_KEY` is set correctly in `.env`
- Check logs: `docker compose logs -f`
- Ensure ports 8001 and 3782 are not in use
- Verify sufficient disk space for volumes

### Frontend cannot connect to backend

- Confirm the backend is running: visit <http://localhost:8001/docs>
- For cloud deployments, set `NEXT_PUBLIC_API_BASE_EXTERNAL` to your public URL
- Check firewall settings

### Knowledge base processing fails

- Ensure sufficient memory (8GB+ recommended)
- Check that the document format is supported
- Review logs for specific errors

### API rate limits

- Monitor your API usage on provider dashboards
- Consider upgrading your API plan
- Use different models for different tasks

## Security Notes

- **API Keys** — Keep your API keys secure; never commit them to version control
- **Network Exposure** — For production deployments, use HTTPS and proper authentication
- **Data Privacy** — User data is stored in Docker volumes; ensure proper backup and security
- **Resource Limits** — Set appropriate CPU and memory limits to prevent resource exhaustion

## Updates

To update to the latest version:

```bash
# Pull the latest image
docker compose pull

# Recreate containers
docker compose up -d
```

To update to a specific version, edit `DEEPTUTOR_VERSION` in `.env` and run:

```bash
docker compose up -d
```

## Advanced Usage

### Custom Agent Configuration

Create `config/agents.yaml` to customize agent behaviors:

```yaml
agents:
  solver:
    model: gpt-4o
    temperature: 0.7
  researcher:
    model: gpt-4-turbo
    max_tokens: 4000
```

See the [official documentation](https://hkuds.github.io/DeepTutor/guide/config.html) for detailed configuration options.

### Cloud Deployment

Cloud deployment needs some additional configuration:

1. Set the public URL in `.env`:

   ```env
   NEXT_PUBLIC_API_BASE_EXTERNAL=https://your-domain.com:8001
   ```

2. Configure a reverse proxy (nginx/Caddy) for HTTPS, as sketched below
3. Ensure proper firewall rules
4. Consider using environment-specific secrets management
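One possible shape for step 2, using Caddy (which provisions certificates automatically); the hostnames are placeholders, and this is a sketch rather than a supported configuration:

```bash
# Write a hypothetical Caddyfile that terminates HTTPS for both ports,
# then run Caddy against it
cat > Caddyfile <<'EOF'
your-domain.com {
    reverse_proxy 127.0.0.1:3782
}
api.your-domain.com {
    reverse_proxy 127.0.0.1:8001
}
EOF
caddy run --config Caddyfile
```

With a layout like this, `NEXT_PUBLIC_API_BASE_EXTERNAL` would point at `https://api.your-domain.com` rather than the `:8001` form shown in step 1.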
### Using Different Embedding Models

DeepTutor uses `text-embedding-3-large` by default. To use different embedding models, refer to the [official documentation](https://hkuds.github.io/DeepTutor/guide/config.html).

## Links

- **GitHub:** <https://github.com/HKUDS/DeepTutor>
- **Documentation:** <https://hkuds.github.io/DeepTutor/>
- **Issues:** <https://github.com/HKUDS/DeepTutor/issues>
- **Discussions:** <https://github.com/HKUDS/DeepTutor/discussions>

## License

DeepTutor is licensed under the Apache-2.0 License. See the [official repository](https://github.com/HKUDS/DeepTutor) for details.
apps/deeptutor/README.zh.md (new file): 248 lines
# DeepTutor

中文说明 | [English](README.md)

## 概述

DeepTutor 是一个 AI 驱动的个性化学习助手,通过多智能体系统将任何文档转化为交互式学习体验。它可以帮助您解决问题、生成题目、进行研究、协作写作、整理笔记,并引导您完成学习路径。

**项目地址:** <https://github.com/HKUDS/DeepTutor>
**许可证:** Apache-2.0
**文档:** <https://hkuds.github.io/DeepTutor/>

## 功能特性

- **问题求解** — 提供详细的分步解决方案和可视化图表
- **题目生成** — 根据您的知识水平生成自适应题目
- **研究助手** — 通过多智能体协作进行深度研究
- **协作写作** — 交互式创意生成和写作辅助
- **智能笔记** — 高效组织和检索学习材料
- **引导学习** — 个性化学习路径和进度跟踪
- **多智能体系统** — 针对不同学习任务的专业智能体
- **RAG 集成** — 使用 LightRAG 和 RAG-Anything 进行知识检索
- **代码执行** — 内置代码练习环境

## 快速开始

### 前置要求

- Docker 和 Docker Compose
- OpenAI API 密钥(必需)
- 可选:Anthropic、Perplexity 或 DashScope API 密钥

### 安装步骤

1. **克隆仓库**

   ```bash
   git clone <your-compose-anything-repo>
   cd apps/deeptutor
   ```

2. **配置环境变量**

   ```bash
   cp .env.example .env
   # 编辑 .env 文件并添加您的 API 密钥
   ```

   **必需配置:**

   - `OPENAI_API_KEY` — 您的 OpenAI API 密钥

   **可选配置:**

   - `ANTHROPIC_API_KEY` — 用于 Claude 模型
   - `PERPLEXITY_API_KEY` — 用于网络搜索
   - `DASHSCOPE_API_KEY` — 用于阿里云模型
   - 如需调整端口(默认:后端 8001,前端 3782)
   - 云端部署时设置 `NEXT_PUBLIC_API_BASE_EXTERNAL`

3. **可选:自定义智能体配置**

   创建 `config/agents.yaml` 文件以自定义智能体行为(详见[文档](https://hkuds.github.io/DeepTutor/guide/config.html))。

4. **启动服务**

   ```bash
   docker compose up -d
   ```

   首次运行需要约 30-60 秒初始化。

5. **访问应用**

   - **前端界面:** <http://localhost:3782>
   - **后端 API:** <http://localhost:8001>
   - **API 文档:** <http://localhost:8001/docs>

## 使用方法

### 创建知识库

1. 访问 <http://localhost:3782/knowledge>
2. 点击"新建知识库"
3. 上传文档(支持 PDF、DOCX、TXT、Markdown、HTML 等)
4. 等待处理完成

### 学习模式

- **求解(Solve)** — 获取问题的分步解决方案
- **题目(Question)** — 基于学习材料生成练习题
- **研究(Research)** — 通过多智能体协作进行深度研究
- **协作写作(Co-Writer)** — 交互式写作和创意生成
- **笔记(Notebook)** — 组织和管理学习材料
- **引导(Guide)** — 遵循个性化学习路径

### 高级功能

- **代码执行** — 在界面中直接练习编码
- **可视化图表** — 为复杂概念自动生成图表
- **导出** — 将您的工作下载为 PDF 或 Markdown
- **多语言支持** — 支持多种语言

## 配置说明

### 环境变量

主要环境变量(所有选项见 [.env.example](.env.example)):

| 变量 | 默认值 | 描述 |
| ------------------------ | -------- | -------------------- |
| `OPENAI_API_KEY` | (必需) | 您的 OpenAI API 密钥 |
| `DEFAULT_MODEL` | `gpt-4o` | 默认 LLM 模型 |
| `BACKEND_PORT` | `8001` | 后端服务器端口 |
| `FRONTEND_PORT` | `3782` | 前端应用端口 |
| `DEEPTUTOR_CPU_LIMIT` | `4.00` | CPU 限制(核心数) |
| `DEEPTUTOR_MEMORY_LIMIT` | `8G` | 内存限制 |

### 端口说明

- **8001** — 后端 API 服务器
- **3782** — 前端 Web 界面

### 数据卷

- `deeptutor_data` — 用户数据、知识库和学习材料
- `./config` — 自定义智能体配置(可选)

## 资源要求

**最低配置:**

- CPU:1 核心
- 内存:2GB
- 磁盘:2GB + 知识库所需空间

**推荐配置:**

- CPU:4 核心
- 内存:8GB
- 磁盘:10GB+

## 支持的模型

DeepTutor 支持多个 LLM 提供商:

- **OpenAI** — GPT-4、GPT-4 Turbo、GPT-3.5 Turbo
- **Anthropic** — Claude 3(Opus、Sonnet、Haiku)
- **Perplexity** — 用于网络搜索集成
- **DashScope** — 阿里云模型
- **OpenAI 兼容 API** — 任何与 OpenAI 格式兼容的 API

## 故障排查

### 后端启动失败

- 验证 `.env` 中的 `OPENAI_API_KEY` 是否正确设置
- 查看日志:`docker compose logs -f`
- 确保端口 8001 和 3782 未被占用
- 验证数据卷有足够的磁盘空间

### 前端无法连接后端

- 确认后端正在运行:访问 <http://localhost:8001/docs>
- 云端部署时,将 `NEXT_PUBLIC_API_BASE_EXTERNAL` 设置为您的公网 URL
- 检查防火墙设置

### 知识库处理失败

- 确保有足够的内存(推荐 8GB+)
- 检查文档格式是否支持
- 查看日志了解具体错误

### API 速率限制

- 在提供商控制台监控 API 使用情况
- 考虑升级 API 计划
- 为不同任务使用不同模型

## 安全提示

- **API 密钥** — 妥善保管您的 API 密钥,切勿提交到版本控制系统
- **网络暴露** — 生产环境部署时,使用 HTTPS 和适当的身份验证
- **数据隐私** — 用户数据存储在 Docker 卷中,请确保适当的备份和安全措施
- **资源限制** — 设置合适的 CPU 和内存限制以防止资源耗尽

## 更新

更新到最新版本:

```bash
# 拉取最新镜像
docker compose pull

# 重新创建容器
docker compose up -d
```

更新到特定版本,编辑 `.env` 中的 `DEEPTUTOR_VERSION` 并运行:

```bash
docker compose up -d
```

## 高级用法

### 自定义智能体配置

创建 `config/agents.yaml` 以自定义智能体行为:

```yaml
agents:
  solver:
    model: gpt-4o
    temperature: 0.7
  researcher:
    model: gpt-4-turbo
    max_tokens: 4000
```

详细配置选项请参见[官方文档](https://hkuds.github.io/DeepTutor/guide/config.html)。

### 云端部署

云端部署需要额外配置:

1. 在 `.env` 中设置公网 URL:

   ```env
   NEXT_PUBLIC_API_BASE_EXTERNAL=https://your-domain.com:8001
   ```

2. 配置反向代理(nginx/Caddy)以支持 HTTPS
3. 确保适当的防火墙规则
4. 考虑使用特定环境的密钥管理

### 使用不同的嵌入模型

DeepTutor 默认使用 `text-embedding-3-large`。要使用不同的嵌入模型,请参考[官方文档](https://hkuds.github.io/DeepTutor/guide/config.html)。

## 相关链接

- **GitHub:** <https://github.com/HKUDS/DeepTutor>
- **文档:** <https://hkuds.github.io/DeepTutor/>
- **问题反馈:** <https://github.com/HKUDS/DeepTutor/issues>
- **讨论区:** <https://github.com/HKUDS/DeepTutor/discussions>

## 许可证

DeepTutor 使用 Apache-2.0 许可证。详情请参见[官方仓库](https://github.com/HKUDS/DeepTutor)。
apps/deeptutor/docker-compose.yaml (new file): 68 lines
# DeepTutor: AI-Powered Personalized Learning Assistant
# https://github.com/HKUDS/DeepTutor
# Transform any document into an interactive learning experience with multi-agent intelligence

x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  deeptutor:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-ghcr.io}/hkuds/deeptutor:${DEEPTUTOR_VERSION:-latest}
    ports:
      - "${DEEPTUTOR_BACKEND_PORT_OVERRIDE:-8001}:${BACKEND_PORT:-8001}"
      - "${DEEPTUTOR_FRONTEND_PORT_OVERRIDE:-3782}:${FRONTEND_PORT:-3782}"
    volumes:
      - deeptutor_data:/app/data
      - ./config:/app/config:ro
    environment:
      - TZ=${TZ:-UTC}
      # Backend port
      - BACKEND_PORT=${BACKEND_PORT:-8001}
      # Frontend port
      - FRONTEND_PORT=${FRONTEND_PORT:-3782}
      # API base URLs
      - NEXT_PUBLIC_API_BASE=${NEXT_PUBLIC_API_BASE:-http://localhost:8001}
      - NEXT_PUBLIC_API_BASE_EXTERNAL=${NEXT_PUBLIC_API_BASE_EXTERNAL:-http://localhost:8001}
      # LLM API keys
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - OPENAI_BASE_URL=${OPENAI_BASE_URL:-https://api.openai.com/v1}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - PERPLEXITY_API_KEY=${PERPLEXITY_API_KEY:-}
      - DASHSCOPE_API_KEY=${DASHSCOPE_API_KEY:-}
      # Default LLM model
      - DEFAULT_MODEL=${DEFAULT_MODEL:-gpt-4o}
      # User ID and Group ID for permission management
      - PUID=${PUID:-1000}
      - PGID=${PGID:-1000}
    healthcheck:
      # CMD-SHELL runs the probe through a shell so that "|| exit 1" is
      # evaluated as a fallback, rather than being passed to curl as
      # literal arguments (which is what the plain CMD exec form would do)
      test: ["CMD-SHELL", "curl -f http://localhost:${BACKEND_PORT:-8001}/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    deploy:
      resources:
        limits:
          cpus: ${DEEPTUTOR_CPU_LIMIT:-4.00}
          memory: ${DEEPTUTOR_MEMORY_LIMIT:-8G}
        reservations:
          cpus: ${DEEPTUTOR_CPU_RESERVATION:-1.00}
          memory: ${DEEPTUTOR_MEMORY_RESERVATION:-2G}

volumes:
  deeptutor_data:
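Once the `start_period` has elapsed, the state reported by the healthcheck above can be checked directly; a quick sketch assuming the stack is running from this directory:

```bash
# Print the container's health status (starting / healthy / unhealthy)
docker inspect --format '{{.State.Health.Status}}' "$(docker compose ps -q deeptutor)"
```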
apps/moltbot/.env.example (new file): 54 lines
# MoltBot Environment Configuration
# Copy this file to .env and configure the values

# Timezone (default: UTC)
TZ=UTC

# Global container registry prefix (optional)
# Examples: docker.io/, ghcr.io/, your-registry.com/
GLOBAL_REGISTRY=

# MoltBot Version
# Use 'main' for latest, or specific version tag like 'v2026.1.27'
MOLTBOT_VERSION=main

# === Gateway Configuration ===
# Gateway access token (REQUIRED - generate a secure random token)
# Example: openssl rand -hex 32
MOLTBOT_GATEWAY_TOKEN=your-secure-token-here

# Gateway bind address
# Options: loopback (127.0.0.1), lan (0.0.0.0 for LAN access)
MOLTBOT_GATEWAY_BIND=lan

# Gateway internal port (default: 18789)
MOLTBOT_GATEWAY_PORT=18789

# Gateway host port override (default: 18789)
MOLTBOT_GATEWAY_PORT_OVERRIDE=18789

# Bridge port override (default: 18790)
MOLTBOT_BRIDGE_PORT_OVERRIDE=18790

# === Model API Keys (Optional - if not using OAuth) ===
# Anthropic Claude API Key
ANTHROPIC_API_KEY=

# OpenAI API Key
OPENAI_API_KEY=

# Claude AI Session Keys (for web session auth)
CLAUDE_AI_SESSION_KEY=
CLAUDE_WEB_SESSION_KEY=
CLAUDE_WEB_COOKIE=

# === Resource Limits ===
# Gateway service resource limits
MOLTBOT_CPU_LIMIT=2.0
MOLTBOT_MEMORY_LIMIT=2G
MOLTBOT_CPU_RESERVATION=1.0
MOLTBOT_MEMORY_RESERVATION=1G

# CLI service resource limits
MOLTBOT_CLI_CPU_LIMIT=1.0
MOLTBOT_CLI_MEMORY_LIMIT=512M
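To avoid leaving the placeholder token in place, the generated value can be written straight into `.env`; a one-liner sketch combining the `openssl` example above with GNU sed (on macOS/BSD, use `sed -i ''` instead):

```bash
# Replace the placeholder gateway token with a freshly generated one
sed -i "s/^MOLTBOT_GATEWAY_TOKEN=.*/MOLTBOT_GATEWAY_TOKEN=$(openssl rand -hex 32)/" .env
```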
apps/moltbot/README.md (new file): 214 lines
# MoltBot

MoltBot is a personal AI assistant that runs on your own devices. It integrates with multiple messaging platforms (WhatsApp, Telegram, Slack, Discord, Google Chat, Signal, iMessage, Microsoft Teams, WebChat) and provides AI-powered assistance across all your channels.

## Features

- **Multi-channel Support**: WhatsApp, Telegram, Slack, Discord, Google Chat, Signal, iMessage, BlueBubbles, Microsoft Teams, Matrix, Zalo, WebChat
- **Local-first Gateway**: Single control plane for sessions, channels, tools, and events
- **Multi-agent Routing**: Route inbound channels to isolated agents with per-agent sessions
- **Voice Wake + Talk Mode**: Always-on speech for macOS/iOS/Android with ElevenLabs
- **Live Canvas**: Agent-driven visual workspace with A2UI
- **First-class Tools**: Browser, canvas, nodes, cron, sessions, and channel-specific actions
- **Companion Apps**: macOS menu bar app + iOS/Android nodes
- **Skills Platform**: Bundled, managed, and workspace skills with install gating

## Quick Start

1. Copy the example environment file:

   ```bash
   cp .env.example .env
   ```

2. Generate a secure gateway token:

   ```bash
   # Using OpenSSL
   openssl rand -hex 32

   # Or using Python
   python3 -c "import secrets; print(secrets.token_hex(32))"
   ```

3. Edit `.env` and set at least:

   - `MOLTBOT_GATEWAY_TOKEN` - Your generated token
   - `ANTHROPIC_API_KEY` or `OPENAI_API_KEY` - If using API key auth

4. Start the gateway:

   ```bash
   docker compose up -d
   ```

5. Access the Control UI:

   - Open <http://localhost:18789> in your browser
   - Enter your gateway token when prompted

## Configuration

### Gateway Access

The gateway can be accessed in two ways:

- **Loopback** (`MOLTBOT_GATEWAY_BIND=loopback`): Only accessible from the host machine (127.0.0.1)
- **LAN** (`MOLTBOT_GATEWAY_BIND=lan`): Accessible from your local network (0.0.0.0)

For production deployments, consider:

- Using Tailscale Serve/Funnel for secure remote access
- Setting up SSH tunnels (see the sketch below)
- Implementing a reverse proxy with authentication
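For the SSH-tunnel option with a loopback-bound gateway, a minimal sketch (the hostname is a placeholder):

```bash
# Forward the remote gateway port to this machine over SSH,
# then open http://localhost:18789 locally
ssh -N -L 18789:127.0.0.1:18789 user@your-server
```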
### Model Configuration

MoltBot supports multiple AI model providers:

- **Anthropic Claude** (Recommended): Claude Pro/Max with OAuth or API key
- **OpenAI**: ChatGPT/Codex with OAuth or API key
- **Custom Providers**: Configure via the Control UI or config file

Set API keys in `.env` or use OAuth authentication through the onboarding wizard.

### Channel Integration

To connect messaging platforms:

1. **WhatsApp**: Use the CLI to link a device

   ```bash
   docker compose run --rm moltbot-cli channels login
   ```

2. **Telegram**: Set `TELEGRAM_BOT_TOKEN` in config

3. **Discord**: Set `DISCORD_BOT_TOKEN` in config

4. **Slack**: Set `SLACK_BOT_TOKEN` and `SLACK_APP_TOKEN` in config

See the [official documentation](https://docs.molt.bot/channels) for detailed setup instructions.

## Using the CLI

The CLI service is available via the `cli` profile:

```bash
# Run onboarding wizard
docker compose run --rm --service-ports moltbot-cli onboard

# List providers
docker compose run --rm moltbot-cli providers list

# Send a message
docker compose run --rm moltbot-cli message send --to +1234567890 --message "Hello"

# Check health
docker compose run --rm moltbot-cli health --port 18789
```

## Security Considerations

1. **Gateway Token**: Keep your gateway token secure. It is the authentication method for the Control UI and WebSocket connections.

2. **DM Access**: By default, MoltBot uses pairing mode for direct messages from unknown senders. They receive a pairing code that you must approve.

3. **Network Exposure**: If exposing the gateway beyond localhost, use proper authentication and encryption:

   - Set up Tailscale for secure remote access
   - Use SSH tunnels
   - Implement a reverse proxy with HTTPS and authentication

4. **API Keys**: Never commit API keys to version control. Use the `.env` file or secrets management.

5. **Sandbox Mode**: For group/channel safety, enable sandbox mode to run non-main sessions in Docker containers.

## Advanced Configuration

### Resource Limits

Adjust CPU and memory limits in `.env`:

```env
MOLTBOT_CPU_LIMIT=2.0
MOLTBOT_MEMORY_LIMIT=2G
MOLTBOT_CPU_RESERVATION=1.0
MOLTBOT_MEMORY_RESERVATION=1G
```

### Persistent Data

Data is stored in two Docker volumes:

- `moltbot_config`: Configuration files and credentials (~/.clawdbot)
- `moltbot_workspace`: Agent workspace and skills (~/clawd)

To back up your data:

```bash
docker run --rm -v moltbot_config:/data -v $(pwd):/backup alpine tar czf /backup/moltbot-config-backup.tar.gz /data
docker run --rm -v moltbot_workspace:/data -v $(pwd):/backup alpine tar czf /backup/moltbot-workspace-backup.tar.gz /data
```
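The inverse operation is a sketch along the same lines: the archives store paths under `data/`, so extracting from `/` puts files back into the mounted volume (stop the stack first to avoid writes during restore):

```bash
# Restore the config volume from a previously created backup archive
docker run --rm -v moltbot_config:/data -v $(pwd):/backup alpine sh -c "cd / && tar xzf /backup/moltbot-config-backup.tar.gz"
```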
### Custom Configuration File

Create a custom config file at `~/.clawdbot/moltbot.json` (inside the container):

```json
{
  "agents": {
    "defaults": {
      "model": {
        "primary": "anthropic/claude-opus-4-5",
        "fallbacks": ["anthropic/claude-sonnet-4-5", "openai/gpt-4o"]
      }
    }
  }
}
```

## Troubleshooting

### Gateway Won't Start

1. Check logs: `docker compose logs moltbot-gateway`
2. Verify the gateway token is set in `.env`
3. Ensure port 18789 is not already in use

### Can't Access Control UI

1. Verify the gateway bind setting matches your access method
2. Check firewall rules if accessing from another machine
3. Ensure the container is healthy: `docker compose ps`

### Model API Errors

1. Verify API keys are correctly set in `.env`
2. Check API key validity and quota
3. Review logs for specific error messages

### Run the Doctor Command

The doctor command helps diagnose common issues:

```bash
docker compose run --rm moltbot-cli doctor
```

## Documentation

- [Official Website](https://molt.bot)
- [Full Documentation](https://docs.molt.bot)
- [Getting Started Guide](https://docs.molt.bot/start/getting-started)
- [Configuration Reference](https://docs.molt.bot/gateway/configuration)
- [Security Guide](https://docs.molt.bot/gateway/security)
- [Docker Installation](https://docs.molt.bot/install/docker)
- [GitHub Repository](https://github.com/moltbot/moltbot)

## License

MoltBot is released under the MIT License. See the [LICENSE](https://github.com/moltbot/moltbot/blob/main/LICENSE) file for details.

## Community

- [Discord](https://discord.gg/clawd)
- [GitHub Discussions](https://github.com/moltbot/moltbot/discussions)
- [Issues](https://github.com/moltbot/moltbot/issues)
214
apps/moltbot/README.zh.md
Normal file
@@ -0,0 +1,214 @@
# MoltBot

MoltBot 是一个运行在你自己设备上的个人 AI 助手。它集成了多个消息平台(WhatsApp、Telegram、Slack、Discord、Google Chat、Signal、iMessage、Microsoft Teams、WebChat),并在所有频道上提供 AI 驱动的帮助。

## 功能特性

- **多频道支持**:WhatsApp、Telegram、Slack、Discord、Google Chat、Signal、iMessage、BlueBubbles、Microsoft Teams、Matrix、Zalo、WebChat
- **本地优先网关**:会话、频道、工具和事件的统一控制平面
- **多代理路由**:将入站频道路由到具有独立会话的隔离代理
- **语音唤醒 + 对话模式**:macOS/iOS/Android 上的永久在线语音支持(使用 ElevenLabs)
- **实时画布**:由代理驱动的可视化工作空间,支持 A2UI
- **一流工具**:浏览器、画布、节点、定时任务、会话和特定频道的操作
- **配套应用**:macOS 菜单栏应用 + iOS/Android 节点
- **技能平台**:内置、托管和工作区技能,支持安装门控

## 快速开始

1. 复制示例环境文件:

   ```bash
   cp .env.example .env
   ```

2. 生成安全的网关令牌:

   ```bash
   # 使用 OpenSSL
   openssl rand -hex 32

   # 或使用 Python
   python3 -c "import secrets; print(secrets.token_hex(32))"
   ```

3. 编辑 `.env` 文件,至少设置:
   - `MOLTBOT_GATEWAY_TOKEN` - 你生成的令牌
   - `ANTHROPIC_API_KEY` 或 `OPENAI_API_KEY` - 如果使用 API 密钥认证

4. 启动网关:

   ```bash
   docker compose up -d
   ```

5. 访问控制界面:
   - 在浏览器中打开 <http://localhost:18789>
   - 在提示时输入你的网关令牌

## 配置

### 网关访问

网关可以通过两种方式访问:

- **回环地址**(`MOLTBOT_GATEWAY_BIND=loopback`):仅从主机访问(127.0.0.1)
- **局域网**(`MOLTBOT_GATEWAY_BIND=lan`):从本地网络访问(0.0.0.0)

对于生产部署,建议:

- 使用 Tailscale Serve/Funnel 进行安全的远程访问
- 设置 SSH 隧道
- 实现带认证的反向代理

### 模型配置

MoltBot 支持多个 AI 模型提供商:

- **Anthropic Claude**(推荐):Claude Pro/Max,支持 OAuth 或 API 密钥
- **OpenAI**:ChatGPT/Codex,支持 OAuth 或 API 密钥
- **自定义提供商**:通过控制界面或配置文件进行配置

在 `.env` 文件中设置 API 密钥,或通过入门向导使用 OAuth 认证。

### 频道集成

连接消息平台:

1. **WhatsApp**:使用 CLI 链接设备

   ```bash
   docker compose run --rm moltbot-cli channels login
   ```

2. **Telegram**:在配置中设置 `TELEGRAM_BOT_TOKEN`

3. **Discord**:在配置中设置 `DISCORD_BOT_TOKEN`

4. **Slack**:在配置中设置 `SLACK_BOT_TOKEN` 和 `SLACK_APP_TOKEN`

详细设置说明请参阅[官方文档](https://docs.molt.bot/channels)。

## 使用命令行界面

CLI 服务可通过 `cli` 配置文件使用:

```bash
# 运行入门向导
docker compose run --rm --service-ports moltbot-cli onboard

# 列出提供商
docker compose run --rm moltbot-cli providers list

# 发送消息
docker compose run --rm moltbot-cli message send --to +1234567890 --message "你好"

# 检查健康状态
docker compose run --rm moltbot-cli health --port 18789
```

## 安全注意事项

1. **网关令牌**:保护好你的网关令牌。这是控制界面和 WebSocket 连接的认证方式。

2. **私信访问**:默认情况下,MoltBot 对来自未知发送者的私信使用配对模式:对方会收到一个配对码,需要你批准。

3. **网络暴露**:如果在 localhost 之外暴露网关,请使用适当的认证和加密:
   - 设置 Tailscale 进行安全的远程访问
   - 使用 SSH 隧道
   - 实现带 HTTPS 和认证的反向代理

4. **API 密钥**:永远不要将 API 密钥提交到版本控制。使用 `.env` 文件或密钥管理。

5. **沙箱模式**:为了群组/频道安全,启用沙箱模式以在 Docker 容器中运行非主会话。

## 高级配置

### 资源限制

在 `.env` 文件中调整 CPU 和内存限制:

```env
MOLTBOT_CPU_LIMIT=2.0
MOLTBOT_MEMORY_LIMIT=2G
MOLTBOT_CPU_RESERVATION=1.0
MOLTBOT_MEMORY_RESERVATION=1G
```

### 持久化数据

数据存储在两个 Docker 卷中:

- `moltbot_config`:配置文件和凭据(`~/.clawdbot`)
- `moltbot_workspace`:代理工作区和技能(`~/clawd`)

备份数据:

```bash
docker run --rm -v moltbot_config:/data -v $(pwd):/backup alpine tar czf /backup/moltbot-config-backup.tar.gz /data
docker run --rm -v moltbot_workspace:/data -v $(pwd):/backup alpine tar czf /backup/moltbot-workspace-backup.tar.gz /data
```

### 自定义配置文件

在 `~/.clawdbot/moltbot.json`(容器内)创建自定义配置文件:

```json
{
  "agents": {
    "defaults": {
      "model": {
        "primary": "anthropic/claude-opus-4-5",
        "fallbacks": ["anthropic/claude-sonnet-4-5", "openai/gpt-4o"]
      }
    }
  }
}
```

## 故障排除

### 网关无法启动

1. 检查日志:`docker compose logs moltbot-gateway`
2. 验证网关令牌是否在 `.env` 中设置
3. 确保端口 18789 未被占用

### 无法访问控制界面

1. 验证网关绑定设置是否与你的访问方式匹配
2. 如果从另一台机器访问,检查防火墙规则
3. 确保容器健康:`docker compose ps`

### 模型 API 错误

1. 验证 API 密钥是否在 `.env` 中正确设置
2. 检查 API 密钥有效性和配额
3. 查看日志中的具体错误消息

### 运行诊断命令

doctor 命令可帮助诊断常见问题:

```bash
docker compose run --rm moltbot-cli doctor
```

## 文档

- [官方网站](https://molt.bot)
- [完整文档](https://docs.molt.bot)
- [入门指南](https://docs.molt.bot/start/getting-started)
- [配置参考](https://docs.molt.bot/gateway/configuration)
- [安全指南](https://docs.molt.bot/gateway/security)
- [Docker 安装](https://docs.molt.bot/install/docker)
- [GitHub 仓库](https://github.com/moltbot/moltbot)

## 许可证

MoltBot 使用 MIT 许可证发布。详情请参阅 [LICENSE](https://github.com/moltbot/moltbot/blob/main/LICENSE) 文件。

## 社区

- [Discord](https://discord.gg/clawd)
- [GitHub 讨论](https://github.com/moltbot/moltbot/discussions)
- [问题跟踪](https://github.com/moltbot/moltbot/issues)
88
apps/moltbot/docker-compose.yaml
Normal file
@@ -0,0 +1,88 @@
# MoltBot - Personal AI Assistant Docker Compose Configuration
# Official Repository: https://github.com/moltbot/moltbot
# Documentation: https://docs.molt.bot

x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  moltbot-gateway:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-ghcr.io}/moltbot/moltbot:${MOLTBOT_VERSION:-main}
    environment:
      - TZ=${TZ:-UTC}
      - HOME=/home/node
      - NODE_ENV=production
      - TERM=xterm-256color
      # Gateway configuration
      - CLAWDBOT_GATEWAY_TOKEN=${MOLTBOT_GATEWAY_TOKEN}
      - CLAWDBOT_GATEWAY_BIND=${MOLTBOT_GATEWAY_BIND:-lan}
      - CLAWDBOT_GATEWAY_PORT=${MOLTBOT_GATEWAY_PORT:-18789}
      # Optional: Model API keys (if not using OAuth)
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - CLAUDE_AI_SESSION_KEY=${CLAUDE_AI_SESSION_KEY:-}
      - CLAUDE_WEB_SESSION_KEY=${CLAUDE_WEB_SESSION_KEY:-}
      - CLAUDE_WEB_COOKIE=${CLAUDE_WEB_COOKIE:-}
    volumes:
      - moltbot_config:/home/node/.clawdbot
      - moltbot_workspace:/home/node/clawd
    ports:
      - "${MOLTBOT_GATEWAY_PORT_OVERRIDE:-18789}:18789"
      - "${MOLTBOT_BRIDGE_PORT_OVERRIDE:-18790}:18790"
    command:
      - node
      - dist/index.js
      - gateway
      - --bind
      - "${MOLTBOT_GATEWAY_BIND:-lan}"
      - --port
      - "18789"
    healthcheck:
      test: ["CMD", "node", "dist/index.js", "health", "--port", "18789"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    deploy:
      resources:
        limits:
          cpus: ${MOLTBOT_CPU_LIMIT:-2.0}
          memory: ${MOLTBOT_MEMORY_LIMIT:-2G}
        reservations:
          cpus: ${MOLTBOT_CPU_RESERVATION:-1.0}
          memory: ${MOLTBOT_MEMORY_RESERVATION:-1G}

  moltbot-cli:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-ghcr.io}/moltbot/moltbot:${MOLTBOT_VERSION:-main}
    environment:
      - TZ=${TZ:-UTC}
      - HOME=/home/node
      - TERM=xterm-256color
      - BROWSER=echo
      - CLAUDE_AI_SESSION_KEY=${CLAUDE_AI_SESSION_KEY:-}
      - CLAUDE_WEB_SESSION_KEY=${CLAUDE_WEB_SESSION_KEY:-}
      - CLAUDE_WEB_COOKIE=${CLAUDE_WEB_COOKIE:-}
    volumes:
      - moltbot_config:/home/node/.clawdbot
      - moltbot_workspace:/home/node/clawd
    stdin_open: true
    tty: true
    entrypoint: ["node", "dist/index.js"]
    profiles:
      - cli
    deploy:
      resources:
        limits:
          cpus: ${MOLTBOT_CLI_CPU_LIMIT:-1.0}
          memory: ${MOLTBOT_CLI_MEMORY_LIMIT:-512M}

volumes:
  moltbot_config:
  moltbot_workspace:
@@ -22,8 +22,8 @@ ZO_DATA_DIR=/data

# Root user credentials
# IMPORTANT: Change these default credentials before deploying to production
ZO_ROOT_USER_EMAIL=admin@example.com
ZO_ROOT_USER_PASSWORD=Complexpass#123
ZO_ROOT_USER_EMAIL="root@example.com"
ZO_ROOT_USER_PASSWORD="Complexpass#123"

# Optional: S3 object storage configuration
# Leave empty to use local disk storage
@@ -40,13 +40,13 @@

### Basic Configuration

| Environment Variable        | Description                     | Default             |
| --------------------------- | ------------------------------- | ------------------- |
| `OPENOBSERVE_VERSION`       | OpenObserve image version       | `v0.50.0`           |
| `OPENOBSERVE_PORT_OVERRIDE` | Web UI port                     | `5080`              |
| `ZO_ROOT_USER_EMAIL`        | Root user email                 | `admin@example.com` |
| `ZO_ROOT_USER_PASSWORD`     | Root user password              | `Complexpass#123`   |
| `ZO_DATA_DIR`               | Data directory inside container | `/data`             |
| Environment Variable        | Description                     | Default            |
| --------------------------- | ------------------------------- | ------------------ |
| `OPENOBSERVE_VERSION`       | OpenObserve image version       | `v0.50.0`          |
| `OPENOBSERVE_PORT_OVERRIDE` | Web UI port                     | `5080`             |
| `ZO_ROOT_USER_EMAIL`        | Root user email                 | `root@example.com` |
| `ZO_ROOT_USER_PASSWORD`     | Root user password              | `Complexpass#123`  |
| `ZO_DATA_DIR`               | Data directory inside container | `/data`            |

### S3 Object Storage (Optional)

@@ -83,7 +83,7 @@ Send OTLP data to `http://localhost:5080/api/default/` with authentication.
### Logs via HTTP

```bash
curl -u admin@example.com:Complexpass#123 \
curl -u root@example.com:Complexpass#123 \
  -H "Content-Type: application/json" \
  http://localhost:5080/api/default/logs/_json \
  -d '[{"message": "Hello OpenObserve", "level": "info"}]'
@@ -40,13 +40,13 @@

### 基础配置

| 环境变量                    | 说明                 | 默认值              |
| --------------------------- | -------------------- | ------------------- |
| `OPENOBSERVE_VERSION`       | OpenObserve 镜像版本 | `v0.50.0`           |
| `OPENOBSERVE_PORT_OVERRIDE` | Web UI 端口          | `5080`              |
| `ZO_ROOT_USER_EMAIL`        | 根用户邮箱           | `admin@example.com` |
| `ZO_ROOT_USER_PASSWORD`     | 根用户密码           | `Complexpass#123`   |
| `ZO_DATA_DIR`               | 容器内数据目录       | `/data`             |
| 环境变量                    | 说明                 | 默认值             |
| --------------------------- | -------------------- | ------------------ |
| `OPENOBSERVE_VERSION`       | OpenObserve 镜像版本 | `v0.50.0`          |
| `OPENOBSERVE_PORT_OVERRIDE` | Web UI 端口          | `5080`             |
| `ZO_ROOT_USER_EMAIL`        | 根用户邮箱           | `root@example.com` |
| `ZO_ROOT_USER_PASSWORD`     | 根用户密码           | `Complexpass#123`  |
| `ZO_DATA_DIR`               | 容器内数据目录       | `/data`            |

### S3 对象存储(可选)

@@ -83,7 +83,7 @@ OpenObserve 支持多种采集方式:
### 通过 HTTP 采集日志

```bash
curl -u admin@example.com:Complexpass#123 \
curl -u root@example.com:Complexpass#123 \
  -H "Content-Type: application/json" \
  http://localhost:5080/api/default/logs/_json \
  -d '[{"message": "Hello OpenObserve", "level": "info"}]'
@@ -21,7 +21,7 @@ services:
    environment:
      - TZ=${TZ:-UTC}
      - ZO_DATA_DIR=${ZO_DATA_DIR:-/data}
      - ZO_ROOT_USER_EMAIL=${ZO_ROOT_USER_EMAIL:-admin@example.com}
      - ZO_ROOT_USER_EMAIL=${ZO_ROOT_USER_EMAIL:-root@example.com}
      - ZO_ROOT_USER_PASSWORD=${ZO_ROOT_USER_PASSWORD:-Complexpass#123}
      # Optional: S3 configuration for object storage
      - ZO_S3_BUCKET_NAME=${ZO_S3_BUCKET_NAME:-}
@@ -2,29 +2,51 @@
# KVM-based secure sandbox environment

# Global registry prefix (optional)
# Leave empty to pull from Docker Hub
# Leave empty to use ghcr.io/zerocore-ai/microsandbox
GLOBAL_REGISTRY=

# MicroSandbox version
# Default: 0.2.6
MICROSANDBOX_VERSION=0.2.6
# Default: latest (recommended to use latest stable version)
# Set to empty string to always pull the latest version
MICROSANDBOX_VERSION=latest

# Auto pull base images on build
# Debian base image version
# Default: 13.2-slim
DEBIAN_VERSION=13.2-slim

# Auto pull base images on build (Python, Node.js SDKs)
# Default: true
MICROSANDBOX_AUTO_PULL_IMAGES=true

# Development mode (disables API key requirements)
# Default: true (for easier testing)
# Set to false in production environments
MICROSANDBOX_DEV_MODE=true

# Timezone
# Default: UTC
TZ=UTC

# ============================================
# Port Configuration
# ============================================

# Internal port (inside container)
# Default: 5555
MICROSANDBOX_PORT=5555

# External port (host machine)
# Default: 5555
MICROSANDBOX_PORT_OVERRIDE=5555

# ============================================
# Resource Limits
# ============================================

# CPU limits
# MicroSandbox requires more CPU for KVM virtualization
MICROSANDBOX_CPU_LIMIT=4.00
MICROSANDBOX_CPU_RESERVATION=1.00
MICROSANDBOX_CPU_LIMIT=4
MICROSANDBOX_CPU_RESERVATION=1

# Memory limits
# MicroSandbox requires more memory for running VMs
@@ -2,7 +2,7 @@ ARG DEBIAN_VERSION=13.2-slim
FROM debian:${DEBIAN_VERSION}

ARG DEBIAN_FRONTEND=noninteractive
ARG MICROSANDBOX_VERSION=0.2.6
ARG MICROSANDBOX_VERSION
ARG TARGETARCH

RUN apt update && \
@@ -12,32 +12,22 @@ RUN apt update && \
    apt clean && \
    rm -rf /var/lib/apt/lists/*

# Download and install microsandbox binary based on architecture
RUN ARCH=${TARGETARCH:-amd64} && \
    case "${ARCH}" in \
        amd64) MICROSANDBOX_ARCH="x86_64" ;; \
        arm64) MICROSANDBOX_ARCH="aarch64" ;; \
        *) echo "Unsupported architecture: ${ARCH}" && exit 1 ;; \
    esac && \
    curl -fsSL "https://github.com/zerocore-ai/microsandbox/releases/download/microsandbox-v${MICROSANDBOX_VERSION}/microsandbox-${MICROSANDBOX_VERSION}-linux-${MICROSANDBOX_ARCH}.tar.gz" \
        -o /tmp/microsandbox.tar.gz && \
    mkdir -p /usr/local/bin /usr/local/lib && \
    tar -xzf /tmp/microsandbox.tar.gz -C /tmp && \
    cd /tmp/microsandbox-${MICROSANDBOX_VERSION}-linux-${MICROSANDBOX_ARCH} && \
    mv ms* /usr/local/bin/ && \
    mv *.so.* /usr/local/lib/ && \
    chmod +x /usr/local/bin/ms* && \
    rm -rf /tmp/microsandbox*
# Install microsandbox using the official install script
# This ensures we always get the latest version if MICROSANDBOX_VERSION is not set
# and handles architecture detection automatically
RUN VERSION="${MICROSANDBOX_VERSION:-}" && \
    curl -fsSL https://raw.githubusercontent.com/zerocore-ai/microsandbox/refs/heads/main/scripts/install_microsandbox.sh | sh

# Setup directories for root user
RUN mkdir -p /root/.local/bin /root/.local/lib /root/.microsandbox

# Set up environment variables (based on setup_env.sh)
# Set up environment variables
ENV PATH="/root/.local/bin:/usr/local/bin:${PATH}"
ENV LD_LIBRARY_PATH="/root/.local/lib:/usr/local/lib:${LD_LIBRARY_PATH}"
ENV HOME="/root"

WORKDIR /root
# Set working directory to match docker-compose working_dir
WORKDIR /workspace

ARG MICROSANDBOX_AUTO_PULL_IMAGES=true
RUN if [ "${MICROSANDBOX_AUTO_PULL_IMAGES}" = "true" ]; then \
@@ -101,20 +101,23 @@ docker compose run --rm microsandbox --help

### Environment Variables

| Variable                          | Description                    | Default |
| --------------------------------- | ------------------------------ | ------- |
| `MICROSANDBOX_VERSION`            | MicroSandbox version           | `0.2.6` |
| `MICROSANDBOX_AUTO_PULL_IMAGES`   | Auto pull base images on build | `true`  |
| `MICROSANDBOX_PORT_OVERRIDE`      | Port mapping for MicroSandbox  | `5555`  |
| `TZ`                              | Container timezone             | `UTC`   |
| `MICROSANDBOX_CPU_LIMIT`          | Maximum CPU cores              | `4.00`  |
| `MICROSANDBOX_CPU_RESERVATION`    | Reserved CPU cores             | `1.00`  |
| `MICROSANDBOX_MEMORY_LIMIT`       | Maximum memory allocation      | `4G`    |
| `MICROSANDBOX_MEMORY_RESERVATION` | Reserved memory                | `1G`    |
| Variable                          | Description                           | Default     |
| --------------------------------- | ------------------------------------- | ----------- |
| `MICROSANDBOX_VERSION`            | MicroSandbox version                  | `latest`    |
| `DEBIAN_VERSION`                  | Debian base image version             | `13.2-slim` |
| `MICROSANDBOX_AUTO_PULL_IMAGES`   | Auto pull base images on build        | `true`      |
| `MICROSANDBOX_DEV_MODE`           | Enable dev mode (no API key required) | `true`      |
| `MICROSANDBOX_PORT`               | Internal container port               | `5555`      |
| `MICROSANDBOX_PORT_OVERRIDE`      | External host port mapping            | `5555`      |
| `TZ`                              | Container timezone                    | `UTC`       |
| `MICROSANDBOX_CPU_LIMIT`          | Maximum CPU cores                     | `4`         |
| `MICROSANDBOX_CPU_RESERVATION`    | Reserved CPU cores                    | `1`         |
| `MICROSANDBOX_MEMORY_LIMIT`       | Maximum memory allocation             | `4G`        |
| `MICROSANDBOX_MEMORY_RESERVATION` | Reserved memory                       | `1G`        |

### Volume Mounts

- `microsandbox_config`: MicroSandbox configuration and state
- `microsandbox_namespaces`: MicroSandbox namespace configurations and VM state
- `microsandbox_workspace`: Working directory for sandbox operations

## Security Considerations

@@ -127,6 +130,13 @@ MicroSandbox requires `privileged: true` to access KVM devices. This is necessar
- Review the code you plan to execute in the sandbox
- Keep the MicroSandbox image updated with security patches
- Use network isolation if running untrusted code
- In production environments, disable dev mode by setting `MICROSANDBOX_DEV_MODE=false`

**Why Privileged Mode?**

MicroSandbox uses KVM (Kernel-based Virtual Machine) to provide hardware-level isolation. Unlike Docker containers, which share the host kernel, MicroSandbox creates true virtual machines with their own kernels. This provides much stronger security boundaries, even though the Docker container itself runs in privileged mode.

The privileged container is only the orchestrator - the actual untrusted code runs inside isolated VMs with hardware-enforced boundaries. This architecture is specifically designed for running untrusted code safely.

### KVM Device Access

@@ -101,32 +101,42 @@ docker compose run --rm microsandbox --help

### 环境变量

| 变量                              | 描述                   | 默认值  |
| --------------------------------- | ---------------------- | ------- |
| `MICROSANDBOX_VERSION`            | MicroSandbox 版本      | `0.2.6` |
| `MICROSANDBOX_AUTO_PULL_IMAGES`   | 构建时自动拉取基础镜像 | `true`  |
| `MICROSANDBOX_PORT_OVERRIDE`      | MicroSandbox 端口映射  | `5555`  |
| `TZ`                              | 容器时区               | `UTC`   |
| `MICROSANDBOX_CPU_LIMIT`          | CPU 核心数上限         | `4.00`  |
| `MICROSANDBOX_CPU_RESERVATION`    | CPU 核心数预留         | `1.00`  |
| `MICROSANDBOX_MEMORY_LIMIT`       | 最大内存分配           | `4G`    |
| `MICROSANDBOX_MEMORY_RESERVATION` | 内存预留               | `1G`    |
| 变量                              | 描述                          | 默认值      |
| --------------------------------- | ----------------------------- | ----------- |
| `MICROSANDBOX_VERSION`            | MicroSandbox 版本             | `latest`    |
| `DEBIAN_VERSION`                  | Debian 基础镜像版本           | `13.2-slim` |
| `MICROSANDBOX_AUTO_PULL_IMAGES`   | 构建时自动拉取基础镜像        | `true`      |
| `MICROSANDBOX_DEV_MODE`           | 启用开发模式(无需 API 密钥) | `true`      |
| `MICROSANDBOX_PORT`               | 容器内部端口                  | `5555`      |
| `MICROSANDBOX_PORT_OVERRIDE`      | 外部主机端口映射              | `5555`      |
| `TZ`                              | 容器时区                      | `UTC`       |
| `MICROSANDBOX_CPU_LIMIT`          | CPU 核心数上限                | `4`         |
| `MICROSANDBOX_CPU_RESERVATION`    | CPU 核心数预留                | `1`         |
| `MICROSANDBOX_MEMORY_LIMIT`       | 最大内存分配                  | `4G`        |
| `MICROSANDBOX_MEMORY_RESERVATION` | 内存预留                      | `1G`        |

### 卷挂载

- `microsandbox_config`:MicroSandbox 配置和状态
- `microsandbox_namespaces`:MicroSandbox 命名空间配置和虚拟机状态
- `microsandbox_workspace`:沙箱操作的工作目录

## 安全注意事项

### 特权模式

MicroSandbox 需要 `privileged: true` 以访问 KVM 设备。这对于硬件虚拟化是必需的,但会授予容器提升的权限。请考虑以下事项:
MicroSandbox 需要 `privileged: true` 以访问 KVM 设备。这对于硬件虚拟化是必需的,但会授予容器提升的权限。请考虑以下事项:

- 仅在受信任的系统上运行 MicroSandbox
- 审查您计划在沙箱中执行的代码
- 保持 MicroSandbox 镜像更新以获取安全补丁
- 如果运行不受信任的代码,请使用网络隔离
- 在生产环境中,通过设置 `MICROSANDBOX_DEV_MODE=false` 禁用开发模式

**为什么需要特权模式?**

MicroSandbox 使用 KVM(基于内核的虚拟机)来提供硬件级隔离。与共享主机内核的 Docker 容器不同,MicroSandbox 创建具有自己内核的真实虚拟机。这提供了更强的安全边界,即使 Docker 容器本身以特权模式运行。

特权容器只是协调器——实际的不受信任代码在具有硬件强制边界的隔离虚拟机内运行。此架构专门设计用于安全地运行不受信任的代码。

### KVM 设备访问

@@ -8,7 +8,7 @@ x-defaults: &defaults
    driver: json-file
    options:
      max-size: 100m
      max-file: 3
      max-file: "3"

services:
  microsandbox:
@@ -21,11 +21,14 @@ services:
        - linux/arm64
      args:
        - DEBIAN_VERSION=${DEBIAN_VERSION:-13.2-slim}
        - MICROSANDBOX_VERSION=${MICROSANDBOX_VERSION:-0.2.6}
        - MICROSANDBOX_VERSION=${MICROSANDBOX_VERSION:-}
        - MICROSANDBOX_AUTO_PULL_IMAGES=${MICROSANDBOX_AUTO_PULL_IMAGES:-true}
    image: ${GLOBAL_REGISTRY:-}alexsuntop/microsandbox:${MICROSANDBOX_VERSION:-0.2.6}
    image: ${GLOBAL_REGISTRY:-ghcr.io}/zerocore-ai/microsandbox:${MICROSANDBOX_VERSION:-latest}
    ports:
      - ${MICROSANDBOX_PORT_OVERRIDE:-5555}:${MICROSANDBOX_PORT:-5555}
      - "${MICROSANDBOX_PORT_OVERRIDE:-5555}:${MICROSANDBOX_PORT:-5555}"
    # Privileged mode and relaxed security profiles are required for KVM access
    # This provides hardware-level isolation inside the container
    # See README.md for security considerations
    privileged: true
    security_opt:
      - apparmor=unconfined
@@ -34,31 +37,41 @@ services:
      - TZ=${TZ:-UTC}
      - MICROSANDBOX_HOME=/root/.microsandbox
    volumes:
      - microsandbox_config:/root/.microsandbox/namespaces
      # Store namespace configurations and VM state
      - microsandbox_namespaces:/root/.microsandbox/namespaces
      # Working directory for sandbox operations
      - microsandbox_workspace:/workspace
    devices:
      # KVM device for hardware-accelerated virtualization
      - /dev/kvm:/dev/kvm
      # TUN device for network tunneling (TAP/TUN interfaces)
      - /dev/net/tun:/dev/net/tun
    command:
      [
        "server",
        "start",
        "--host",
        "0.0.0.0",
        "--port",
        "${MICROSANDBOX_PORT:-5555}",
        "--dev",
      ]
      - /bin/sh
      - -c
      - >
        if [ "$${MICROSANDBOX_DEV_MODE:-true}" = "true" ]; then
          DEV_FLAG="--dev";
        else
          DEV_FLAG="";
        fi;
        exec server start --host 0.0.0.0 --port $${MICROSANDBOX_PORT:-5555} $${DEV_FLAG};
    working_dir: /workspace
    healthcheck:
      test: ["CMD-SHELL", "msb --version || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          cpus: ${MICROSANDBOX_CPU_LIMIT:-4.00}
          cpus: ${MICROSANDBOX_CPU_LIMIT:-4}
          memory: ${MICROSANDBOX_MEMORY_LIMIT:-4G}
        reservations:
          cpus: ${MICROSANDBOX_CPU_RESERVATION:-1.00}
          cpus: ${MICROSANDBOX_CPU_RESERVATION:-1}
          memory: ${MICROSANDBOX_MEMORY_RESERVATION:-1G}

volumes:
  microsandbox_config:
  microsandbox_namespaces:
  microsandbox_workspace:
@@ -1,5 +1,5 @@
# MinerU Docker image
MINERU_VERSION=2.7.1
MINERU_VERSION=2.7.3

# Port configurations
MINERU_PORT_OVERRIDE_VLLM=30000
@@ -19,7 +19,7 @@ RUN apt-get update && \
    rm -rf /var/lib/apt/lists/*

# Install mineru latest
RUN python3 -m pip install -U 'mineru[core]>=2.7.1' --break-system-packages && \
RUN python3 -m pip install -U 'mineru[core]>=2.7.3' --break-system-packages && \
    python3 -m pip cache purge

# Download models and update the configuration file
@@ -39,7 +39,7 @@ mineru -p demo.pdf -o ./output -b vlm-http-client -u http://localhost:30000

## Configuration

- `MINERU_VERSION`: The version for MinerU, default is `2.7.1`.
- `MINERU_VERSION`: The version for MinerU, default is `2.7.3`.
- `MINERU_PORT_OVERRIDE_VLLM`: The host port for the VLLM server, default is `30000`.
- `MINERU_PORT_OVERRIDE_API`: The host port for the API service, default is `8000`.
- `MINERU_PORT_OVERRIDE_GRADIO`: The host port for the Gradio WebUI, default is `7860`.
@@ -39,7 +39,7 @@ mineru -p demo.pdf -o ./output -b vlm-http-client -u http://localhost:30000

## 配置

- `MINERU_VERSION`: MinerU 的 Docker 镜像版本,默认为 `2.7.1`。
- `MINERU_VERSION`: MinerU 的 Docker 镜像版本,默认为 `2.7.3`。
- `MINERU_PORT_OVERRIDE_VLLM`: VLLM 服务器的主机端口,默认为 `30000`。
- `MINERU_PORT_OVERRIDE_API`: API 服务的主机端口,默认为 `8000`。
- `MINERU_PORT_OVERRIDE_GRADIO`: Gradio WebUI 的主机端口,默认为 `7860`。
@@ -8,7 +8,7 @@ x-defaults: &defaults

x-mineru-vllm: &mineru-vllm
  <<: *defaults
  image: ${GLOBAL_REGISTRY:-}alexsuntop/mineru:${MINERU_VERSION:-2.7.1}
  image: ${GLOBAL_REGISTRY:-}alexsuntop/mineru:${MINERU_VERSION:-2.7.3}
  build:
    context: .
    dockerfile: Dockerfile
@@ -45,29 +45,10 @@ services:
      - ${MINERU_PORT_OVERRIDE_VLLM:-30000}:30000
    entrypoint: mineru-openai-server
    command:
      # ==================== Engine Selection ====================
      # WARNING: Only ONE engine can be enabled at a time!
      # Choose 'vllm' OR 'lmdeploy' (uncomment one line below)
      - --engine vllm
      # --engine lmdeploy

      # ==================== vLLM Engine Parameters ====================
      # Uncomment if using --engine vllm
      - --host 0.0.0.0
      - --port 30000
      # Multi-GPU configuration (increase throughput)
      # --data-parallel-size 2
      # Single GPU memory optimization (reduce if VRAM insufficient)
      # --gpu-memory-utilization 0.5 # Try 0.4 or lower if issues persist

      # ==================== LMDeploy Engine Parameters ====================
      # Uncomment if using --engine lmdeploy
      # --server-name 0.0.0.0
      # --server-port 30000
      # Multi-GPU configuration (increase throughput)
      # --dp 2
      # Single GPU memory optimization (reduce if VRAM insufficient)
      # --cache-max-entry-count 0.5 # Try 0.4 or lower if issues persist
      --host 0.0.0.0
      --port 30000
      # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.9 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
      interval: 30s
@@ -82,21 +63,11 @@ services:
      - ${MINERU_PORT_OVERRIDE_API:-8000}:8000
    entrypoint: mineru-api
    command:
      # ==================== Server Configuration ====================
      - --host 0.0.0.0
      - --port 8000

      # ==================== vLLM Engine Parameters ====================
      # Multi-GPU configuration
      # --data-parallel-size 2
      # Single GPU memory optimization
      # --gpu-memory-utilization 0.5 # Try 0.4 or lower if VRAM insufficient

      # ==================== LMDeploy Engine Parameters ====================
      # Multi-GPU configuration
      # --dp 2
      # Single GPU memory optimization
      # --cache-max-entry-count 0.5 # Try 0.4 or lower if VRAM insufficient
      --host 0.0.0.0
      --port 8000
      # parameters for vllm-engine
      # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
    healthcheck:
      test:
        [
@@ -105,7 +76,7 @@ services:
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:8000/health",
          "http://localhost:8000/docs",
        ]
      interval: 30s
      timeout: 10s
@@ -119,30 +90,13 @@ services:
      - ${MINERU_PORT_OVERRIDE_GRADIO:-7860}:7860
    entrypoint: mineru-gradio
    command:
      # ==================== Gradio Server Configuration ====================
      - --server-name 0.0.0.0
      - --server-port 7860

      # ==================== Gradio Feature Settings ====================
      # --enable-api false # Disable API endpoint
      # --max-convert-pages 20 # Limit conversion page count

      # ==================== Engine Selection ====================
      # WARNING: Only ONE engine can be enabled at a time!

      # Option 1: vLLM Engine (recommended for most users)
      - --enable-vllm-engine true
      # Multi-GPU configuration
      # --data-parallel-size 2
      # Single GPU memory optimization
      # --gpu-memory-utilization 0.5 # Try 0.4 or lower if VRAM insufficient

      # Option 2: LMDeploy Engine
      # --enable-lmdeploy-engine true
      # Multi-GPU configuration
      # --dp 2
      # Single GPU memory optimization
      # --cache-max-entry-count 0.5 # Try 0.4 or lower if VRAM insufficient
      --server-name 0.0.0.0
      --server-port 7860
      # --enable-api false # If you want to disable the API, set this to false
      # --max-convert-pages 20 # If you want to limit the number of pages for conversion, set this to a specific number
      # parameters for vllm-engine
      # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
    healthcheck:
      test:
        [
@@ -24,7 +24,15 @@ services:
          cpus: ${BYTEBOT_DESKTOP_CPU_RESERVATION:-1.0}
          memory: ${BYTEBOT_DESKTOP_MEMORY_RESERVATION:-2G}
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9990/"]
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:9990/",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -56,7 +64,15 @@ services:
          cpus: ${BYTEBOT_AGENT_CPU_RESERVATION:-0.5}
          memory: ${BYTEBOT_AGENT_MEMORY_RESERVATION:-512M}
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9991/health"]
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:9991/health",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -83,7 +99,15 @@ services:
          cpus: ${BYTEBOT_UI_CPU_RESERVATION:-0.25}
          memory: ${BYTEBOT_UI_MEMORY_RESERVATION:-256M}
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9992/"]
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:9992/",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
31
src/docker-android/.env.example
Normal file
@@ -0,0 +1,31 @@
# Docker Android image tag (e.g., api-33, api-33-playstore)
# DOCKER_ANDROID_VERSION=api-33

# Docker Android GPU image tag (used by the gpu profile)
# DOCKER_ANDROID_GPU_VERSION=api-33-cuda

# Host port overrides
# DOCKER_ANDROID_ADB_PORT_OVERRIDE=5555
# DOCKER_ANDROID_CONSOLE_PORT_OVERRIDE=5554

# KVM device path (Linux hosts only)
# DOCKER_ANDROID_KVM_DEVICE=/dev/kvm

# Optional ADB key directory for Play Store images
# DOCKER_ANDROID_KEYS_DIR=./keys

# Emulator settings
# DOCKER_ANDROID_DISABLE_ANIMATION=false
# DOCKER_ANDROID_DISABLE_HIDDEN_POLICY=false
# DOCKER_ANDROID_SKIP_AUTH=true
# DOCKER_ANDROID_MEMORY=8192
# DOCKER_ANDROID_CORES=4

# GPU settings
# DOCKER_ANDROID_GPU_COUNT=1

# Resource limits
# DOCKER_ANDROID_CPU_LIMIT=2
# DOCKER_ANDROID_MEMORY_LIMIT=8G
# DOCKER_ANDROID_CPU_RESERVATION=1
# DOCKER_ANDROID_MEMORY_RESERVATION=4G
59
src/docker-android/README.md
Normal file
@@ -0,0 +1,59 @@
# Docker Android Emulator

[English](./README.md) | [中文](./README.zh.md)

This service deploys the HQarroum Docker Android emulator image.

## Usage

- Start the default emulator:

  ```bash
  docker compose up -d
  ```

- Start with GPU acceleration:

  ```bash
  docker compose --profile gpu up -d
  ```

- Connect with ADB:

  ```bash
  adb connect 127.0.0.1:5555
  ```

## Services

- `docker_android`: Android emulator (default).
- `docker_android_gpu`: Android emulator with CUDA support (profile: `gpu`).

## Configuration

- `DOCKER_ANDROID_VERSION`: Image tag, default is `api-33`.
- `DOCKER_ANDROID_GPU_VERSION`: GPU image tag, default is `api-33-cuda`.
- `DOCKER_ANDROID_ADB_PORT_OVERRIDE`: Host port for ADB, default is `5555`.
- `DOCKER_ANDROID_CONSOLE_PORT_OVERRIDE`: Host port for the emulator console, default is `5554`.
- `DOCKER_ANDROID_KVM_DEVICE`: KVM device path, default is `/dev/kvm`.
- `DOCKER_ANDROID_KEYS_DIR`: ADB key directory for Play Store images, default is `./keys`.
- `DOCKER_ANDROID_DISABLE_ANIMATION`: Disable animations, default is `false`.
- `DOCKER_ANDROID_DISABLE_HIDDEN_POLICY`: Disable the hidden API policy, default is `false`.
- `DOCKER_ANDROID_SKIP_AUTH`: Skip ADB authentication, default is `true`.
- `DOCKER_ANDROID_MEMORY`: Emulator RAM in MB, default is `8192`.
- `DOCKER_ANDROID_CORES`: Emulator CPU cores, default is `4`.
- `DOCKER_ANDROID_GPU_COUNT`: Number of GPUs, default is `1`.
- `DOCKER_ANDROID_CPU_LIMIT`: CPU limit, default is `2`.
- `DOCKER_ANDROID_MEMORY_LIMIT`: Memory limit, default is `8G`.
- `DOCKER_ANDROID_CPU_RESERVATION`: CPU reservation, default is `1`.
- `DOCKER_ANDROID_MEMORY_RESERVATION`: Memory reservation, default is `4G`.

## Volumes

- `docker_android_data`: Android AVD data stored at `/data`.

## Notes

- Linux with KVM is required for acceptable performance. Ensure `/dev/kvm` is available.
- For Play Store images, set `DOCKER_ANDROID_VERSION=api-33-playstore` and place `adbkey` and `adbkey.pub` in the `./keys` directory.
- The emulator is headless and can be controlled with `scrcpy` after connecting ADB (see the example below).
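
A minimal control session from the host (assumes `adb` and `scrcpy` are installed locally; a TCP/IP device's serial is its `ip:port` address):

```bash
adb connect 127.0.0.1:5555
scrcpy -s 127.0.0.1:5555   # mirrors the emulator display and forwards input
```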

59
src/docker-android/README.zh.md
Normal file
@@ -0,0 +1,59 @@
# Docker Android Emulator

[English](./README.md) | [中文](./README.zh.md)

该服务用于部署 HQarroum 的 Docker Android Emulator 镜像。

## 使用方法

- 启动默认模拟器:

  ```bash
  docker compose up -d
  ```

- 启动 GPU 加速:

  ```bash
  docker compose --profile gpu up -d
  ```

- 使用 ADB 连接:

  ```bash
  adb connect 127.0.0.1:5555
  ```

## 服务

- `docker_android`:默认 Android 模拟器。
- `docker_android_gpu`:带 CUDA 的 Android 模拟器(Profile:`gpu`)。

## 配置

- `DOCKER_ANDROID_VERSION`:镜像标签,默认 `api-33`。
- `DOCKER_ANDROID_GPU_VERSION`:GPU 镜像标签,默认 `api-33-cuda`。
- `DOCKER_ANDROID_ADB_PORT_OVERRIDE`:ADB 主机端口,默认 `5555`。
- `DOCKER_ANDROID_CONSOLE_PORT_OVERRIDE`:模拟器控制台端口,默认 `5554`。
- `DOCKER_ANDROID_KVM_DEVICE`:KVM 设备路径,默认 `/dev/kvm`。
- `DOCKER_ANDROID_KEYS_DIR`:Play Store 镜像的 ADB 密钥目录,默认 `./keys`。
- `DOCKER_ANDROID_DISABLE_ANIMATION`:禁用动画,默认 `false`。
- `DOCKER_ANDROID_DISABLE_HIDDEN_POLICY`:禁用隐藏 API 策略,默认 `false`。
- `DOCKER_ANDROID_SKIP_AUTH`:跳过 ADB 认证,默认 `true`。
- `DOCKER_ANDROID_MEMORY`:模拟器内存(MB),默认 `8192`。
- `DOCKER_ANDROID_CORES`:模拟器 CPU 核心数,默认 `4`。
- `DOCKER_ANDROID_GPU_COUNT`:GPU 数量,默认 `1`。
- `DOCKER_ANDROID_CPU_LIMIT`:CPU 限制,默认 `2`。
- `DOCKER_ANDROID_MEMORY_LIMIT`:内存限制,默认 `8G`。
- `DOCKER_ANDROID_CPU_RESERVATION`:CPU 预留,默认 `1`。
- `DOCKER_ANDROID_MEMORY_RESERVATION`:内存预留,默认 `4G`。

## 数据卷

- `docker_android_data`:Android AVD 数据目录,挂载到 `/data`。

## 说明

- 建议在支持 KVM 的 Linux 主机上运行,确保 `/dev/kvm` 可用。
- Play Store 镜像请设置 `DOCKER_ANDROID_VERSION=api-33-playstore`,并将 `adbkey` 与 `adbkey.pub` 放到 `./keys` 目录。
- 模拟器为无界面模式,ADB 连接后可使用 `scrcpy` 进行控制。
83
src/docker-android/docker-compose.yaml
Normal file
@@ -0,0 +1,83 @@
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  docker_android:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}halimqarroum/docker-android:${DOCKER_ANDROID_VERSION:-api-33}
    ports:
      - "${DOCKER_ANDROID_ADB_PORT_OVERRIDE:-5555}:5555"
      - "${DOCKER_ANDROID_CONSOLE_PORT_OVERRIDE:-5554}:5554"
    volumes:
      - docker_android_data:/data
      - ${DOCKER_ANDROID_KEYS_DIR:-./keys}:/keys:ro
    environment:
      - TZ=${TZ:-UTC}
      - DISABLE_ANIMATION=${DOCKER_ANDROID_DISABLE_ANIMATION:-false}
      - DISABLE_HIDDEN_POLICY=${DOCKER_ANDROID_DISABLE_HIDDEN_POLICY:-false}
      - SKIP_AUTH=${DOCKER_ANDROID_SKIP_AUTH:-true}
      - MEMORY=${DOCKER_ANDROID_MEMORY:-8192}
      - CORES=${DOCKER_ANDROID_CORES:-4}
    devices:
      - "${DOCKER_ANDROID_KVM_DEVICE:-/dev/kvm}:/dev/kvm"
    healthcheck:
      test: ["CMD-SHELL", "adb devices 2>/dev/null | grep -q emulator"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 60s
    deploy:
      resources:
        limits:
          cpus: ${DOCKER_ANDROID_CPU_LIMIT:-2}
          memory: ${DOCKER_ANDROID_MEMORY_LIMIT:-8G}
        reservations:
          cpus: ${DOCKER_ANDROID_CPU_RESERVATION:-1}
          memory: ${DOCKER_ANDROID_MEMORY_RESERVATION:-4G}

  docker_android_gpu:
    <<: *defaults
    profiles:
      - gpu
    image: ${GLOBAL_REGISTRY:-}halimqarroum/docker-android:${DOCKER_ANDROID_GPU_VERSION:-api-33-cuda}
    ports:
      - "${DOCKER_ANDROID_ADB_PORT_OVERRIDE:-5555}:5555"
      - "${DOCKER_ANDROID_CONSOLE_PORT_OVERRIDE:-5554}:5554"
    volumes:
      - docker_android_data:/data
      - ${DOCKER_ANDROID_KEYS_DIR:-./keys}:/keys:ro
    environment:
      - TZ=${TZ:-UTC}
      - DISABLE_ANIMATION=${DOCKER_ANDROID_DISABLE_ANIMATION:-false}
      - DISABLE_HIDDEN_POLICY=${DOCKER_ANDROID_DISABLE_HIDDEN_POLICY:-false}
      - SKIP_AUTH=${DOCKER_ANDROID_SKIP_AUTH:-true}
      - MEMORY=${DOCKER_ANDROID_MEMORY:-8192}
      - CORES=${DOCKER_ANDROID_CORES:-4}
    devices:
      - "${DOCKER_ANDROID_KVM_DEVICE:-/dev/kvm}:/dev/kvm"
    healthcheck:
      test: ["CMD-SHELL", "adb devices 2>/dev/null | grep -q emulator"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 60s
    deploy:
      resources:
        limits:
          cpus: ${DOCKER_ANDROID_CPU_LIMIT:-2}
          memory: ${DOCKER_ANDROID_MEMORY_LIMIT:-8G}
        reservations:
          cpus: ${DOCKER_ANDROID_CPU_RESERVATION:-1}
          memory: ${DOCKER_ANDROID_MEMORY_RESERVATION:-4G}
          devices:
            - driver: nvidia
              count: ${DOCKER_ANDROID_GPU_COUNT:-1}
              capabilities: [gpu]

volumes:
  docker_android_data:
18
src/falkordb/.env.example
Normal file
@@ -0,0 +1,18 @@
# FalkorDB Version
# Latest stable version can be found at https://hub.docker.com/r/falkordb/falkordb/tags
FALKORDB_VERSION=v4.14.11

# Port configuration
# Port for Redis protocol (Graph Database)
FALKORDB_PORT_OVERRIDE=6379
# Port for FalkorDB Browser UI
FALKORDB_BROWSER_PORT_OVERRIDE=3000

# Resource limits
FALKORDB_CPU_LIMIT=1.00
FALKORDB_MEMORY_LIMIT=2G
FALKORDB_CPU_RESERVATION=0.25
FALKORDB_MEMORY_RESERVATION=512M

# Timezone
TZ=UTC
31
src/falkordb/README.md
Normal file
@@ -0,0 +1,31 @@
# FalkorDB

[FalkorDB](https://falkordb.com/) is a low-latency property graph database that leverages sparse matrices and linear algebra for high-performance graph queries. It is a community-driven fork of RedisGraph, optimized for large-scale knowledge graphs and AI-powered applications.

## Getting Started

1. Copy `.env.example` to `.env` and adjust the configuration as needed.
2. Start the service:

   ```bash
   docker compose up -d
   ```

3. Access the FalkorDB Browser at `http://localhost:3000`.
4. Connect to the database using `redis-cli` or any Redis-compatible client on port `6379` (see the example after this list).
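
For instance, a quick smoke test through the bundled `redis-cli` (a sketch; `social` is an arbitrary graph name created on first write, and `GRAPH.QUERY` is FalkorDB's Cypher entry point):

```bash
# Create a node, then read it back
docker compose exec falkordb redis-cli GRAPH.QUERY social "CREATE (:Person {name:'Alice'})"
docker compose exec falkordb redis-cli GRAPH.QUERY social "MATCH (p:Person) RETURN p.name"
```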

## Environment Variables

| Variable                         | Description                  | Default    |
| -------------------------------- | ---------------------------- | ---------- |
| `FALKORDB_VERSION`               | Version of FalkorDB image    | `v4.14.11` |
| `FALKORDB_PORT_OVERRIDE`         | Host port for Redis protocol | `6379`     |
| `FALKORDB_BROWSER_PORT_OVERRIDE` | Host port for Browser UI     | `3000`     |
| `FALKORDB_CPU_LIMIT`             | Maximum CPU cores            | `1.00`     |
| `FALKORDB_MEMORY_LIMIT`          | Maximum memory               | `2G`       |

## Resources

- [Official Documentation](https://docs.falkordb.com/)
- [GitHub Repository](https://github.com/FalkorDB/FalkorDB)
- [Docker Hub](https://hub.docker.com/r/falkordb/falkordb)
31
src/falkordb/README.zh.md
Normal file
@@ -0,0 +1,31 @@
# FalkorDB

[FalkorDB](https://falkordb.com/) 是一个低延迟的属性图数据库,利用稀疏矩阵和线性代数实现高性能图查询。它是 RedisGraph 的社区驱动分支,针对大规模知识图谱和 AI 驱动的应用进行了优化。

## 快速开始

1. 将 `.env.example` 复制为 `.env` 并根据需要调整配置。
2. 启动服务:

   ```bash
   docker compose up -d
   ```

3. 通过 `http://localhost:3000` 访问 FalkorDB Browser 界面。
4. 使用 `redis-cli` 或任何兼容 Redis 的客户端连接到 `6379` 端口。

## 环境变量

| 变量名                           | 描述                 | 默认值     |
| -------------------------------- | -------------------- | ---------- |
| `FALKORDB_VERSION`               | FalkorDB 镜像版本    | `v4.14.11` |
| `FALKORDB_PORT_OVERRIDE`         | Redis 协议的主机端口 | `6379`     |
| `FALKORDB_BROWSER_PORT_OVERRIDE` | 浏览器界面的主机端口 | `3000`     |
| `FALKORDB_CPU_LIMIT`             | 最大 CPU 使用率      | `1.00`     |
| `FALKORDB_MEMORY_LIMIT`          | 最大内存限制         | `2G`       |

## 相关资源

- [官方文档](https://docs.falkordb.com/)
- [GitHub 仓库](https://github.com/FalkorDB/FalkorDB)
- [Docker Hub](https://hub.docker.com/r/falkordb/falkordb)
36
src/falkordb/docker-compose.yaml
Normal file
@@ -0,0 +1,36 @@
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  falkordb:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}falkordb/falkordb:${FALKORDB_VERSION:-v4.14.11}
    ports:
      - "${FALKORDB_PORT_OVERRIDE:-6379}:6379"
      - "${FALKORDB_BROWSER_PORT_OVERRIDE:-3000}:3000"
    volumes:
      - falkordb_data:/data
    environment:
      - TZ=${TZ:-UTC}
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          cpus: ${FALKORDB_CPU_LIMIT:-1.00}
          memory: ${FALKORDB_MEMORY_LIMIT:-2G}
        reservations:
          cpus: ${FALKORDB_CPU_RESERVATION:-0.25}
          memory: ${FALKORDB_MEMORY_RESERVATION:-512M}

volumes:
  falkordb_data:
21
src/flowise/.env.example
Normal file
@@ -0,0 +1,21 @@
# Global Registry Prefix (optional)
# GLOBAL_REGISTRY=

# Flowise Image Version
FLOWISE_VERSION=3.0.12

# Timezone
TZ=UTC

# Port to bind to on the host machine
FLOWISE_PORT_OVERRIDE=3000

# Resource Limits
FLOWISE_CPU_LIMIT=1
FLOWISE_MEMORY_LIMIT=1024M
FLOWISE_CPU_RESERVATION=0.5
FLOWISE_MEMORY_RESERVATION=512M

# Optional basic auth (leave empty to disable)
# FLOWISE_USERNAME=
# FLOWISE_PASSWORD=
32
src/flowise/README.md
Normal file
@@ -0,0 +1,32 @@
# Flowise

[English](./README.md) | [中文](./README.zh.md)

Quick start: <https://docs.flowiseai.com>.

This service deploys Flowise, a visual LLM orchestration platform.

## Services

- `flowise`: The Flowise service.

## Configuration

- `GLOBAL_REGISTRY`: The registry prefix for the Flowise image, default is empty.
- `FLOWISE_VERSION`: The version of the Flowise image, default is `3.0.12`.
- `TZ`: The timezone for the container, default is `UTC`.
- `FLOWISE_PORT_OVERRIDE`: The host port for Flowise, default is `3000`.
- `FLOWISE_CPU_LIMIT`: The CPU limit for the Flowise service, default is `1`.
- `FLOWISE_MEMORY_LIMIT`: The memory limit for the Flowise service, default is `1024M`.
- `FLOWISE_CPU_RESERVATION`: The CPU reservation for the Flowise service, default is `0.5`.
- `FLOWISE_MEMORY_RESERVATION`: The memory reservation for the Flowise service, default is `512M`.
- `FLOWISE_USERNAME`: Optional basic auth username. Leave empty to disable.
- `FLOWISE_PASSWORD`: Optional basic auth password. Leave empty to disable.

## Volumes

- `flowise_data`: A volume for storing Flowise data.

## Notes

- The health check uses the `/api/v1/ping` endpoint, which you can also probe manually (see below).
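
A manual probe of the same endpoint from the host (assumes `curl`; an HTTP 200 response means the instance is up):

```bash
curl -fsS http://localhost:3000/api/v1/ping
```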

32
src/flowise/README.zh.md
Normal file
@@ -0,0 +1,32 @@
# Flowise

[English](./README.md) | [中文](./README.zh.md)

快速开始:<https://docs.flowiseai.com>。

此服务用于部署 Flowise,一个可视化的 LLM 编排平台。

## 服务

- `flowise`:Flowise 服务。

## 配置

- `GLOBAL_REGISTRY`:Flowise 镜像的仓库前缀,默认为空。
- `FLOWISE_VERSION`:Flowise 镜像版本,默认是 `3.0.12`。
- `TZ`:容器时区,默认是 `UTC`。
- `FLOWISE_PORT_OVERRIDE`:Flowise 的宿主机端口,默认是 `3000`。
- `FLOWISE_CPU_LIMIT`:Flowise 服务的 CPU 限制,默认是 `1`。
- `FLOWISE_MEMORY_LIMIT`:Flowise 服务的内存限制,默认是 `1024M`。
- `FLOWISE_CPU_RESERVATION`:Flowise 服务的 CPU 预留,默认是 `0.5`。
- `FLOWISE_MEMORY_RESERVATION`:Flowise 服务的内存预留,默认是 `512M`。
- `FLOWISE_USERNAME`:可选的基础认证用户名,不设置则禁用。
- `FLOWISE_PASSWORD`:可选的基础认证密码,不设置则禁用。

## 数据卷

- `flowise_data`:用于存储 Flowise 数据的卷。

## 说明

- 健康检查使用 `/api/v1/ping` 端点。
44
src/flowise/docker-compose.yaml
Normal file
@@ -0,0 +1,44 @@
x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  flowise:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}flowiseai/flowise:${FLOWISE_VERSION:-3.0.12}
    ports:
      - "${FLOWISE_PORT_OVERRIDE:-3000}:3000"
    volumes:
      - flowise_data:/root/.flowise
    environment:
      - TZ=${TZ:-UTC}
      - PORT=3000
      - FLOWISE_USERNAME=${FLOWISE_USERNAME:-}
      - FLOWISE_PASSWORD=${FLOWISE_PASSWORD:-}
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "require('http').get('http://localhost:3000/api/v1/ping',res=>process.exit(res.statusCode===200?0:1)).on('error',()=>process.exit(1))"
        ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 20s
    deploy:
      resources:
        limits:
          cpus: ${FLOWISE_CPU_LIMIT:-1}
          memory: ${FLOWISE_MEMORY_LIMIT:-1024M}
        reservations:
          cpus: ${FLOWISE_CPU_RESERVATION:-0.5}
          memory: ${FLOWISE_MEMORY_RESERVATION:-512M}

volumes:
  flowise_data:
@@ -1,5 +1,5 @@
# Gitea Runner version
GITEA_RUNNER_VERSION=0.2.13-dind
GITEA_RUNNER_VERSION=0.2.13

# Gitea instance URL
INSTANCE_URL=http://localhost:3000
@@ -36,7 +36,7 @@ runner:
  # It works when something like `uses: actions/checkout@v4` is used and DEFAULT_ACTIONS_URL is set to github,
  # and github_mirror is not empty. In this case,
  # it replaces https://github.com with the value here, which is useful for some special network environments.
  github_mirror: ''
  github_mirror: ""
  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
  # Like: "macos-arm64:host" or "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
  # Find more images provided by Gitea at https://gitea.com/docker.gitea.com/runner-images .
@@ -1,5 +1,5 @@
# Gitea Version
GITEA_VERSION=1.25.2-rootless
GITEA_VERSION=1.25.4-rootless

# Database configuration
GITEA_DB_TYPE=postgres
@@ -9,7 +9,7 @@ x-defaults: &defaults
services:
  gitea:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}gitea/gitea:${GITEA_VERSION:-1.25.2-rootless}
    image: ${GLOBAL_REGISTRY:-}gitea/gitea:${GITEA_VERSION:-1.25.4-rootless}
    environment:
      - USER_UID=1000
      - USER_GID=1000
33
src/influxdb/.env.example
Normal file
@@ -0,0 +1,33 @@
# InfluxDB Version
INFLUXDB_VERSION=2.8.0

# Timezone
TZ=UTC

# Initialization mode (setup or upgrade)
INFLUXDB_INIT_MODE=setup

# Admin user credentials
INFLUXDB_ADMIN_USERNAME=admin
INFLUXDB_ADMIN_PASSWORD=changeme123456

# Organization name
INFLUXDB_ORG=myorg

# Default bucket name
INFLUXDB_BUCKET=mybucket

# Retention period (0 means infinite)
INFLUXDB_RETENTION=0

# Admin token for API access
INFLUXDB_ADMIN_TOKEN=mytoken123456

# Port to bind to on the host machine
INFLUXDB_PORT_OVERRIDE=8086

# Resource limits
INFLUXDB_CPU_LIMIT=2.0
INFLUXDB_MEMORY_LIMIT=2G
INFLUXDB_CPU_RESERVATION=0.5
INFLUXDB_MEMORY_RESERVATION=512M
169
src/influxdb/README.md
Normal file
@@ -0,0 +1,169 @@
# InfluxDB

InfluxDB is a high-performance, open-source time series database designed for handling high write and query loads. It is ideal for storing and analyzing metrics, events, and real-time analytics data.

## Features

- **Time Series Optimized**: Purpose-built for time-stamped data
- **High Performance**: Fast writes and queries for time series data
- **SQL-like Query Language**: Flux and InfluxQL for flexible data querying
- **Built-in UI**: Web-based interface for data exploration and visualization
- **Retention Policies**: Automatic data expiration and downsampling
- **Multi-tenancy**: Organizations and buckets for data isolation

## Quick Start

1. Copy the environment file and customize it:

   ```bash
   cp .env.example .env
   ```

2. Edit `.env` to configure your InfluxDB instance:
   - `INFLUXDB_ADMIN_USERNAME`: Admin username (default: admin)
   - `INFLUXDB_ADMIN_PASSWORD`: Admin password (default: changeme123456)
   - `INFLUXDB_ORG`: Organization name (default: myorg)
   - `INFLUXDB_BUCKET`: Default bucket name (default: mybucket)
   - `INFLUXDB_ADMIN_TOKEN`: API access token (default: mytoken123456)

3. Start InfluxDB:

   ```bash
   docker compose up -d
   ```

4. Access the InfluxDB UI at `http://localhost:8086`

## Configuration

### Environment Variables

| Variable                  | Description                         | Default          |
| ------------------------- | ----------------------------------- | ---------------- |
| `INFLUXDB_VERSION`        | InfluxDB version                    | `2.8.0`          |
| `TZ`                      | Timezone                            | `UTC`            |
| `INFLUXDB_INIT_MODE`      | Initialization mode (setup/upgrade) | `setup`          |
| `INFLUXDB_ADMIN_USERNAME` | Admin username                      | `admin`          |
| `INFLUXDB_ADMIN_PASSWORD` | Admin password                      | `changeme123456` |
| `INFLUXDB_ORG`            | Organization name                   | `myorg`          |
| `INFLUXDB_BUCKET`         | Default bucket name                 | `mybucket`       |
| `INFLUXDB_RETENTION`      | Retention period (0 for infinite)   | `0`              |
| `INFLUXDB_ADMIN_TOKEN`    | Admin API token                     | `mytoken123456`  |
| `INFLUXDB_PORT_OVERRIDE`  | Host port binding                   | `8086`           |

### Volumes

- `influxdb_data`: Stores time series data
- `influxdb_config`: Stores configuration files

## Usage

### Accessing the Web UI

Open your browser and navigate to:

```text
http://localhost:8086
```

Log in with the credentials configured in your `.env` file.

### Using the CLI

Execute commands inside the container:

```bash
docker compose exec influxdb influx
```

### Writing Data

Write a point using line protocol:

```bash
docker compose exec influxdb influx write \
  --bucket mybucket \
  --org myorg \
  'measurement,tag=value field=42'
```
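
The quoted string is InfluxDB line protocol: `measurement,tag_set field_set [timestamp]`. A hedged variant with an explicit timestamp (the measurement and tag names here are illustrative):

```bash
# Tagged point with a second-precision timestamp
docker compose exec influxdb influx write \
  --bucket mybucket --org myorg --precision s \
  'cpu,host=server01 usage=0.64 1700000000'
```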
### Querying Data
|
||||
|
||||
Query data using the CLI:
|
||||
|
||||
```bash
|
||||
docker compose exec influxdb influx query \
|
||||
--org myorg \
|
||||
'from(bucket: "mybucket") |> range(start: -1h)'
|
||||
```
|
||||
|
||||
## API Access
|
||||
|
||||
InfluxDB provides a RESTful API for programmatic access:
|
||||
|
||||
```bash
|
||||
curl -X POST "http://localhost:8086/api/v2/query?org=myorg" \
|
||||
-H "Authorization: Token mytoken123456" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"query": "from(bucket: \"mybucket\") |> range(start: -1h)"}'
|
||||
```
|
||||
|
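
The write path works the same way. A minimal sketch using the v2 write API, assuming the default org, bucket, and token from `.env.example`:

```bash
# Write one point in line protocol via the v2 write API
# (org, bucket, and token are the defaults from .env.example)
curl -X POST "http://localhost:8086/api/v2/write?org=myorg&bucket=mybucket&precision=s" \
  -H "Authorization: Token mytoken123456" \
  --data-raw 'measurement,tag=value field=42'
```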

## Backup and Restore

### Backup

```bash
docker compose exec influxdb influx backup /var/lib/influxdb2/backup
docker compose cp influxdb:/var/lib/influxdb2/backup ./backup
```

### Restore

```bash
docker compose cp ./backup influxdb:/var/lib/influxdb2/backup
docker compose exec influxdb influx restore /var/lib/influxdb2/backup
```

## Security Considerations

1. **Change Default Credentials**: Always change the default admin password and token in production
2. **Use Strong Tokens**: Generate cryptographically secure tokens for API access, as sketched below
3. **Network Security**: Consider using a reverse proxy with HTTPS in production
4. **Access Control**: Use InfluxDB's built-in authorization system to limit access
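
A quick way to produce such values, as a sketch (any cryptographically secure generator works; `openssl` is assumed to be installed on the host):

```bash
# Generate a random admin password and API token for .env
echo "INFLUXDB_ADMIN_PASSWORD=$(openssl rand -base64 18)"
echo "INFLUXDB_ADMIN_TOKEN=$(openssl rand -hex 32)"
```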

## Troubleshooting

### Container won't start

Check the logs:

```bash
docker compose logs influxdb
```

### Cannot access web UI

Ensure port 8086 is not already in use by another process:

```bash
netstat -an | grep 8086
```

### Data persistence

Verify volumes are properly mounted:

```bash
docker compose exec influxdb ls -la /var/lib/influxdb2
```

## References

- [Official Documentation](https://docs.influxdata.com/influxdb/v2/)
- [Flux Query Language](https://docs.influxdata.com/flux/v0/)
- [Docker Hub](https://hub.docker.com/_/influxdb)
- [GitHub Repository](https://github.com/influxdata/influxdb)

## License

InfluxDB is available under the MIT License. See the [LICENSE](https://github.com/influxdata/influxdb/blob/master/LICENSE) file for more information.

169 src/influxdb/README.zh.md Normal file
@@ -0,0 +1,169 @@

# InfluxDB

InfluxDB 是一个高性能的开源时序数据库,专为处理高写入和查询负载而设计。它非常适合存储和分析指标、事件以及实时分析数据。

## 功能特性

- **时序优化**:专为时间戳数据而构建
- **高性能**:快速的时序数据写入和查询
- **灵活的查询语言**:Flux 和类 SQL 的 InfluxQL 提供灵活的数据查询
- **内置 UI**:基于 Web 的数据探索和可视化界面
- **保留策略**:自动数据过期和降采样
- **多租户**:通过组织和桶实现数据隔离

## 快速开始

1. 复制环境配置文件并自定义:

   ```bash
   cp .env.example .env
   ```

2. 编辑 `.env` 文件配置您的 InfluxDB 实例:

   - `INFLUXDB_ADMIN_USERNAME`:管理员用户名(默认:admin)
   - `INFLUXDB_ADMIN_PASSWORD`:管理员密码(默认:changeme123456)
   - `INFLUXDB_ORG`:组织名称(默认:myorg)
   - `INFLUXDB_BUCKET`:默认桶名称(默认:mybucket)
   - `INFLUXDB_ADMIN_TOKEN`:API 访问令牌(默认:mytoken123456)

3. 启动 InfluxDB:

   ```bash
   docker compose up -d
   ```

4. 访问 InfluxDB UI:`http://localhost:8086`

## 配置说明

### 环境变量

| 变量                       | 说明                          | 默认值           |
| ------------------------- | ----------------------------- | ---------------- |
| `INFLUXDB_VERSION`        | InfluxDB 版本                 | `2.8.0`          |
| `TZ`                      | 时区                          | `UTC`            |
| `INFLUXDB_INIT_MODE`      | 初始化模式(setup/upgrade)   | `setup`          |
| `INFLUXDB_ADMIN_USERNAME` | 管理员用户名                  | `admin`          |
| `INFLUXDB_ADMIN_PASSWORD` | 管理员密码                    | `changeme123456` |
| `INFLUXDB_ORG`            | 组织名称                      | `myorg`          |
| `INFLUXDB_BUCKET`         | 默认桶名称                    | `mybucket`       |
| `INFLUXDB_RETENTION`      | 保留期限(0 表示永久)        | `0`              |
| `INFLUXDB_ADMIN_TOKEN`    | 管理员 API 令牌               | `mytoken123456`  |
| `INFLUXDB_PORT_OVERRIDE`  | 主机端口绑定                  | `8086`           |

### 数据卷

- `influxdb_data`:存储时序数据
- `influxdb_config`:存储配置文件

## 使用方法

### 访问 Web UI

在浏览器中打开:

```text
http://localhost:8086
```

使用 `.env` 文件中配置的凭据登录。

### 使用命令行

在容器内执行命令:

```bash
docker compose exec influxdb influx
```

### 写入数据

使用行协议(line protocol)写入:

```bash
docker compose exec influxdb influx write \
  --bucket mybucket \
  --org myorg \
  'measurement,tag=value field=42'
```

### 查询数据

使用 CLI 执行 Flux 查询:

```bash
docker compose exec influxdb influx query \
  --org myorg \
  'from(bucket: "mybucket") |> range(start: -1h)'
```

## API 访问

InfluxDB 提供 RESTful API 用于编程访问:

```bash
curl -X POST "http://localhost:8086/api/v2/query?org=myorg" \
  -H "Authorization: Token mytoken123456" \
  -H "Content-Type: application/json" \
  -d '{"query": "from(bucket: \"mybucket\") |> range(start: -1h)"}'
```

## 备份与恢复

### 备份

```bash
docker compose exec influxdb influx backup /var/lib/influxdb2/backup
docker compose cp influxdb:/var/lib/influxdb2/backup ./backup
```

### 恢复

```bash
docker compose cp ./backup influxdb:/var/lib/influxdb2/backup
docker compose exec influxdb influx restore /var/lib/influxdb2/backup
```

## 安全注意事项

1. **修改默认凭据**:在生产环境中务必修改默认的管理员密码和令牌
2. **使用强令牌**:为 API 访问生成加密安全的令牌
3. **网络安全**:生产环境中考虑使用带 HTTPS 的反向代理
4. **访问控制**:使用 InfluxDB 的内置授权系统限制访问

## 故障排除

### 容器无法启动

查看日志:

```bash
docker compose logs influxdb
```

### 无法访问 Web UI

确保端口 8086 未被占用:

```bash
netstat -an | grep 8086
```

### 数据持久化

验证数据卷是否正确挂载:

```bash
docker compose exec influxdb ls -la /var/lib/influxdb2
```

## 参考资源

- [官方文档](https://docs.influxdata.com/influxdb/v2/)
- [Flux 查询语言](https://docs.influxdata.com/flux/v0/)
- [Docker Hub](https://hub.docker.com/_/influxdb)
- [GitHub 仓库](https://github.com/influxdata/influxdb)

## 许可证

InfluxDB 采用 MIT 许可证发布。详情请参阅 [LICENSE](https://github.com/influxdata/influxdb/blob/master/LICENSE) 文件。

45 src/influxdb/docker-compose.yaml Normal file
@@ -0,0 +1,45 @@

x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  influxdb:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}influxdb:${INFLUXDB_VERSION:-2.8.0}
    environment:
      TZ: ${TZ:-UTC}
      # InfluxDB v2 initialization
      DOCKER_INFLUXDB_INIT_MODE: ${INFLUXDB_INIT_MODE:-setup}
      DOCKER_INFLUXDB_INIT_USERNAME: ${INFLUXDB_ADMIN_USERNAME:-admin}
      DOCKER_INFLUXDB_INIT_PASSWORD: ${INFLUXDB_ADMIN_PASSWORD:-changeme123456}
      DOCKER_INFLUXDB_INIT_ORG: ${INFLUXDB_ORG:-myorg}
      DOCKER_INFLUXDB_INIT_BUCKET: ${INFLUXDB_BUCKET:-mybucket}
      DOCKER_INFLUXDB_INIT_RETENTION: ${INFLUXDB_RETENTION:-0}
      DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: ${INFLUXDB_ADMIN_TOKEN:-mytoken123456}
    volumes:
      - influxdb_data:/var/lib/influxdb2
      - influxdb_config:/etc/influxdb2
    ports:
      - "${INFLUXDB_PORT_OVERRIDE:-8086}:8086"
    deploy:
      resources:
        limits:
          cpus: ${INFLUXDB_CPU_LIMIT:-2.0}
          memory: ${INFLUXDB_MEMORY_LIMIT:-2G}
        reservations:
          cpus: ${INFLUXDB_CPU_RESERVATION:-0.5}
          memory: ${INFLUXDB_MEMORY_RESERVATION:-512M}
    healthcheck:
      test: ["CMD", "influx", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s

volumes:
  influxdb_data:
  influxdb_config:

106 src/llama.cpp/.env.example Normal file
@@ -0,0 +1,106 @@

# =============================================================================
# llama.cpp Configuration
# https://github.com/ggml-org/llama.cpp
# LLM inference in C/C++ with support for various hardware accelerators
# =============================================================================

# -----------------------------------------------------------------------------
# General Settings
# -----------------------------------------------------------------------------

# Timezone for the container (default: UTC)
TZ=UTC

# Global registry prefix (optional)
# Example: docker.io/, ghcr.io/, registry.example.com/
GHCR_REGISTRY=ghcr.io/

# -----------------------------------------------------------------------------
# Server Configuration
# -----------------------------------------------------------------------------

# Server image variant
# Options: server (CPU), server-cuda (NVIDIA GPU), server-rocm (AMD GPU),
#          server-musa (Moore Threads GPU), server-intel (Intel GPU),
#          server-vulkan (Vulkan GPU)
LLAMA_CPP_SERVER_VARIANT=server

# Server port override (default: 8080)
LLAMA_CPP_SERVER_PORT_OVERRIDE=8080

# Model path inside the container
# You need to mount your model file to this path
# Example: /models/llama-2-7b-chat.Q4_K_M.gguf
LLAMA_CPP_MODEL_PATH=/models/model.gguf

# Context size (number of tokens)
# Larger values allow for more context but require more memory
# Default: 512, Common values: 512, 2048, 4096, 8192, 16384, 32768
LLAMA_CPP_CONTEXT_SIZE=512

# Number of GPU layers to offload
# 0 = CPU only, 99 = all layers on GPU (for GPU variants)
# For the CPU variant, keep this at 0
LLAMA_CPP_GPU_LAYERS=0

# Number of GPUs to use (for CUDA variant)
LLAMA_CPP_GPU_COUNT=1

# Server CPU limit (in cores)
LLAMA_CPP_SERVER_CPU_LIMIT=4.0

# Server CPU reservation (in cores)
LLAMA_CPP_SERVER_CPU_RESERVATION=2.0

# Server memory limit
LLAMA_CPP_SERVER_MEMORY_LIMIT=8G

# Server memory reservation
LLAMA_CPP_SERVER_MEMORY_RESERVATION=4G

# -----------------------------------------------------------------------------
# CLI Configuration (Light variant)
# -----------------------------------------------------------------------------

# CLI image variant
# Options: light (CPU), light-cuda (NVIDIA GPU), light-rocm (AMD GPU),
#          light-musa (Moore Threads GPU), light-intel (Intel GPU),
#          light-vulkan (Vulkan GPU)
LLAMA_CPP_CLI_VARIANT=light

# Default prompt for CLI mode
LLAMA_CPP_PROMPT=Hello, how are you?

# CLI CPU limit (in cores)
LLAMA_CPP_CLI_CPU_LIMIT=2.0

# CLI CPU reservation (in cores)
LLAMA_CPP_CLI_CPU_RESERVATION=1.0

# CLI memory limit
LLAMA_CPP_CLI_MEMORY_LIMIT=4G

# CLI memory reservation
LLAMA_CPP_CLI_MEMORY_RESERVATION=2G

# -----------------------------------------------------------------------------
# Full Toolkit Configuration
# -----------------------------------------------------------------------------

# Full image variant (includes model conversion tools)
# Options: full (CPU), full-cuda (NVIDIA GPU), full-rocm (AMD GPU),
#          full-musa (Moore Threads GPU), full-intel (Intel GPU),
#          full-vulkan (Vulkan GPU)
LLAMA_CPP_FULL_VARIANT=full

# Full CPU limit (in cores)
LLAMA_CPP_FULL_CPU_LIMIT=2.0

# Full CPU reservation (in cores)
LLAMA_CPP_FULL_CPU_RESERVATION=1.0

# Full memory limit
LLAMA_CPP_FULL_MEMORY_LIMIT=4G

# Full memory reservation
LLAMA_CPP_FULL_MEMORY_RESERVATION=2G

245 src/llama.cpp/README.md Normal file
@@ -0,0 +1,245 @@

# llama.cpp

[中文文档](README.zh.md)

[llama.cpp](https://github.com/ggml-org/llama.cpp) is a high-performance C/C++ implementation for LLM inference with support for various hardware accelerators.

## Features

- **Fast Inference**: Optimized C/C++ implementation for efficient LLM inference
- **Multiple Backends**: CPU, CUDA (NVIDIA), ROCm (AMD), MUSA (Moore Threads), Intel GPU, Vulkan
- **OpenAI-compatible API**: Server mode with an OpenAI-compatible REST API
- **CLI Support**: Interactive command-line interface for quick testing
- **Model Conversion**: Full toolkit includes tools to convert and quantize models
- **GGUF Format**: Support for the efficient GGUF model format
- **Cross-platform**: Linux (x86-64, ARM64, s390x), Windows, macOS

## Prerequisites

- Docker and Docker Compose installed
- At least 4GB of RAM (8GB+ recommended)
- For GPU variants:
  - **CUDA**: NVIDIA GPU with [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit)
  - **ROCm**: AMD GPU with proper ROCm drivers
  - **MUSA**: Moore Threads GPU with mt-container-toolkit
- GGUF format model file (e.g., from [Hugging Face](https://huggingface.co/models?library=gguf))

## Quick Start

### 1. Server Mode (CPU)

```bash
# Copy and configure environment
cp .env.example .env

# Edit .env and set your model path
# LLAMA_CPP_MODEL_PATH=/models/your-model.gguf

# Place your GGUF model in a directory, then update docker-compose.yaml
# to mount it, e.g.:
# volumes:
#   - ./models:/models

# Start the server
docker compose --profile server up -d

# Test the server (OpenAI-compatible API)
curl http://localhost:8080/v1/models

# Chat completion request
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "messages": [
      {"role": "user", "content": "Hello!"}
    ]
  }'
```

### 2. Server Mode with NVIDIA GPU

```bash
# Edit .env
# Set LLAMA_CPP_GPU_LAYERS=99 to offload all layers to GPU

# Start the GPU-accelerated server
docker compose --profile cuda up -d

# The server will automatically use the NVIDIA GPU
```

### 3. Server Mode with AMD GPU

```bash
# Edit .env
# Set LLAMA_CPP_GPU_LAYERS=99 to offload all layers to GPU

# Start the GPU-accelerated server
docker compose --profile rocm up -d

# The server will automatically use the AMD GPU
```

### 4. CLI Mode

```bash
# Edit .env and configure the model path and prompt

# Run the CLI
docker compose --profile cli up

# To run with custom arguments, use:
docker compose run --rm llama-cpp-cli \
  -m /models/your-model.gguf \
  -p "Your prompt here" \
  -n 512
```

### 5. Full Toolkit (Model Conversion)

```bash
# Start the full container
docker compose --profile full up -d

# Execute commands inside the container
docker compose exec llama-cpp-full bash

# Inside the container, you can use the conversion tools
# Example: convert a Hugging Face model
# python3 convert_hf_to_gguf.py /models/source-model --outfile /models/output.gguf
```

## Configuration

### Environment Variables

Key environment variables (see [.env.example](.env.example) for all options):

| Variable                         | Description                                                    | Default              |
| -------------------------------- | -------------------------------------------------------------- | -------------------- |
| `LLAMA_CPP_SERVER_VARIANT`       | Server image variant (server, server-cuda, server-rocm, etc.)  | `server`             |
| `LLAMA_CPP_MODEL_PATH`           | Model file path inside container                               | `/models/model.gguf` |
| `LLAMA_CPP_CONTEXT_SIZE`         | Context window size in tokens                                  | `512`                |
| `LLAMA_CPP_GPU_LAYERS`           | Number of layers to offload to GPU (0 = CPU only, 99 = all)    | `0`                  |
| `LLAMA_CPP_SERVER_PORT_OVERRIDE` | Server port on host                                            | `8080`               |
| `LLAMA_CPP_SERVER_MEMORY_LIMIT`  | Memory limit for server                                        | `8G`                 |

### Available Profiles

- `server`: CPU-only server
- `cuda`: NVIDIA GPU server (requires nvidia-container-toolkit)
- `rocm`: AMD GPU server (requires ROCm)
- `cli`: Command-line interface
- `full`: Full toolkit with model conversion tools
- `gpu`: Generic GPU profile (includes cuda and rocm)

### Image Variants

Each variant comes in multiple flavors:

- **server**: Only the `llama-server` executable (API server)
- **light**: Only the `llama-cli` and `llama-completion` executables
- **full**: Complete toolkit including model conversion tools

Backend options:

- Base (CPU)
- `-cuda` (NVIDIA GPU)
- `-rocm` (AMD GPU)
- `-musa` (Moore Threads GPU)
- `-intel` (Intel GPU with SYCL)
- `-vulkan` (Vulkan GPU)

## Server API

The server provides an OpenAI-compatible API:

- `GET /health` - Health check
- `GET /v1/models` - List available models
- `POST /v1/chat/completions` - Chat completion
- `POST /v1/completions` - Text completion
- `POST /v1/embeddings` - Generate embeddings

See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/examples/server/README.md) for full API details.
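
For example, a quick liveness and completion check against a running server (a sketch; the port assumes the default from `.env.example`, and the prompt and `max_tokens` values are arbitrary):

```bash
# Liveness probe
curl http://localhost:8080/health

# OpenAI-style text completion
curl http://localhost:8080/v1/completions \
  -H "Content-Type: application/json" \
  -d '{"prompt": "The capital of France is", "max_tokens": 16}'
```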

## Model Sources

Download GGUF models from:

- [Hugging Face GGUF Models](https://huggingface.co/models?library=gguf)
- [TheBloke's GGUF Collection](https://huggingface.co/TheBloke)
- Convert your own models using the full toolkit

Popular quantization formats:

- `Q4_K_M`: Good balance of quality and size (recommended)
- `Q5_K_M`: Higher quality, larger size
- `Q8_0`: Very high quality, large size
- `Q2_K`: Smallest size, lower quality

## Resource Requirements

Minimum requirements by model size:

| Model Size | RAM (CPU) | VRAM (GPU) | Context Size |
| ---------- | --------- | ---------- | ------------ |
| 7B Q4_K_M  | 6GB       | 4GB        | 2048         |
| 13B Q4_K_M | 10GB      | 8GB        | 2048         |
| 34B Q4_K_M | 24GB      | 20GB       | 2048         |
| 70B Q4_K_M | 48GB      | 40GB       | 2048         |

Larger context sizes require proportionally more memory.

## Performance Tuning

For CPU inference:

- Increase `LLAMA_CPP_SERVER_CPU_LIMIT` to allow more cores
- Tune the thread count with the `-t` flag (default: auto); see the sketch below

For GPU inference:

- Set `LLAMA_CPP_GPU_LAYERS=99` to offload all layers
- Increase the context size for longer conversations
- Monitor GPU memory usage
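
As a sketch of the `-t` flag, the CLI container can be pinned to a fixed thread count for a quick CPU throughput test (the model path assumes your mounted volume; the prompt and counts are arbitrary):

```bash
# Run llama-cli with 8 threads and a short generation
docker compose run --rm llama-cpp-cli \
  -m /models/your-model.gguf \
  -p "Benchmark prompt" \
  -t 8 \
  -n 128
```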

## Security Notes

- The server binds to `0.0.0.0` by default - ensure proper network security
- No authentication is enabled by default
- Consider using a reverse proxy (nginx, Caddy) for production deployments
- Limit resource usage to prevent system exhaustion

## Troubleshooting

### Out of Memory

- Reduce `LLAMA_CPP_CONTEXT_SIZE`
- Use a smaller quantized model (e.g., Q4 instead of Q8)
- Reduce `LLAMA_CPP_GPU_LAYERS` if using GPU

### GPU Not Detected

**NVIDIA**: Verify nvidia-container-toolkit is installed:

```bash
docker run --rm --gpus all nvidia/cuda:12.4.0-base-ubuntu22.04 nvidia-smi
```

**AMD**: Ensure ROCm drivers are installed and `/dev/kfd` and `/dev/dri` are accessible.

### Slow Inference

- Check CPU/GPU utilization
- Increase resource limits in `.env`
- For GPU: verify all layers are offloaded (`LLAMA_CPP_GPU_LAYERS=99`)

## Documentation

- [llama.cpp GitHub](https://github.com/ggml-org/llama.cpp)
- [Docker Documentation](https://github.com/ggml-org/llama.cpp/blob/master/docs/docker.md)
- [Server API Docs](https://github.com/ggml-org/llama.cpp/blob/master/examples/server/README.md)

## License

llama.cpp is released under the MIT License. See the [LICENSE](https://github.com/ggml-org/llama.cpp/blob/master/LICENSE) file for details.

244 src/llama.cpp/README.zh.md Normal file
@@ -0,0 +1,244 @@

# llama.cpp

[English Documentation](README.md)

[llama.cpp](https://github.com/ggml-org/llama.cpp) 是一个高性能的 C/C++ 实现的大语言模型推理引擎,支持多种硬件加速器。

## 功能特性

- **高速推理**:优化的 C/C++ 实现,提供高效的 LLM 推理
- **多种后端**:支持 CPU、CUDA(NVIDIA)、ROCm(AMD)、MUSA(摩尔线程)、Intel GPU、Vulkan
- **OpenAI 兼容 API**:服务器模式提供 OpenAI 兼容的 REST API
- **CLI 支持**:交互式命令行界面,方便快速测试
- **模型转换**:完整工具包包含模型转换和量化工具
- **GGUF 格式**:支持高效的 GGUF 模型格式
- **跨平台**:支持 Linux(x86-64、ARM64、s390x)、Windows、macOS

## 前置要求

- 已安装 Docker 和 Docker Compose
- 至少 4GB 内存(推荐 8GB 以上)
- GPU 版本需要:
  - **CUDA**:NVIDIA GPU 及 [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit)
  - **ROCm**:AMD GPU 及相应的 ROCm 驱动
  - **MUSA**:摩尔线程 GPU 及 mt-container-toolkit
- GGUF 格式的模型文件(例如从 [Hugging Face](https://huggingface.co/models?library=gguf) 下载)

## 快速开始

### 1. 服务器模式(CPU)

```bash
# 复制并配置环境变量
cp .env.example .env

# 编辑 .env 并设置模型路径
# LLAMA_CPP_MODEL_PATH=/models/your-model.gguf

# 将 GGUF 模型放在目录中,然后更新 docker-compose.yaml 挂载,例如:
# volumes:
#   - ./models:/models

# 启动服务器
docker compose --profile server up -d

# 测试服务器(OpenAI 兼容 API)
curl http://localhost:8080/v1/models

# 聊天补全请求
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "messages": [
      {"role": "user", "content": "你好!"}
    ]
  }'
```

### 2. 服务器模式(NVIDIA GPU)

```bash
# 编辑 .env
# 设置 LLAMA_CPP_GPU_LAYERS=99 将所有层卸载到 GPU

# 启动 GPU 加速服务器
docker compose --profile cuda up -d

# 服务器将自动使用 NVIDIA GPU
```

### 3. 服务器模式(AMD GPU)

```bash
# 编辑 .env
# 设置 LLAMA_CPP_GPU_LAYERS=99 将所有层卸载到 GPU

# 启动 GPU 加速服务器
docker compose --profile rocm up -d

# 服务器将自动使用 AMD GPU
```

### 4. CLI 模式

```bash
# 编辑 .env 并配置模型路径和提示词

# 运行 CLI
docker compose --profile cli up

# 使用自定义参数运行:
docker compose run --rm llama-cpp-cli \
  -m /models/your-model.gguf \
  -p "你的提示词" \
  -n 512
```

### 5. 完整工具包(模型转换)

```bash
# 启动完整容器
docker compose --profile full up -d

# 在容器内执行命令
docker compose exec llama-cpp-full bash

# 在容器内可以使用转换工具
# 示例:转换 Hugging Face 模型
# python3 convert_hf_to_gguf.py /models/source-model --outfile /models/output.gguf
```

## 配置说明

### 环境变量

主要环境变量(完整选项请查看 [.env.example](.env.example)):

| 变量                              | 说明                                                  | 默认值               |
| -------------------------------- | ----------------------------------------------------- | -------------------- |
| `LLAMA_CPP_SERVER_VARIANT`       | 服务器镜像变体(server、server-cuda、server-rocm 等) | `server`             |
| `LLAMA_CPP_MODEL_PATH`           | 容器内模型文件路径                                    | `/models/model.gguf` |
| `LLAMA_CPP_CONTEXT_SIZE`         | 上下文窗口大小(token 数)                            | `512`                |
| `LLAMA_CPP_GPU_LAYERS`           | 卸载到 GPU 的层数(0=仅 CPU,99=全部)                | `0`                  |
| `LLAMA_CPP_SERVER_PORT_OVERRIDE` | 主机端口                                              | `8080`               |
| `LLAMA_CPP_SERVER_MEMORY_LIMIT`  | 服务器内存限制                                        | `8G`                 |

### 可用配置文件

- `server`:仅 CPU 服务器
- `cuda`:NVIDIA GPU 服务器(需要 nvidia-container-toolkit)
- `rocm`:AMD GPU 服务器(需要 ROCm)
- `cli`:命令行界面
- `full`:包含模型转换工具的完整工具包
- `gpu`:通用 GPU 配置(包括 cuda 和 rocm)

### 镜像变体

每个变体都有多种类型:

- **server**:仅包含 `llama-server` 可执行文件(API 服务器)
- **light**:仅包含 `llama-cli` 和 `llama-completion` 可执行文件
- **full**:完整工具包,包括模型转换工具

后端选项:

- 基础版(CPU)
- `-cuda`(NVIDIA GPU)
- `-rocm`(AMD GPU)
- `-musa`(摩尔线程 GPU)
- `-intel`(Intel GPU,支持 SYCL)
- `-vulkan`(Vulkan GPU)

## 服务器 API

服务器提供 OpenAI 兼容的 API:

- `GET /health` - 健康检查
- `GET /v1/models` - 列出可用模型
- `POST /v1/chat/completions` - 聊天补全
- `POST /v1/completions` - 文本补全
- `POST /v1/embeddings` - 生成嵌入向量

完整 API 详情请参阅 [llama.cpp 服务器文档](https://github.com/ggml-org/llama.cpp/blob/master/examples/server/README.md)。

## 模型来源

下载 GGUF 模型:

- [Hugging Face GGUF 模型](https://huggingface.co/models?library=gguf)
- [TheBloke 的 GGUF 合集](https://huggingface.co/TheBloke)
- 使用完整工具包转换您自己的模型

常用量化格式:

- `Q4_K_M`:质量和大小的良好平衡(推荐)
- `Q5_K_M`:更高质量,更大体积
- `Q8_0`:非常高的质量,大体积
- `Q2_K`:最小体积,较低质量

## 资源需求

按模型大小的最低要求:

| 模型大小    | 内存(CPU) | 显存(GPU) | 上下文大小 |
| ---------- | ----------- | ----------- | ---------- |
| 7B Q4_K_M  | 6GB         | 4GB         | 2048       |
| 13B Q4_K_M | 10GB        | 8GB         | 2048       |
| 34B Q4_K_M | 24GB        | 20GB        | 2048       |
| 70B Q4_K_M | 48GB        | 40GB        | 2048       |

更大的上下文需要成比例增加的内存。

## 性能调优

CPU 推理:

- 增加 `LLAMA_CPP_SERVER_CPU_LIMIT` 以使用更多核心
- 使用 `-t` 参数优化线程数(默认:自动)

GPU 推理:

- 设置 `LLAMA_CPP_GPU_LAYERS=99` 卸载所有层
- 增加上下文大小以支持更长对话
- 监控 GPU 内存使用

## 安全注意事项

- 服务器默认绑定到 `0.0.0.0`,请确保网络安全
- 默认未启用身份验证
- 生产环境建议使用反向代理(nginx、Caddy)
- 限制资源使用以防止系统资源耗尽

## 故障排除

### 内存不足

- 减小 `LLAMA_CPP_CONTEXT_SIZE`
- 使用更小的量化模型(例如 Q4 而不是 Q8)
- 减少 `LLAMA_CPP_GPU_LAYERS`(如果使用 GPU)

### GPU 未检测到

**NVIDIA**:验证 nvidia-container-toolkit 是否已安装:

```bash
docker run --rm --gpus all nvidia/cuda:12.4.0-base-ubuntu22.04 nvidia-smi
```

**AMD**:确保 ROCm 驱动已安装且 `/dev/kfd`、`/dev/dri` 可访问。

### 推理速度慢

- 检查 CPU/GPU 利用率
- 增加 `.env` 中的资源限制
- GPU:验证所有层都已卸载(`LLAMA_CPP_GPU_LAYERS=99`)

## 文档

- [llama.cpp GitHub](https://github.com/ggml-org/llama.cpp)
- [Docker 文档](https://github.com/ggml-org/llama.cpp/blob/master/docs/docker.md)
- [服务器 API 文档](https://github.com/ggml-org/llama.cpp/blob/master/examples/server/README.md)

## 许可证

llama.cpp 使用 MIT 许可证发布。详情请参阅 [LICENSE](https://github.com/ggml-org/llama.cpp/blob/master/LICENSE) 文件。

210 src/llama.cpp/docker-compose.yaml Normal file
@@ -0,0 +1,210 @@

# Docker Compose configuration for llama.cpp
# https://github.com/ggml-org/llama.cpp
# LLM inference in C/C++ with support for various hardware accelerators

x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  # llama.cpp server - OpenAI-compatible API server
  # Variant: server (CPU), server-cuda (NVIDIA GPU), server-rocm (AMD GPU)
  llama-cpp-server:
    <<: *defaults
    image: ${GHCR_REGISTRY:-ghcr.io/}ggml-org/llama.cpp:${LLAMA_CPP_SERVER_VARIANT:-server}
    ports:
      - "${LLAMA_CPP_SERVER_PORT_OVERRIDE:-8080}:8080"
    volumes:
      - llama_cpp_models:/models
    command:
      - "-m"
      - "${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}"
      - "--port"
      - "8080"
      - "--host"
      - "0.0.0.0"
      - "-n"
      - "${LLAMA_CPP_CONTEXT_SIZE:-512}"
      - "--n-gpu-layers"
      - "${LLAMA_CPP_GPU_LAYERS:-0}"
    environment:
      - TZ=${TZ:-UTC}
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--quiet",
          "--tries=1",
          "--spider",
          "http://localhost:8080/health",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          cpus: ${LLAMA_CPP_SERVER_CPU_LIMIT:-4.0}
          memory: ${LLAMA_CPP_SERVER_MEMORY_LIMIT:-8G}
        reservations:
          cpus: ${LLAMA_CPP_SERVER_CPU_RESERVATION:-2.0}
          memory: ${LLAMA_CPP_SERVER_MEMORY_RESERVATION:-4G}
    profiles:
      - server

  # llama.cpp server with NVIDIA GPU support
  llama-cpp-server-cuda:
    <<: *defaults
    image: ${GHCR_REGISTRY:-ghcr.io/}ggml-org/llama.cpp:server-cuda
    ports:
      - "${LLAMA_CPP_SERVER_PORT_OVERRIDE:-8080}:8080"
    volumes:
      - llama_cpp_models:/models
    command:
      - "-m"
      - "${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}"
      - "--port"
      - "8080"
      - "--host"
      - "0.0.0.0"
      - "-n"
      - "${LLAMA_CPP_CONTEXT_SIZE:-512}"
      - "--n-gpu-layers"
      - "${LLAMA_CPP_GPU_LAYERS:-99}"
    environment:
      - TZ=${TZ:-UTC}
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--quiet",
          "--tries=1",
          "--spider",
          "http://localhost:8080/health",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          cpus: ${LLAMA_CPP_SERVER_CPU_LIMIT:-4.0}
          memory: ${LLAMA_CPP_SERVER_MEMORY_LIMIT:-8G}
        reservations:
          cpus: ${LLAMA_CPP_SERVER_CPU_RESERVATION:-2.0}
          memory: ${LLAMA_CPP_SERVER_MEMORY_RESERVATION:-4G}
          devices:
            - driver: nvidia
              count: ${LLAMA_CPP_GPU_COUNT:-1}
              capabilities: [gpu]
    profiles:
      - gpu
      - cuda

  # llama.cpp server with AMD ROCm GPU support
  llama-cpp-server-rocm:
    <<: *defaults
    image: ${GHCR_REGISTRY:-ghcr.io/}ggml-org/llama.cpp:server-rocm
    ports:
      - "${LLAMA_CPP_SERVER_PORT_OVERRIDE:-8080}:8080"
    volumes:
      - llama_cpp_models:/models
    devices:
      - /dev/kfd
      - /dev/dri
    command:
      - "-m"
      - "${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}"
      - "--port"
      - "8080"
      - "--host"
      - "0.0.0.0"
      - "-n"
      - "${LLAMA_CPP_CONTEXT_SIZE:-512}"
      - "--n-gpu-layers"
      - "${LLAMA_CPP_GPU_LAYERS:-99}"
    environment:
      - TZ=${TZ:-UTC}
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--quiet",
          "--tries=1",
          "--spider",
          "http://localhost:8080/health",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          cpus: ${LLAMA_CPP_SERVER_CPU_LIMIT:-4.0}
          memory: ${LLAMA_CPP_SERVER_MEMORY_LIMIT:-8G}
        reservations:
          cpus: ${LLAMA_CPP_SERVER_CPU_RESERVATION:-2.0}
          memory: ${LLAMA_CPP_SERVER_MEMORY_RESERVATION:-4G}
    profiles:
      - gpu
      - rocm

  # llama.cpp CLI (light) - Interactive command-line interface
  llama-cpp-cli:
    <<: *defaults
    image: ${GHCR_REGISTRY:-ghcr.io/}ggml-org/llama.cpp:${LLAMA_CPP_CLI_VARIANT:-light}
    volumes:
      - llama_cpp_models:/models
    entrypoint: /app/llama-cli
    command:
      - "-m"
      - "${LLAMA_CPP_MODEL_PATH:-/models/model.gguf}"
      - "-p"
      - "${LLAMA_CPP_PROMPT:-Hello, how are you?}"
      - "-n"
      - "${LLAMA_CPP_CONTEXT_SIZE:-512}"
    environment:
      - TZ=${TZ:-UTC}
    deploy:
      resources:
        limits:
          cpus: ${LLAMA_CPP_CLI_CPU_LIMIT:-2.0}
          memory: ${LLAMA_CPP_CLI_MEMORY_LIMIT:-4G}
        reservations:
          cpus: ${LLAMA_CPP_CLI_CPU_RESERVATION:-1.0}
          memory: ${LLAMA_CPP_CLI_MEMORY_RESERVATION:-2G}
    profiles:
      - cli

  # llama.cpp full - Complete toolkit including model conversion tools
  llama-cpp-full:
    <<: *defaults
    image: ${GHCR_REGISTRY:-ghcr.io/}ggml-org/llama.cpp:${LLAMA_CPP_FULL_VARIANT:-full}
    volumes:
      - llama_cpp_models:/models
    command: ["sleep", "infinity"]
    environment:
      - TZ=${TZ:-UTC}
    deploy:
      resources:
        limits:
          cpus: ${LLAMA_CPP_FULL_CPU_LIMIT:-2.0}
          memory: ${LLAMA_CPP_FULL_MEMORY_LIMIT:-4G}
        reservations:
          cpus: ${LLAMA_CPP_FULL_CPU_RESERVATION:-1.0}
          memory: ${LLAMA_CPP_FULL_MEMORY_RESERVATION:-2G}
    profiles:
      - full

volumes:
  llama_cpp_models:

27 src/lmdeploy/.env.example Normal file
@@ -0,0 +1,27 @@

# LMDeploy Version
# Find more tags at: https://hub.docker.com/r/openmmlab/lmdeploy/tags
LMDEPLOY_VERSION=v0.11.1-cu12.8

# Host port override
LMDEPLOY_PORT_OVERRIDE=23333

# Model path or HuggingFace model ID
# Examples:
#   - internlm/internlm2-chat-1_8b
#   - Qwen/Qwen2.5-7B-Instruct
LMDEPLOY_MODEL=internlm/internlm2-chat-1_8b

# HuggingFace token for private models
HF_TOKEN=

# Resource limits
LMDEPLOY_CPU_LIMIT=4.0
LMDEPLOY_MEMORY_LIMIT=8G
LMDEPLOY_CPU_RESERVATION=2.0
LMDEPLOY_MEMORY_RESERVATION=4G

# Shared memory size (required for some models)
LMDEPLOY_SHM_SIZE=4g

# Timezone
TZ=UTC

31 src/lmdeploy/README.md Normal file
@@ -0,0 +1,31 @@

# LMDeploy Docker Compose

[LMDeploy](https://github.com/InternLM/lmdeploy) is a toolkit for compressing, deploying, and serving LLMs.

## Quick Start

1. (Optional) Configure the model and port in `.env`.
2. Start the service:

   ```bash
   docker compose up -d
   ```

3. Access the OpenAI-compatible API at `http://localhost:23333/v1`.
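
A quick smoke test once the container reports healthy (a sketch; the model name must match `LMDEPLOY_MODEL`, shown here with the default):

```bash
# List the served models
curl http://localhost:23333/v1/models

# Minimal chat completion against the default model
curl http://localhost:23333/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "internlm/internlm2-chat-1_8b", "messages": [{"role": "user", "content": "Hello!"}]}'
```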

## Configuration

| Environment Variable     | Default                        | Description                          |
| ------------------------ | ------------------------------ | ------------------------------------ |
| `LMDEPLOY_VERSION`       | `v0.11.1-cu12.8`               | LMDeploy image version               |
| `LMDEPLOY_PORT_OVERRIDE` | `23333`                        | Host port for the API server         |
| `LMDEPLOY_MODEL`         | `internlm/internlm2-chat-1_8b` | HuggingFace model ID or local path   |
| `HF_TOKEN`               |                                | HuggingFace token for private models |

## Monitoring Health

The service includes a health check that verifies the OpenAI-compatible `/v1/models` endpoint is responding.

## GPU Support

By default, this configuration reserves 1 NVIDIA GPU. Ensure you have the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) installed on your host.
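
To confirm the toolkit works before starting the service, the same check used in the llama.cpp README in this repository applies (assumes the CUDA base image is pullable):

```bash
# Should print the host GPU table if the toolkit is installed correctly
docker run --rm --gpus all nvidia/cuda:12.4.0-base-ubuntu22.04 nvidia-smi
```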

31 src/lmdeploy/README.zh.md Normal file
@@ -0,0 +1,31 @@

# LMDeploy Docker Compose

[LMDeploy](https://github.com/InternLM/lmdeploy) 是一个用于压缩、部署和服务大语言模型(LLM)的工具包。

## 快速开始

1. (可选)在 `.env` 中配置模型和端口。
2. 启动服务:

   ```bash
   docker compose up -d
   ```

3. 通过 `http://localhost:23333/v1` 访问与 OpenAI 兼容的 API。

## 配置项

| 环境变量                  | 默认值                          | 说明                                 |
| ------------------------ | ------------------------------ | ------------------------------------ |
| `LMDEPLOY_VERSION`       | `v0.11.1-cu12.8`               | LMDeploy 镜像版本                    |
| `LMDEPLOY_PORT_OVERRIDE` | `23333`                        | API 服务器的主机端口                 |
| `LMDEPLOY_MODEL`         | `internlm/internlm2-chat-1_8b` | HuggingFace 模型 ID 或本地路径       |
| `HF_TOKEN`               |                                | 用于访问私有模型的 HuggingFace Token |

## 健康检查

该配置包含健康检查,用于验证 OpenAI `/v1/models` 接口是否响应。

## GPU 支持

默认情况下,此配置会预留 1 个 NVIDIA GPU。请确保您的主机已安装 [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)。

50 src/lmdeploy/docker-compose.yaml Normal file
@@ -0,0 +1,50 @@

x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  lmdeploy:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}openmmlab/lmdeploy:${LMDEPLOY_VERSION:-v0.11.1-cu12.8}
    ports:
      - "${LMDEPLOY_PORT_OVERRIDE:-23333}:23333"
    volumes:
      - lmdeploy_data:/root/.cache
    environment:
      - TZ=${TZ:-UTC}
      - HF_TOKEN=${HF_TOKEN:-}
    command:
      - lmdeploy
      - serve
      - api_server
      - ${LMDEPLOY_MODEL:-internlm/internlm2-chat-1_8b}
      - --server-name
      - "0.0.0.0"
      - --server-port
      - "23333"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:23333/v1/models"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    deploy:
      resources:
        limits:
          cpus: ${LMDEPLOY_CPU_LIMIT:-4.0}
          memory: ${LMDEPLOY_MEMORY_LIMIT:-8G}
        reservations:
          cpus: ${LMDEPLOY_CPU_RESERVATION:-2.0}
          memory: ${LMDEPLOY_MEMORY_RESERVATION:-4G}
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    shm_size: ${LMDEPLOY_SHM_SIZE:-4g}

volumes:
  lmdeploy_data:

@@ -14,9 +14,6 @@ x-mongo: &mongo
    MONGO_INITDB_ROOT_USERNAME: ${MONGO_INITDB_ROOT_USERNAME:-root}
    MONGO_INITDB_ROOT_PASSWORD: ${MONGO_INITDB_ROOT_PASSWORD:-password}
    MONGO_INITDB_DATABASE: ${MONGO_INITDB_DATABASE:-admin}
  volumes:
    - ./secrets/rs0.key:/data/rs0.key:ro
    - mongo_data:/data/db
  entrypoint:
    - bash
    - -c
@@ -49,6 +46,9 @@
    <<: *mongo
    ports:
      - "${MONGO_PORT_OVERRIDE_1:-27017}:27017"
    volumes:
      - mongo_data:/data/db
      - ./secrets/rs0.key:/data/rs0.key:ro

  mongo-init:
    <<: *defaults
@@ -66,6 +66,8 @@
      MONGO_HOST: ${MONGO_HOST:-host.docker.internal}
    volumes:
      - ./secrets/rs0.key:/data/rs0.key:ro
    extra_hosts:
      - "host.docker.internal:host-gateway"
    entrypoint:
      - bash
      - -c

@@ -14,8 +14,6 @@ x-mongo: &mongo
    MONGO_INITDB_ROOT_USERNAME: ${MONGO_INITDB_ROOT_USERNAME:-root}
    MONGO_INITDB_ROOT_PASSWORD: ${MONGO_INITDB_ROOT_PASSWORD:-password}
    MONGO_INITDB_DATABASE: ${MONGO_INITDB_DATABASE:-admin}
  volumes:
    - ./secrets/rs0.key:/data/rs0.key:ro
  entrypoint:
    - bash
    - -c
@@ -47,16 +45,25 @@
    <<: *mongo
    ports:
      - "${MONGO_PORT_OVERRIDE_1:-27017}:27017"
    volumes:
      - mongo1_data:/data/db
      - ./secrets/rs0.key:/data/rs0.key:ro

  mongo2:
    <<: *mongo
    ports:
      - "${MONGO_PORT_OVERRIDE_2:-27018}:27017"
    volumes:
      - mongo2_data:/data/db
      - ./secrets/rs0.key:/data/rs0.key:ro

  mongo3:
    <<: *mongo
    ports:
      - "${MONGO_PORT_OVERRIDE_3:-27019}:27017"
    volumes:
      - mongo3_data:/data/db
      - ./secrets/rs0.key:/data/rs0.key:ro

  mongo-init:
    <<: *defaults
@@ -78,6 +85,8 @@
      MONGO_PORT_2: ${MONGO_PORT_OVERRIDE_2:-27018}
      MONGO_PORT_3: ${MONGO_PORT_OVERRIDE_3:-27019}
      MONGO_HOST: ${MONGO_HOST:-host.docker.internal}
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - ./secrets/rs0.key:/data/rs0.key:ro
    entrypoint:
@@ -122,3 +131,8 @@
        reservations:
          cpus: ${MONGO_REPLICA_INIT_CPU_RESERVATION:-0.50}
          memory: ${MONGO_REPLICA_INIT_MEMORY_RESERVATION:-1024M}

volumes:
  mongo1_data:
  mongo2_data:
  mongo3_data:

31 src/opencode/.env.example Normal file
@@ -0,0 +1,31 @@

# OpenCode Version
OPENCODE_VERSION=1.1.27

# Host Port Override
OPENCODE_PORT_OVERRIDE=4096

# Project Directory to mount (absolute or relative path)
# This is where OpenCode will perform coding tasks
OPENCODE_PROJECT_DIR=./project

# Timezone
TZ=UTC

# LLM Provider API Keys
# You need at least one of these to use OpenCode
ANTHROPIC_API_KEY=
OPENAI_API_KEY=
GEMINI_API_KEY=
DEEPSEEK_API_KEY=
GROQ_API_KEY=
TOGETHER_API_KEY=
MISTRAL_API_KEY=

# Optional: Inline JSON config content
# OPENCODE_CONFIG_CONTENT={"theme": "opencode", "autoupdate": false}

# Resource Limits
OPENCODE_CPU_LIMIT=1.0
OPENCODE_MEMORY_LIMIT=2G
OPENCODE_CPU_RESERVATION=0.25
OPENCODE_MEMORY_RESERVATION=512M

42 src/opencode/README.md Normal file
@@ -0,0 +1,42 @@

# OpenCode

[English](./README.md) | [中文](./README.zh.md)

[OpenCode](https://github.com/anomalyco/opencode) is an open-source AI coding agent built for the terminal and web. It lets you use various LLM providers to automate coding tasks in your local or remote projects.

## Usage

1. Copy `.env.example` to `.env`.
2. Set your preferred LLM provider API key in `.env` (e.g., `ANTHROPIC_API_KEY`).
3. Set `OPENCODE_PROJECT_DIR` to the path of the project you want the agent to work on.
4. Run the service:

   ```bash
   docker compose up -d
   ```

5. Access the web interface at `http://localhost:4096`.

## Configuration

- `OPENCODE_VERSION`: The version of the OpenCode image (default: `1.1.27`).
- `OPENCODE_PORT_OVERRIDE`: The host port to expose the web interface (default: `4096`).
- `OPENCODE_PROJECT_DIR`: Path to the project codebase you want the agent to have access to.
- `ANTHROPIC_API_KEY`: API key for Anthropic Claude models.
- `OPENAI_API_KEY`: API key for OpenAI models.
- `GEMINI_API_KEY`: API key for Google Gemini models.
- `DEEPSEEK_API_KEY`: API key for DeepSeek models.
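
A minimal `.env` sketch tying these together (the key and project path below are placeholders for illustration, not real values):

```bash
# .env - placeholder values for illustration
OPENCODE_VERSION=1.1.27
OPENCODE_PORT_OVERRIDE=4096
OPENCODE_PROJECT_DIR=./my-project
ANTHROPIC_API_KEY=sk-ant-xxxxxxxx
```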
## Volumes
|
||||
|
||||
- `opencode_data`: Stores configuration, session data, and cache.
|
||||
- Mounts the target project directory to `/app`.
|
||||
|
||||
## Resources
|
||||
|
||||
Default limits:
|
||||
|
||||
- CPU: 1.0
|
||||
- Memory: 2G
|
||||
|
||||
You can override these in your `.env` file using `OPENCODE_CPU_LIMIT` and `OPENCODE_MEMORY_LIMIT`.
|

42 src/opencode/README.zh.md Normal file
@@ -0,0 +1,42 @@

# OpenCode

[English](./README.md) | [中文](./README.zh.md)

[OpenCode](https://github.com/anomalyco/opencode) 是一个为终端和 Web 构建的开源 AI 编程助手。它允许你使用多种大语言模型(LLM)提供商来自动执行本地或远程项目中的编码任务。

## 使用方法

1. 将 `.env.example` 复制为 `.env`。
2. 在 `.env` 中设置你偏好的 LLM 提供商 API 密钥(例如 `ANTHROPIC_API_KEY`)。
3. 将 `OPENCODE_PROJECT_DIR` 设置为你希望助手工作的项目路径。
4. 启动服务:

   ```bash
   docker compose up -d
   ```

5. 在浏览器中访问 `http://localhost:4096` 进入 Web 界面。

## 配置项

- `OPENCODE_VERSION`:OpenCode 镜像版本(默认为 `1.1.27`)。
- `OPENCODE_PORT_OVERRIDE`:映射到宿主机的 Web 端口(默认为 `4096`)。
- `OPENCODE_PROJECT_DIR`:助手有权访问的项目代码库路径。
- `ANTHROPIC_API_KEY`:Anthropic Claude 模型的 API 密钥。
- `OPENAI_API_KEY`:OpenAI 模型的 API 密钥。
- `GEMINI_API_KEY`:Google Gemini 模型的 API 密钥。
- `DEEPSEEK_API_KEY`:DeepSeek 模型的 API 密钥。

## 数据卷

- `opencode_data`:用于存储配置、会话数据和缓存。
- 将目标项目目录挂载到容器内的 `/app` 路径。

## 资源限制

默认限制:

- CPU:1.0
- 内存:2G

你可以通过 `.env` 文件中的 `OPENCODE_CPU_LIMIT` 和 `OPENCODE_MEMORY_LIMIT` 来覆盖这些默认值。

54 src/opencode/docker-compose.yaml Normal file
@@ -0,0 +1,54 @@

x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  opencode:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}ghcr.io/anomalyco/opencode:${OPENCODE_VERSION:-1.1.27}
    command: web --hostname 0.0.0.0 --port 4096
    ports:
      - "${OPENCODE_PORT_OVERRIDE:-4096}:4096"
    volumes:
      - opencode_data:/root/.opencode
      - ${OPENCODE_PROJECT_DIR:-./project}:/app
    environment:
      - TZ=${TZ:-UTC}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - GEMINI_API_KEY=${GEMINI_API_KEY:-}
      - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
      - GROQ_API_KEY=${GROQ_API_KEY:-}
      - TOGETHER_API_KEY=${TOGETHER_API_KEY:-}
      - MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
      - OPENCODE_CONFIG_CONTENT=${OPENCODE_CONFIG_CONTENT:-}
    working_dir: /app
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--quiet",
          "--tries=1",
          "--spider",
          "http://localhost:4096/",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          cpus: ${OPENCODE_CPU_LIMIT:-1.0}
          memory: ${OPENCODE_MEMORY_LIMIT:-2G}
        reservations:
          cpus: ${OPENCODE_CPU_RESERVATION:-0.25}
          memory: ${OPENCODE_MEMORY_RESERVATION:-512M}

volumes:
  opencode_data:

@@ -28,7 +28,15 @@ services:
          cpus: ${OPENLIST_CPU_RESERVATION:-0.25}
          memory: ${OPENLIST_MEMORY_RESERVATION:-256M}
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5244/"]
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:5244/",
        ]
      interval: 30s
      timeout: 10s
      retries: 3

@@ -40,7 +40,8 @@ services:
          cpus: ${OPENSEARCH_CPU_RESERVATION:-1.0}
          memory: ${OPENSEARCH_MEMORY_RESERVATION:-1G}
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:9200/_cluster/health || exit 1"]
      test:
        ["CMD-SHELL", "curl -f http://localhost:9200/_cluster/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -67,7 +68,15 @@ services:
          cpus: ${OPENSEARCH_DASHBOARDS_CPU_RESERVATION:-0.5}
          memory: ${OPENSEARCH_DASHBOARDS_MEMORY_RESERVATION:-512M}
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5601/api/status"]
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:5601/api/status",
        ]
      interval: 30s
      timeout: 10s
      retries: 3

@@ -3,7 +3,7 @@
COMPOSE_PROFILES=sqlite

# Phoenix version
PHOENIX_VERSION=12.28.1-nonroot
PHOENIX_VERSION=12.31.2-nonroot

# Timezone
TZ=UTC

@@ -32,7 +32,7 @@ This project supports two modes of operation via Docker Compose profiles:
| Variable Name                    | Description                               | Default Value     |
| -------------------------------- | ----------------------------------------- | ----------------- |
| COMPOSE_PROFILES                 | Active profiles (`sqlite` or `postgres`)  | `sqlite`          |
| PHOENIX_VERSION                  | Phoenix image version                     | `12.28.1-nonroot` |
| PHOENIX_VERSION                  | Phoenix image version                     | `12.31.2-nonroot` |
| PHOENIX_PORT_OVERRIDE            | Host port for Phoenix UI and HTTP API     | `6006`            |
| PHOENIX_GRPC_PORT_OVERRIDE       | Host port for OTLP gRPC collector         | `4317`            |
| PHOENIX_PROMETHEUS_PORT_OVERRIDE | Host port for Prometheus metrics          | `9090`            |

@@ -32,7 +32,7 @@ Arize Phoenix 是一个开源的 AI 可观测性平台,专为 LLM 应用设计
| 变量名                            | 描述                                      | 默认值            |
| -------------------------------- | ----------------------------------------- | ----------------- |
| COMPOSE_PROFILES                 | 激活的配置文件(`sqlite` 或 `postgres`)  | `sqlite`          |
| PHOENIX_VERSION                  | Phoenix 镜像版本                          | `12.28.1-nonroot` |
| PHOENIX_VERSION                  | Phoenix 镜像版本                          | `12.31.2-nonroot` |
| PHOENIX_PORT_OVERRIDE            | Phoenix UI 和 HTTP API 的主机端口         | `6006`            |
| PHOENIX_GRPC_PORT_OVERRIDE       | OTLP gRPC 采集器的主机端口                | `4317`            |
| PHOENIX_PROMETHEUS_PORT_OVERRIDE | Prometheus 指标的主机端口                 | `9090`            |

@@ -11,7 +11,7 @@ x-defaults: &defaults

x-phoenix-common: &phoenix-common
  <<: *defaults
  image: ${GLOBAL_REGISTRY:-}arizephoenix/phoenix:${PHOENIX_VERSION:-12.28.1-nonroot}
  image: ${GLOBAL_REGISTRY:-}arizephoenix/phoenix:${PHOENIX_VERSION:-12.31.2-nonroot}
  ports:
    - "${PHOENIX_PORT_OVERRIDE:-6006}:6006" # UI and OTLP HTTP collector
    - "${PHOENIX_GRPC_PORT_OVERRIDE:-4317}:4317" # OTLP gRPC collector

15 src/pogocache/.env.example Normal file
@@ -0,0 +1,15 @@

# Pogocache Version
POGOCACHE_VERSION=1.3.1

# Host port override
POGOCACHE_PORT_OVERRIDE=9401

# Resource limits
POGOCACHE_CPU_LIMIT=0.50
POGOCACHE_MEMORY_LIMIT=512M
POGOCACHE_CPU_RESERVATION=0.10
POGOCACHE_MEMORY_RESERVATION=128M

# Extra arguments for pogocache
# Example: --auth mypassword --threads 4
POGOCACHE_EXTRA_ARGS=

35 src/pogocache/README.md Normal file
@@ -0,0 +1,35 @@

# Pogocache

[Pogocache](https://github.com/tidwall/pogocache) is fast caching software built from scratch with a focus on low latency and CPU efficiency. It is a high-performance, multi-protocol Redis alternative.

## Features

- **Fast**: Faster than Memcached, Valkey, Redis, Dragonfly, and Garnet.
- **Multi-protocol**: Supports Redis RESP, Memcached, the PostgreSQL wire protocol, and HTTP.
- **Persistence**: Supports AOF-style persistence.
- **Resource Efficient**: Low CPU and memory overhead.

## Deployment

```bash
docker compose up -d
```

## Configuration

| Variable                   | Default | Description                                   |
| ------------------------- | ------- | --------------------------------------------- |
| `POGOCACHE_VERSION`       | `1.3.1` | Pogocache image version                       |
| `POGOCACHE_PORT_OVERRIDE` | `9401`  | Host port for Pogocache                       |
| `POGOCACHE_EXTRA_ARGS`    |         | Additional CLI arguments (e.g. `--auth pass`) |

## Accessing Pogocache

- **Redis**: `redis-cli -p 9401`
- **Postgres**: `psql -h localhost -p 9401`
- **HTTP**: `curl http://localhost:9401/key`
- **Memcached**: `telnet localhost 9401`
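
For example, a round trip through the Redis protocol (a sketch; `redis-cli` must be installed on the host, and the key name is arbitrary):

```bash
# Store and read back a value over RESP
redis-cli -p 9401 SET greeting "hello"
redis-cli -p 9401 GET greeting
```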

## Persistence

By default, the data is persisted to a named volume `pogocache_data` at `/data/pogocache.db`.
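
A sketch to confirm the persistence file is being written (the volume name is prefixed with the Compose project name, typically `pogocache`; adjust if yours differs):

```bash
# Inspect the named volume from a throwaway container
docker run --rm -v pogocache_pogocache_data:/data alpine ls -l /data
```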

35 src/pogocache/README.zh.md Normal file
@@ -0,0 +1,35 @@

# Pogocache

[Pogocache](https://github.com/tidwall/pogocache) 是一款从零开始构建的高速缓存软件,专注于低延迟和 CPU 效率。它是一个高性能、多协议的 Redis 替代方案。

## 特性

- **极速**:比 Memcached、Valkey、Redis、Dragonfly 和 Garnet 更快。
- **多协议支持**:支持 Redis RESP、Memcached、PostgreSQL 连接协议(wire protocol)和 HTTP。
- **持久化**:支持 AOF 风格的持久化。
- **资源高效**:极低的 CPU 和内存开销。

## 部署

```bash
docker compose up -d
```

## 配置说明

| 变量名                     | 默认值  | 描述                                   |
| ------------------------- | ------- | -------------------------------------- |
| `POGOCACHE_VERSION`       | `1.3.1` | Pogocache 镜像版本                     |
| `POGOCACHE_PORT_OVERRIDE` | `9401`  | 主机端口                               |
| `POGOCACHE_EXTRA_ARGS`    |         | 额外的命令行参数(例如 `--auth pass`) |

## 访问方式

- **Redis**:`redis-cli -p 9401`
- **Postgres**:`psql -h localhost -p 9401`
- **HTTP**:`curl http://localhost:9401/key`
- **Memcached**:`telnet localhost 9401`

## 持久化

默认情况下,数据持久化到命名卷 `pogocache_data` 中的 `/data/pogocache.db`。

42 src/pogocache/docker-compose.yaml Normal file
@@ -0,0 +1,42 @@

# Docker Compose for Pogocache
# Pogocache is fast caching software built from scratch with a focus on low latency and cpu efficiency.
# See: https://github.com/tidwall/pogocache

x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 10m
      max-file: "3"

services:
  pogocache:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}pogocache/pogocache:${POGOCACHE_VERSION:-1.3.1}
    ports:
      - "${POGOCACHE_PORT_OVERRIDE:-9401}:9401"
    environment:
      - TZ=${TZ:-UTC}
    volumes:
      - pogocache_data:/data
    command: >
      ${POGOCACHE_EXTRA_ARGS:-}
      --persist /data/pogocache.db
    healthcheck:
      test: ["CMD-SHELL", "nc -z localhost 9401 || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 5s
    deploy:
      resources:
        limits:
          cpus: ${POGOCACHE_CPU_LIMIT:-0.50}
          memory: ${POGOCACHE_MEMORY_LIMIT:-512M}
        reservations:
          cpus: ${POGOCACHE_CPU_RESERVATION:-0.10}
          memory: ${POGOCACHE_MEMORY_RESERVATION:-128M}

volumes:
  pogocache_data:

@@ -1,7 +1,7 @@
# Renovate Configuration

# Image version
RENOVATE_VERSION=42.52.5-full
RENOVATE_VERSION=42.85.4-full

# Global registry prefix (optional, e.g., your.registry.com/)
GLOBAL_REGISTRY=

@@ -53,7 +53,7 @@ Key environment variables in `.env`:

| Variable                 | Description             | Default        |
| ----------------------- | ----------------------- | -------------- |
| `RENOVATE_VERSION`      | Renovate image version  | `42.52.5-full` |
| `RENOVATE_VERSION`      | Renovate image version  | `42.85.4-full` |
| `RENOVATE_PLATFORM`     | Platform type           | `github`       |
| `RENOVATE_TOKEN`        | Authentication token    | **(required)** |
| `RENOVATE_REPOSITORIES` | Repositories to process | `''`           |

@@ -53,7 +53,7 @@ Renovate 是一个自动化依赖更新工具,当有新版本可用时,它

| 变量                     | 描述              | 默认值         |
| ----------------------- | ----------------- | -------------- |
| `RENOVATE_VERSION`      | Renovate 镜像版本 | `42.52.5-full` |
| `RENOVATE_VERSION`      | Renovate 镜像版本 | `42.85.4-full` |
| `RENOVATE_PLATFORM`     | 平台类型          | `github`       |
| `RENOVATE_TOKEN`        | 身份验证令牌      | **(必需)**   |
| `RENOVATE_REPOSITORIES` | 要处理的仓库      | `''`           |

@@ -12,7 +12,7 @@ x-defaults: &defaults
services:
  renovate:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}renovate/renovate:${RENOVATE_VERSION:-42.52.5-full}
    image: ${GLOBAL_REGISTRY:-}renovate/renovate:${RENOVATE_VERSION:-42.85.4-full}

# Renovate runs as a scheduled job, not a continuous service
# Use 'docker compose run --rm renovate' to execute manually

48 src/selenium/.env.example Normal file
@@ -0,0 +1,48 @@

# Selenium Standalone Configuration

# Image Registry (optional)
# GLOBAL_REGISTRY=

# Selenium Version (stable version tag recommended)
# Visit https://hub.docker.com/r/selenium/standalone-chrome/tags for available versions
# Format: <browser-version>-<date> or <browser-version>-chromedriver-<driver-version>-grid-<grid-version>-<date>
SELENIUM_VERSION=144.0-20260120

# Shared Memory Size (required for browser stability)
# Chrome and Firefox need sufficient shared memory to prevent crashes
SELENIUM_SHM_SIZE=2g

# Port Configuration
# Selenium Grid HTTP port
SELENIUM_GRID_PORT_OVERRIDE=4444
# VNC port for viewing browser sessions (browser debugger)
SELENIUM_VNC_PORT_OVERRIDE=7900

# Timezone
TZ=UTC

# Screen Resolution Settings
SE_SCREEN_WIDTH=1920
SE_SCREEN_HEIGHT=1080
SE_SCREEN_DEPTH=24
SE_SCREEN_DPI=96

# VNC Configuration
# Password for VNC access (default: secret)
SE_VNC_PASSWORD=secret

# Session Configuration
# Maximum concurrent sessions per container
SE_NODE_MAX_SESSIONS=1
# Session timeout in seconds (default: 300)
SE_NODE_SESSION_TIMEOUT=300

# Xvfb Configuration
# Start virtual display server (required for headless mode in Chrome/Chromium v127+)
SE_START_XVFB=true

# Resource Limits
SELENIUM_CPU_LIMIT=2.0
SELENIUM_MEMORY_LIMIT=2G
SELENIUM_CPU_RESERVATION=1.0
SELENIUM_MEMORY_RESERVATION=1G
||||
281 src/selenium/README.md Normal file

@@ -0,0 +1,281 @@
# Selenium Standalone with Chrome

[](https://hub.docker.com/r/selenium/standalone-chrome)
[](https://hub.docker.com/r/selenium/standalone-chrome)
[](https://github.com/SeleniumHQ/docker-selenium/blob/trunk/LICENSE.md)

Selenium Grid in Standalone mode with Chrome browser for browser automation at scale.

## Quick Start

```bash
# Start the service
docker compose up -d

# Verify the service is running
docker compose ps

# View logs
docker compose logs -f

# Stop the service
docker compose down
```
## Service Information

### Ports

| Port | Service       | Description                                  |
| ---- | ------------- | -------------------------------------------- |
| 4444 | Selenium Grid | HTTP endpoint for WebDriver                  |
| 7900 | noVNC         | Browser viewing interface (password: secret) |

### Default Credentials

- VNC Password: `secret` (configurable via `SE_VNC_PASSWORD`)

### Volumes

- `selenium_downloads`: Browser downloads directory (`/home/seluser/Downloads`)

## Configuration

### Environment Variables

All configuration can be customized via the `.env` file:

```bash
# Copy the example configuration
cp .env.example .env

# Edit the configuration
nano .env
```

Key configurations:

| Variable                      | Default          | Description                                          |
| ----------------------------- | ---------------- | ---------------------------------------------------- |
| `SELENIUM_VERSION`            | `144.0-20260120` | Docker image tag (Chrome version + date)             |
| `SELENIUM_SHM_SIZE`           | `2g`             | Shared memory size (required for browser stability)  |
| `SELENIUM_GRID_PORT_OVERRIDE` | `4444`           | Grid HTTP endpoint port                              |
| `SELENIUM_VNC_PORT_OVERRIDE`  | `7900`           | noVNC viewer port                                    |
| `SE_SCREEN_WIDTH`             | `1920`           | Browser screen width                                 |
| `SE_SCREEN_HEIGHT`            | `1080`           | Browser screen height                                |
| `SE_NODE_MAX_SESSIONS`        | `1`              | Max concurrent sessions per container                |
| `SE_NODE_SESSION_TIMEOUT`     | `300`            | Session timeout in seconds                           |

For a complete list of environment variables, see the [Selenium Docker documentation](https://github.com/SeleniumHQ/docker-selenium/blob/trunk/ENV_VARIABLES.md).
## Usage

### Basic WebDriver Test (Python)

```python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

# Configure Chrome options
options = Options()

# Connect to Selenium Grid
driver = webdriver.Remote(
    command_executor='http://localhost:4444',
    options=options
)

# Run your test
driver.get('https://www.selenium.dev/')
print(driver.title)

# Clean up
driver.quit()
```
### Basic WebDriver Test (Node.js)

```javascript
const { Builder } = require('selenium-webdriver');

(async function example() {
  let driver = await new Builder()
    .forBrowser('chrome')
    .usingServer('http://localhost:4444')
    .build();

  try {
    await driver.get('https://www.selenium.dev/');
    console.log(await driver.getTitle());
  } finally {
    await driver.quit();
  }
})();
```
### Viewing Browser Sessions

You can watch tests execute in real time using noVNC:

1. Open your browser to `http://localhost:7900/?autoconnect=1&resize=scale&password=secret`
2. The default VNC password is `secret`
3. You'll see the browser session in real time

Alternatively, use a VNC client to connect to `localhost:5900` (if exposed).
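To expose the raw VNC port as well, add it to the service's port mappings (a minimal sketch; 5900 is the VNC server port inside the container):

```yaml
ports:
  - "4444:4444"
  - "7900:7900"
  - "5900:5900" # raw VNC, for desktop VNC clients
```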
## Advanced Configuration

### Changing Browser Version

To use a specific Chrome version, update the `SELENIUM_VERSION` in your `.env` file:

```bash
# Use Chrome 143.0
SELENIUM_VERSION=143.0-20260120

# Or use a specific Selenium Grid version
SELENIUM_VERSION=144.0-chromedriver-144.0-grid-4.40.0-20260120
```

Visit [Docker Hub](https://hub.docker.com/r/selenium/standalone-chrome/tags) for available versions.

### Increasing Concurrent Sessions

To run multiple concurrent sessions in one container (not recommended for production):

```bash
SE_NODE_MAX_SESSIONS=5
```

**Note:** For better stability, scale containers instead. Because multiple replicas cannot bind the same host port, remove the fixed host-port mappings (or front the nodes with a Grid hub) before scaling:

```bash
docker compose up -d --scale selenium-chrome=3
```
### Retrieving Downloaded Files

To access files downloaded during tests, mount the downloads directory:

```yaml
volumes:
  - ./downloads:/home/seluser/Downloads
```

**Linux users:** Set proper permissions before mounting:

```bash
mkdir -p ./downloads
sudo chown 1200:1201 ./downloads
```

### Running in Headless Mode

For newer Chrome versions (127+), headless mode requires Xvfb:

```bash
SE_START_XVFB=true
```

Then configure headless mode in your test:

```python
options = Options()
options.add_argument('--headless=new')
```

### Custom Screen Resolution

Adjust screen resolution for your test needs:

```bash
SE_SCREEN_WIDTH=1366
SE_SCREEN_HEIGHT=768
SE_SCREEN_DEPTH=24
SE_SCREEN_DPI=74
```

## Health Check

The container includes a built-in health check that polls the Grid status endpoint every 30 seconds:

```bash
# Check container health
docker compose ps

# Or inspect the health status
docker inspect --format='{{json .State.Health.Status}}' <container-id>
```
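You can also query the same status endpoint the health check uses as a quick manual check (the Grid reports `"ready": true` once it can accept sessions):

```bash
curl -s http://localhost:4444/status
```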
## Troubleshooting

### Browser Crashes

If you see errors like "Chrome failed to start" or "invalid argument: can't kill an exited process":

1. **Ensure sufficient shared memory:** The default `2g` should work for most cases

   ```bash
   SELENIUM_SHM_SIZE=2g
   ```

2. **Check headless mode configuration:** Make sure `SE_START_XVFB=true` if using headless mode with Chrome 127+

### Permission Issues (Linux)

When mounting volumes on Linux, ensure correct permissions:

```bash
# For the downloads directory
mkdir -p ./downloads
sudo chown 1200:1201 ./downloads

# Check the user/group IDs inside the container
docker compose exec selenium-chrome id
```

### Resource Constraints

If tests are slow or containers are being OOM-killed:

```bash
# Increase resource limits
SELENIUM_CPU_LIMIT=4.0
SELENIUM_MEMORY_LIMIT=4G
```

### VNC Connection Issues

If you can't connect to VNC:

1. Check that port 7900 is not in use
2. Verify the VNC password is correct (default: `secret`)
3. Try disabling VNC authentication: `SE_VNC_NO_PASSWORD=true`

## Multi-Browser Support

For running multiple browser types (Chrome, Firefox, Edge), consider using:

- **Hub & Nodes architecture:** See the `docker-compose-grid.yaml` example (a minimal sketch follows this list)
- **Dynamic Grid:** Automatically spawns containers on demand
- **Selenium Grid 4:** Full distributed mode with Router, Distributor, etc.
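As a starting point, a minimal hub-and-nodes layout might look like this (an illustrative sketch, not the shipped `docker-compose-grid.yaml`; the image tags are assumptions, while the `SE_EVENT_BUS_*` variables follow the upstream docker-selenium conventions):

```yaml
services:
  selenium-hub:
    image: selenium/hub:4.40.0 # assumed tag
    ports:
      - "4442:4442" # event bus publish
      - "4443:4443" # event bus subscribe
      - "4444:4444" # Grid endpoint
  chrome-node:
    image: selenium/node-chrome:144.0-20260120 # assumed tag
    shm_size: 2g
    depends_on:
      - selenium-hub
    environment:
      - SE_EVENT_BUS_HOST=selenium-hub
      - SE_EVENT_BUS_PUBLISH_PORT=4442
      - SE_EVENT_BUS_SUBSCRIBE_PORT=4443
```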
## Additional Resources

- [Selenium Documentation](https://www.selenium.dev/documentation/)
- [Docker Selenium GitHub](https://github.com/SeleniumHQ/docker-selenium)
- [Selenium Grid Configuration](https://www.selenium.dev/documentation/grid/)
- [Environment Variables Reference](https://github.com/SeleniumHQ/docker-selenium/blob/trunk/ENV_VARIABLES.md)

## Security Notes

- **VNC Password:** Change the default `secret` password in production
- **Network Exposure:** Do not expose Selenium Grid directly to the internet
- **Resource Limits:** Always set CPU and memory limits to prevent resource exhaustion
- **User Permissions:** Selenium runs as the non-root user `seluser` (UID 1200, GID 1201)

## License

This configuration is provided under the Apache License 2.0, following the Selenium project's licensing.

The Selenium Docker images are maintained by the SeleniumHQ team and community contributors.
281 src/selenium/README.zh.md Normal file

@@ -0,0 +1,281 @@
# Selenium Standalone Chrome

[](https://hub.docker.com/r/selenium/standalone-chrome)
[](https://hub.docker.com/r/selenium/standalone-chrome)
[](https://github.com/SeleniumHQ/docker-selenium/blob/trunk/LICENSE.md)

Selenium Grid in Standalone mode with a Chrome browser, for browser automation at scale.

## Quick Start

```bash
# Start the service
docker compose up -d

# Verify the service is running
docker compose ps

# View logs
docker compose logs -f

# Stop the service
docker compose down
```

## Service Information

### Ports

| Port | Service       | Description                                  |
| ---- | ------------- | -------------------------------------------- |
| 4444 | Selenium Grid | WebDriver HTTP endpoint                      |
| 7900 | noVNC         | Browser viewing interface (password: secret) |

### Default Credentials

- VNC password: `secret` (configurable via `SE_VNC_PASSWORD`)

### Volumes

- `selenium_downloads`: browser downloads directory (`/home/seluser/Downloads`)

## Configuration

### Environment Variables

All configuration can be customized via the `.env` file:

```bash
# Copy the example configuration
cp .env.example .env

# Edit the configuration
nano .env
```

Key configurations:

| Variable                      | Default          | Description                                         |
| ----------------------------- | ---------------- | ---------------------------------------------------- |
| `SELENIUM_VERSION`            | `144.0-20260120` | Docker image tag (Chrome version + date)             |
| `SELENIUM_SHM_SIZE`           | `2g`             | Shared memory size (required for browser stability)  |
| `SELENIUM_GRID_PORT_OVERRIDE` | `4444`           | Grid HTTP endpoint port                              |
| `SELENIUM_VNC_PORT_OVERRIDE`  | `7900`           | noVNC viewer port                                    |
| `SE_SCREEN_WIDTH`             | `1920`           | Browser screen width                                 |
| `SE_SCREEN_HEIGHT`            | `1080`           | Browser screen height                                |
| `SE_NODE_MAX_SESSIONS`        | `1`              | Max concurrent sessions per container                |
| `SE_NODE_SESSION_TIMEOUT`     | `300`            | Session timeout (seconds)                            |

For the complete list of environment variables, see the [Selenium Docker documentation](https://github.com/SeleniumHQ/docker-selenium/blob/trunk/ENV_VARIABLES.md).

## Usage

### Basic WebDriver Test (Python)

```python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

# Configure Chrome options
options = Options()

# Connect to Selenium Grid
driver = webdriver.Remote(
    command_executor='http://localhost:4444',
    options=options
)

# Run the test
driver.get('https://www.selenium.dev/')
print(driver.title)

# Clean up
driver.quit()
```

### Basic WebDriver Test (Node.js)

```javascript
const { Builder } = require('selenium-webdriver');

(async function example() {
  let driver = await new Builder()
    .forBrowser('chrome')
    .usingServer('http://localhost:4444')
    .build();

  try {
    await driver.get('https://www.selenium.dev/');
    console.log(await driver.getTitle());
  } finally {
    await driver.quit();
  }
})();
```

### Viewing Browser Sessions

You can watch tests execute in real time using noVNC:

1. Open `http://localhost:7900/?autoconnect=1&resize=scale&password=secret` in your browser
2. The default VNC password is `secret`
3. You will see the browser session in real time

Alternatively, connect a VNC client to `localhost:5900` (if exposed).

## Advanced Configuration

### Changing the Browser Version

To use a specific Chrome version, update `SELENIUM_VERSION` in the `.env` file:

```bash
# Use Chrome 143.0
SELENIUM_VERSION=143.0-20260120

# Or use a specific Selenium Grid version
SELENIUM_VERSION=144.0-chromedriver-144.0-grid-4.40.0-20260120
```

Visit [Docker Hub](https://hub.docker.com/r/selenium/standalone-chrome/tags) for available versions.

### Increasing Concurrent Sessions

To run multiple concurrent sessions in a single container (not recommended for production):

```bash
SE_NODE_MAX_SESSIONS=5
```

**Note:** For better stability, scale containers instead. Because multiple replicas cannot bind the same host port, remove the fixed host-port mappings (or front the nodes with a Grid hub) before scaling:

```bash
docker compose up -d --scale selenium-chrome=3
```

### Retrieving Downloaded Files

To access files downloaded during tests, mount the downloads directory:

```yaml
volumes:
  - ./downloads:/home/seluser/Downloads
```

**Linux users:** set the correct permissions before mounting:

```bash
mkdir -p ./downloads
sudo chown 1200:1201 ./downloads
```

### Running in Headless Mode

For newer Chrome versions (127+), headless mode requires Xvfb:

```bash
SE_START_XVFB=true
```

Then configure headless mode in your test:

```python
options = Options()
options.add_argument('--headless=new')
```

### Custom Screen Resolution

Adjust the screen resolution to fit your test needs:

```bash
SE_SCREEN_WIDTH=1366
SE_SCREEN_HEIGHT=768
SE_SCREEN_DEPTH=24
SE_SCREEN_DPI=74
```

## Health Check

The container includes a built-in health check that polls the Grid status endpoint every 30 seconds:

```bash
# Check container health
docker compose ps

# Or inspect the health status in detail
docker inspect --format='{{json .State.Health.Status}}' <container-id>
```

## Troubleshooting

### Browser Crashes

If you see errors such as "Chrome failed to start" or "invalid argument: can't kill an exited process":

1. **Ensure sufficient shared memory:** the default `2g` should cover most cases

   ```bash
   SELENIUM_SHM_SIZE=2g
   ```

2. **Check the headless mode configuration:** make sure `SE_START_XVFB=true` when using headless mode with Chrome 127+

### Permission Issues (Linux)

When mounting volumes on Linux, ensure correct permissions:

```bash
# For the downloads directory
mkdir -p ./downloads
sudo chown 1200:1201 ./downloads

# Check the user/group IDs inside the container
docker compose exec selenium-chrome id
```

### Resource Limits

If tests are slow or containers are being OOM-killed:

```bash
# Increase resource limits
SELENIUM_CPU_LIMIT=4.0
SELENIUM_MEMORY_LIMIT=4G
```

### VNC Connection Issues

If you cannot connect to VNC:

1. Check whether port 7900 is already in use
2. Verify the VNC password is correct (default: `secret`)
3. Try disabling VNC authentication: `SE_VNC_NO_PASSWORD=true`

## Multi-Browser Support

To run multiple browser types (Chrome, Firefox, Edge), consider:

- **Hub & Nodes architecture:** see the `docker-compose-grid.yaml` example
- **Dynamic Grid:** spawns containers automatically on demand
- **Selenium Grid 4:** full distributed mode with Router, Distributor, etc.

## Additional Resources

- [Selenium Documentation](https://www.selenium.dev/documentation/)
- [Docker Selenium GitHub](https://github.com/SeleniumHQ/docker-selenium)
- [Selenium Grid Configuration](https://www.selenium.dev/documentation/grid/)
- [Environment Variables Reference](https://github.com/SeleniumHQ/docker-selenium/blob/trunk/ENV_VARIABLES.md)

## Security Notes

- **VNC password:** change the default `secret` password in production
- **Network exposure:** do not expose Selenium Grid directly to the internet
- **Resource limits:** always set CPU and memory limits to prevent resource exhaustion
- **User permissions:** Selenium runs as the non-root user `seluser` (UID 1200, GID 1201)

## License

This configuration is provided under the Apache License 2.0, consistent with the Selenium project's licensing.

The Selenium Docker images are maintained by the SeleniumHQ team and community contributors.
50 src/selenium/docker-compose.yaml Normal file

@@ -0,0 +1,50 @@
# Selenium Standalone with Chrome
# This configuration runs Selenium Grid in Standalone mode with the Chrome browser
# Suitable for single-browser automation needs

x-defaults: &defaults
  restart: unless-stopped
  logging:
    driver: json-file
    options:
      max-size: 100m
      max-file: "3"

services:
  selenium-chrome:
    <<: *defaults
    image: ${GLOBAL_REGISTRY:-}selenium/standalone-chrome:${SELENIUM_VERSION:-144.0-20260120}
    shm_size: ${SELENIUM_SHM_SIZE:-2g}
    ports:
      - "${SELENIUM_GRID_PORT_OVERRIDE:-4444}:4444"
      - "${SELENIUM_VNC_PORT_OVERRIDE:-7900}:7900"
    volumes:
      - selenium_downloads:/home/seluser/Downloads
    environment:
      - TZ=${TZ:-UTC}
      - SE_SCREEN_WIDTH=${SE_SCREEN_WIDTH:-1920}
      - SE_SCREEN_HEIGHT=${SE_SCREEN_HEIGHT:-1080}
      - SE_SCREEN_DEPTH=${SE_SCREEN_DEPTH:-24}
      - SE_SCREEN_DPI=${SE_SCREEN_DPI:-96}
      - SE_VNC_PASSWORD=${SE_VNC_PASSWORD:-secret}
      - SE_NODE_MAX_SESSIONS=${SE_NODE_MAX_SESSIONS:-1}
      - SE_NODE_SESSION_TIMEOUT=${SE_NODE_SESSION_TIMEOUT:-300}
      - SE_START_XVFB=${SE_START_XVFB:-true}
    healthcheck:
      test: ["CMD", "/opt/bin/check-grid.sh", "--host", "0.0.0.0", "--port", "4444"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    deploy:
      resources:
        limits:
          cpus: ${SELENIUM_CPU_LIMIT:-2.0}
          memory: ${SELENIUM_MEMORY_LIMIT:-2G}
        reservations:
          cpus: ${SELENIUM_CPU_RESERVATION:-1.0}
          memory: ${SELENIUM_MEMORY_RESERVATION:-1G}

volumes:
  selenium_downloads: