From ea1ca927c8e744429735894fe79688a706ea9285 Mon Sep 17 00:00:00 2001
From: Sun-ZhenXing <1006925066@qq.com>
Date: Tue, 14 Apr 2026 15:22:06 +0800
Subject: [PATCH] feat: add multica/
---
README.md | 1 +
README.zh.md | 21 +-
apps/openlit/assets/clickhouse-config.xml | 74 ++++
apps/openlit/assets/clickhouse-init.sh | 326 ++++++++++++++++++
.../openlit/assets/otel-collector-config.yaml | 54 +++
apps/openlit/docker-compose.yaml | 3 +
builds/multica/.env.example | 55 +++
builds/multica/README.md | 77 +++++
builds/multica/README.zh.md | 77 +++++
builds/multica/docker-compose.yaml | 109 ++++++
10 files changed, 787 insertions(+), 10 deletions(-)
create mode 100644 apps/openlit/assets/clickhouse-config.xml
create mode 100644 apps/openlit/assets/clickhouse-init.sh
create mode 100644 apps/openlit/assets/otel-collector-config.yaml
create mode 100644 builds/multica/.env.example
create mode 100644 builds/multica/README.md
create mode 100644 builds/multica/README.zh.md
create mode 100644 builds/multica/docker-compose.yaml
diff --git a/README.md b/README.md
index ed0f476..1bd4e0d 100644
--- a/README.md
+++ b/README.md
@@ -38,6 +38,7 @@ These services require building custom Docker images from source.
| [IOPaint](./builds/io-paint) | 1.6.0 |
| [K3s inside DinD](./builds/k3s-inside-dind) | 0.2.2 |
| [MinerU vLLM](./builds/mineru) | 3.0.1 |
+| [Multica](./builds/multica) | v0.1.32 |
| [OpenFang](./builds/openfang) | 0.1.0 |
| [Paperclip](./builds/paperclip) | main |
diff --git a/README.zh.md b/README.zh.md
index bfbf237..986db23 100644
--- a/README.zh.md
+++ b/README.zh.md
@@ -30,16 +30,17 @@ docker compose exec redis redis-cli ping
这些服务需要从源代码构建自定义 Docker 镜像。
-| 服务 | 版本 |
-| ------------------------------------------- | ------ |
-| [Debian DinD](./builds/debian-dind) | 0.1.2 |
-| [DeerFlow](./builds/deer-flow) | 2.0 |
-| [goose](./builds/goose) | 1.18.0 |
-| [IOPaint](./builds/io-paint) | 1.6.0 |
-| [K3s inside DinD](./builds/k3s-inside-dind) | 0.2.2 |
-| [MinerU vLLM](./builds/mineru) | 3.0.1 |
-| [OpenFang](./builds/openfang) | 0.1.0 |
-| [Paperclip](./builds/paperclip) | main |
+| 服务 | 版本 |
+| ------------------------------------------- | ------- |
+| [Debian DinD](./builds/debian-dind) | 0.1.2 |
+| [DeerFlow](./builds/deer-flow) | 2.0 |
+| [goose](./builds/goose) | 1.18.0 |
+| [IOPaint](./builds/io-paint) | 1.6.0 |
+| [K3s inside DinD](./builds/k3s-inside-dind) | 0.2.2 |
+| [MinerU vLLM](./builds/mineru) | 3.0.1 |
+| [Multica](./builds/multica) | v0.1.32 |
+| [OpenFang](./builds/openfang) | 0.1.0 |
+| [Paperclip](./builds/paperclip) | main |
## 已经支持的服务
diff --git a/apps/openlit/assets/clickhouse-config.xml b/apps/openlit/assets/clickhouse-config.xml
new file mode 100644
index 0000000..3967c33
--- /dev/null
+++ b/apps/openlit/assets/clickhouse-config.xml
@@ -0,0 +1,74 @@
+
+
+
+
+ warning
+ true
+
+
+
+
+
+ 6
+
+ 120000
+
+ 604800
+
+
+
+
+
+ warning
+
+ 604800
+
+ 120000
+
+
+
+
+
+ 1000
+
+ 60000
+
+ 604800
+
+
+
+
+ warning
+
+
+
+
+ 60000
+ 120000
+
+ 604800
+
+
+
+
+ 60000
+ 120000
+
+ 604800
+
+
+
+
+ warning
+ 120000
+
+ 604800
+
+
+
+
+ 120000
+
+ 604800
+
+
diff --git a/apps/openlit/assets/clickhouse-init.sh b/apps/openlit/assets/clickhouse-init.sh
new file mode 100644
index 0000000..0a0897d
--- /dev/null
+++ b/apps/openlit/assets/clickhouse-init.sh
@@ -0,0 +1,326 @@
+#!/bin/bash
+set -e
+
+echo "==================== ClickHouse Initialization ===================="
+
+
+clickhouse-client --query "CREATE DATABASE IF NOT EXISTS ${CLICKHOUSE_DATABASE}"
+
+echo "✅ Database $CLICKHOUSE_DATABASE created successfully"
+echo ""
+echo "Creating OTEL tables required by OpenTelemetry Collector..."
+
+clickhouse-client --database="${CLICKHOUSE_DATABASE}" --query "
+CREATE TABLE IF NOT EXISTS otel_traces
+(
+ \`Timestamp\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`TraceId\` String CODEC(ZSTD(1)),
+ \`SpanId\` String CODEC(ZSTD(1)),
+ \`ParentSpanId\` String CODEC(ZSTD(1)),
+ \`TraceState\` String CODEC(ZSTD(1)),
+ \`SpanName\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`SpanKind\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ScopeName\` String CODEC(ZSTD(1)),
+ \`ScopeVersion\` String CODEC(ZSTD(1)),
+ \`SpanAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`Duration\` UInt64 CODEC(ZSTD(1)),
+ \`StatusCode\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`StatusMessage\` String CODEC(ZSTD(1)),
+ \`Events.Timestamp\` Array(DateTime64(9)) CODEC(ZSTD(1)),
+ \`Events.Name\` Array(LowCardinality(String)) CODEC(ZSTD(1)),
+ \`Events.Attributes\` Array(Map(LowCardinality(String), String)) CODEC(ZSTD(1)),
+ \`Links.TraceId\` Array(String) CODEC(ZSTD(1)),
+ \`Links.SpanId\` Array(String) CODEC(ZSTD(1)),
+ \`Links.TraceState\` Array(String) CODEC(ZSTD(1)),
+ \`Links.Attributes\` Array(Map(LowCardinality(String), String)) CODEC(ZSTD(1)),
+ INDEX idx_trace_id TraceId TYPE bloom_filter(0.001) GRANULARITY 1,
+ INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_span_attr_key mapKeys(SpanAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_span_attr_value mapValues(SpanAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_duration Duration TYPE minmax GRANULARITY 1
+)
+ENGINE = MergeTree
+PARTITION BY toDate(Timestamp)
+ORDER BY (ServiceName, SpanName, toDateTime(Timestamp))
+TTL toDateTime(Timestamp) + toIntervalHour(730)
+SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1
+"
+
+clickhouse-client --database="${CLICKHOUSE_DATABASE}" --query "
+CREATE TABLE IF NOT EXISTS otel_logs
+(
+ \`Timestamp\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`TimestampTime\` DateTime DEFAULT toDateTime(Timestamp),
+ \`TraceId\` String CODEC(ZSTD(1)),
+ \`SpanId\` String CODEC(ZSTD(1)),
+ \`TraceFlags\` UInt8,
+ \`SeverityText\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`SeverityNumber\` UInt8,
+ \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`Body\` String CODEC(ZSTD(1)),
+ \`ResourceSchemaUrl\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ScopeSchemaUrl\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`ScopeName\` String CODEC(ZSTD(1)),
+ \`ScopeVersion\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`ScopeAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`LogAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ INDEX idx_trace_id TraceId TYPE bloom_filter(0.001) GRANULARITY 1,
+ INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_key mapKeys(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_log_attr_key mapKeys(LogAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_log_attr_value mapValues(LogAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_body Body TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 8
+)
+ENGINE = MergeTree
+PARTITION BY toDate(TimestampTime)
+PRIMARY KEY (ServiceName, TimestampTime)
+ORDER BY (ServiceName, TimestampTime, Timestamp)
+TTL TimestampTime + toIntervalHour(730)
+SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1
+"
+
+clickhouse-client --database="${CLICKHOUSE_DATABASE}" --query "
+CREATE TABLE IF NOT EXISTS otel_metrics_gauge
+(
+ \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ResourceSchemaUrl\` String CODEC(ZSTD(1)),
+ \`ScopeName\` String CODEC(ZSTD(1)),
+ \`ScopeVersion\` String CODEC(ZSTD(1)),
+ \`ScopeAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ScopeDroppedAttrCount\` UInt32 CODEC(ZSTD(1)),
+ \`ScopeSchemaUrl\` String CODEC(ZSTD(1)),
+ \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`MetricName\` String CODEC(ZSTD(1)),
+ \`MetricDescription\` String CODEC(ZSTD(1)),
+ \`MetricUnit\` String CODEC(ZSTD(1)),
+ \`Attributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`StartTimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`TimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`Value\` Float64 CODEC(ZSTD(1)),
+ \`Flags\` UInt32 CODEC(ZSTD(1)),
+ \`Exemplars.FilteredAttributes\` Array(Map(LowCardinality(String), String)) CODEC(ZSTD(1)),
+ \`Exemplars.TimeUnix\` Array(DateTime64(9)) CODEC(ZSTD(1)),
+ \`Exemplars.Value\` Array(Float64) CODEC(ZSTD(1)),
+ \`Exemplars.SpanId\` Array(String) CODEC(ZSTD(1)),
+ \`Exemplars.TraceId\` Array(String) CODEC(ZSTD(1)),
+ INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_key mapKeys(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_attr_key mapKeys(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1
+)
+ENGINE = MergeTree
+PARTITION BY toDate(TimeUnix)
+ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix))
+TTL toDateTime(TimeUnix) + toIntervalHour(730)
+SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1
+"
+
+clickhouse-client --database="${CLICKHOUSE_DATABASE}" --query "
+CREATE TABLE IF NOT EXISTS otel_metrics_sum
+(
+ \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ResourceSchemaUrl\` String CODEC(ZSTD(1)),
+ \`ScopeName\` String CODEC(ZSTD(1)),
+ \`ScopeVersion\` String CODEC(ZSTD(1)),
+ \`ScopeAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ScopeDroppedAttrCount\` UInt32 CODEC(ZSTD(1)),
+ \`ScopeSchemaUrl\` String CODEC(ZSTD(1)),
+ \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`MetricName\` String CODEC(ZSTD(1)),
+ \`MetricDescription\` String CODEC(ZSTD(1)),
+ \`MetricUnit\` String CODEC(ZSTD(1)),
+ \`Attributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`StartTimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`TimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`Value\` Float64 CODEC(ZSTD(1)),
+ \`Flags\` UInt32 CODEC(ZSTD(1)),
+ \`Exemplars.FilteredAttributes\` Array(Map(LowCardinality(String), String)) CODEC(ZSTD(1)),
+ \`Exemplars.TimeUnix\` Array(DateTime64(9)) CODEC(ZSTD(1)),
+ \`Exemplars.Value\` Array(Float64) CODEC(ZSTD(1)),
+ \`Exemplars.SpanId\` Array(String) CODEC(ZSTD(1)),
+ \`Exemplars.TraceId\` Array(String) CODEC(ZSTD(1)),
+ \`AggregationTemporality\` Int32 CODEC(ZSTD(1)),
+ \`IsMonotonic\` Bool CODEC(Delta(1), ZSTD(1)),
+ INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_key mapKeys(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_attr_key mapKeys(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1
+)
+ENGINE = MergeTree
+PARTITION BY toDate(TimeUnix)
+ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix))
+TTL toDateTime(TimeUnix) + toIntervalHour(730)
+SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1
+"
+
+clickhouse-client --database="${CLICKHOUSE_DATABASE}" --query "
+CREATE TABLE IF NOT EXISTS otel_metrics_histogram
+(
+ \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ResourceSchemaUrl\` String CODEC(ZSTD(1)),
+ \`ScopeName\` String CODEC(ZSTD(1)),
+ \`ScopeVersion\` String CODEC(ZSTD(1)),
+ \`ScopeAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ScopeDroppedAttrCount\` UInt32 CODEC(ZSTD(1)),
+ \`ScopeSchemaUrl\` String CODEC(ZSTD(1)),
+ \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`MetricName\` String CODEC(ZSTD(1)),
+ \`MetricDescription\` String CODEC(ZSTD(1)),
+ \`MetricUnit\` String CODEC(ZSTD(1)),
+ \`Attributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`StartTimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`TimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`Count\` UInt64 CODEC(Delta(8), ZSTD(1)),
+ \`Sum\` Float64 CODEC(ZSTD(1)),
+ \`BucketCounts\` Array(UInt64) CODEC(ZSTD(1)),
+ \`ExplicitBounds\` Array(Float64) CODEC(ZSTD(1)),
+ \`Exemplars.FilteredAttributes\` Array(Map(LowCardinality(String), String)) CODEC(ZSTD(1)),
+ \`Exemplars.TimeUnix\` Array(DateTime64(9)) CODEC(ZSTD(1)),
+ \`Exemplars.Value\` Array(Float64) CODEC(ZSTD(1)),
+ \`Exemplars.SpanId\` Array(String) CODEC(ZSTD(1)),
+ \`Exemplars.TraceId\` Array(String) CODEC(ZSTD(1)),
+ \`Flags\` UInt32 CODEC(ZSTD(1)),
+ \`Min\` Float64 CODEC(ZSTD(1)),
+ \`Max\` Float64 CODEC(ZSTD(1)),
+ \`AggregationTemporality\` Int32 CODEC(ZSTD(1)),
+ INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_key mapKeys(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_attr_key mapKeys(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1
+)
+ENGINE = MergeTree
+PARTITION BY toDate(TimeUnix)
+ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix))
+TTL toDateTime(TimeUnix) + toIntervalHour(730)
+SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1
+"
+
+clickhouse-client --database="${CLICKHOUSE_DATABASE}" --query "
+CREATE TABLE IF NOT EXISTS otel_metrics_summary
+(
+ \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ResourceSchemaUrl\` String CODEC(ZSTD(1)),
+ \`ScopeName\` String CODEC(ZSTD(1)),
+ \`ScopeVersion\` String CODEC(ZSTD(1)),
+ \`ScopeAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ScopeDroppedAttrCount\` UInt32 CODEC(ZSTD(1)),
+ \`ScopeSchemaUrl\` String CODEC(ZSTD(1)),
+ \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`MetricName\` String CODEC(ZSTD(1)),
+ \`MetricDescription\` String CODEC(ZSTD(1)),
+ \`MetricUnit\` String CODEC(ZSTD(1)),
+ \`Attributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`StartTimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`TimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`Count\` UInt64 CODEC(Delta(8), ZSTD(1)),
+ \`Sum\` Float64 CODEC(ZSTD(1)),
+ \`ValueAtQuantiles.Quantile\` Array(Float64) CODEC(ZSTD(1)),
+ \`ValueAtQuantiles.Value\` Array(Float64) CODEC(ZSTD(1)),
+ \`Flags\` UInt32 CODEC(ZSTD(1)),
+ INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_key mapKeys(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_attr_key mapKeys(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1
+)
+ENGINE = MergeTree
+PARTITION BY toDate(TimeUnix)
+ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix))
+TTL toDateTime(TimeUnix) + toIntervalHour(730)
+SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1
+"
+
+clickhouse-client --database="${CLICKHOUSE_DATABASE}" --query "
+CREATE TABLE IF NOT EXISTS otel_metrics_exponential_histogram
+(
+ \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ResourceSchemaUrl\` String CODEC(ZSTD(1)),
+ \`ScopeName\` String CODEC(ZSTD(1)),
+ \`ScopeVersion\` String CODEC(ZSTD(1)),
+ \`ScopeAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`ScopeDroppedAttrCount\` UInt32 CODEC(ZSTD(1)),
+ \`ScopeSchemaUrl\` String CODEC(ZSTD(1)),
+ \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
+ \`MetricName\` String CODEC(ZSTD(1)),
+ \`MetricDescription\` String CODEC(ZSTD(1)),
+ \`MetricUnit\` String CODEC(ZSTD(1)),
+ \`Attributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
+ \`StartTimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`TimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
+ \`Count\` UInt64 CODEC(Delta(8), ZSTD(1)),
+ \`Sum\` Float64 CODEC(ZSTD(1)),
+ \`Scale\` Int32 CODEC(ZSTD(1)),
+ \`ZeroCount\` UInt64 CODEC(ZSTD(1)),
+ \`PositiveOffset\` Int32 CODEC(ZSTD(1)),
+ \`PositiveBucketCounts\` Array(UInt64) CODEC(ZSTD(1)),
+ \`NegativeOffset\` Int32 CODEC(ZSTD(1)),
+ \`NegativeBucketCounts\` Array(UInt64) CODEC(ZSTD(1)),
+ \`Exemplars.FilteredAttributes\` Array(Map(LowCardinality(String), String)) CODEC(ZSTD(1)),
+ \`Exemplars.TimeUnix\` Array(DateTime64(9)) CODEC(ZSTD(1)),
+ \`Exemplars.Value\` Array(Float64) CODEC(ZSTD(1)),
+ \`Exemplars.SpanId\` Array(String) CODEC(ZSTD(1)),
+ \`Exemplars.TraceId\` Array(String) CODEC(ZSTD(1)),
+ \`Flags\` UInt32 CODEC(ZSTD(1)),
+ \`Min\` Float64 CODEC(ZSTD(1)),
+ \`Max\` Float64 CODEC(ZSTD(1)),
+ \`AggregationTemporality\` Int32 CODEC(ZSTD(1)),
+ INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_key mapKeys(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_attr_key mapKeys(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1
+)
+ENGINE = MergeTree
+PARTITION BY toDate(TimeUnix)
+ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix))
+TTL toDateTime(TimeUnix) + toIntervalHour(730)
+SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1
+"
+
+clickhouse-client --database="${CLICKHOUSE_DATABASE}" --query "
+CREATE TABLE IF NOT EXISTS otel_traces_trace_id_ts
+(
+ \`TraceId\` String CODEC(ZSTD(1)),
+ \`Start\` DateTime CODEC(Delta(4), ZSTD(1)),
+ \`End\` DateTime CODEC(Delta(4), ZSTD(1)),
+ INDEX idx_trace_id TraceId TYPE bloom_filter(0.01) GRANULARITY 1
+)
+ENGINE = MergeTree
+PARTITION BY toDate(Start)
+ORDER BY (TraceId, Start)
+TTL toDateTime(Start) + toIntervalHour(730)
+SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1
+"
+
+clickhouse-client --database="${CLICKHOUSE_DATABASE}" --query "
+CREATE MATERIALIZED VIEW IF NOT EXISTS otel_traces_trace_id_ts_mv TO otel_traces_trace_id_ts
+(
+ \`TraceId\` String,
+ \`Start\` DateTime64(9),
+ \`End\` DateTime64(9)
+)
+AS SELECT
+ TraceId,
+ min(Timestamp) AS Start,
+ max(Timestamp) AS End
+FROM otel_traces
+WHERE TraceId != ''
+GROUP BY TraceId
+"
+
+echo "✅ All 9 OTEL tables created successfully"
+echo "===================================================================="
\ No newline at end of file
diff --git a/apps/openlit/assets/otel-collector-config.yaml b/apps/openlit/assets/otel-collector-config.yaml
new file mode 100644
index 0000000..66c2676
--- /dev/null
+++ b/apps/openlit/assets/otel-collector-config.yaml
@@ -0,0 +1,54 @@
+file_format: '1.0'
+
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+
+processors:
+ batch:
+ memory_limiter:
+ # 80% of maximum memory up to 2G
+ limit_mib: 1500
+ # 25% of limit up to 2G
+ spike_limit_mib: 512
+ check_interval: 5s
+
+exporters:
+ clickhouse:
+ endpoint: tcp://${env:INIT_DB_HOST}:9000?dial_timeout=10s
+ database: ${env:INIT_DB_DATABASE}
+ username: ${env:INIT_DB_USERNAME}
+ password: ${env:INIT_DB_PASSWORD}
+ ttl: 730h
+ logs_table_name: otel_logs
+ traces_table_name: otel_traces
+ # Metrics use separate tables by type: otel_metrics_gauge, otel_metrics_sum,
+ # otel_metrics_histogram, otel_metrics_summary, otel_metrics_exponential_histogram
+ timeout: 5s
+ retry_on_failure:
+ enabled: true
+ initial_interval: 5s
+ max_interval: 30s
+ max_elapsed_time: 300s
+
+service:
+ pipelines:
+ logs:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [clickhouse]
+ traces:
+ receivers: [otlp]
+ processors: [memory_limiter, batch]
+ exporters: [clickhouse]
+ metrics:
+ receivers: [otlp]
+ processors: [memory_limiter, batch]
+ exporters: [clickhouse]
+ # telemetry:
+ # metrics:
+ # address: localhost:8888
diff --git a/apps/openlit/docker-compose.yaml b/apps/openlit/docker-compose.yaml
index 7dd5a68..9bfc12a 100644
--- a/apps/openlit/docker-compose.yaml
+++ b/apps/openlit/docker-compose.yaml
@@ -23,6 +23,8 @@ services:
- CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS=true
volumes:
- clickhouse_data:/var/lib/clickhouse
+ - ./assets/clickhouse-config.xml:/etc/clickhouse-server/config.d/custom-config.xml:ro
+ - ./assets/clickhouse-init.sh:/docker-entrypoint-initdb.d/init.sh:ro
ports:
- '${CLICKHOUSE_HTTP_PORT_OVERRIDE:-8123}:8123'
- '${CLICKHOUSE_NATIVE_PORT_OVERRIDE:-9000}:9000'
@@ -77,6 +79,7 @@ services:
condition: service_healthy
volumes:
- openlit_data:/app/client/data
+ - ./assets/otel-collector-config.yaml:/etc/otel/otel-collector-config.yaml:ro
healthcheck:
test: [CMD, wget, --quiet, --tries=1, --spider, 'http://localhost:${OPENLIT_INTERNAL_PORT:-3000}/health']
interval: 30s
diff --git a/builds/multica/.env.example b/builds/multica/.env.example
new file mode 100644
index 0000000..3a915b6
--- /dev/null
+++ b/builds/multica/.env.example
@@ -0,0 +1,55 @@
+# Source build configuration
+MULTICA_VERSION=v0.1.32
+MULTICA_PGVECTOR_VERSION=pg17
+
+# Ports
+MULTICA_BACKEND_PORT_OVERRIDE=8080
+MULTICA_FRONTEND_PORT_OVERRIDE=3000
+
+# PostgreSQL
+MULTICA_POSTGRES_DB=multica
+MULTICA_POSTGRES_USER=multica
+MULTICA_POSTGRES_PASSWORD=multica
+
+# Authentication & Security (CHANGEME: update JWT_SECRET for production)
+MULTICA_JWT_SECRET=change-me-in-production
+
+# Frontend origin (used by backend for CORS and cookie settings)
+MULTICA_FRONTEND_ORIGIN=http://localhost:3000
+MULTICA_APP_URL=http://localhost:3000
+MULTICA_CORS_ALLOWED_ORIGINS=
+MULTICA_COOKIE_DOMAIN=
+
+# Email via Resend (optional)
+MULTICA_RESEND_API_KEY=
+MULTICA_RESEND_FROM_EMAIL=noreply@multica.ai
+
+# Google OAuth (optional)
+MULTICA_GOOGLE_CLIENT_ID=
+MULTICA_GOOGLE_CLIENT_SECRET=
+MULTICA_GOOGLE_REDIRECT_URI=http://localhost:3000/auth/callback
+
+# Resources - PostgreSQL
+MULTICA_POSTGRES_CPU_LIMIT=1.00
+MULTICA_POSTGRES_MEMORY_LIMIT=1G
+MULTICA_POSTGRES_CPU_RESERVATION=0.25
+MULTICA_POSTGRES_MEMORY_RESERVATION=256M
+
+# Resources - Backend
+MULTICA_BACKEND_CPU_LIMIT=2.00
+MULTICA_BACKEND_MEMORY_LIMIT=2G
+MULTICA_BACKEND_CPU_RESERVATION=0.50
+MULTICA_BACKEND_MEMORY_RESERVATION=512M
+
+# Resources - Frontend
+MULTICA_FRONTEND_CPU_LIMIT=1.00
+MULTICA_FRONTEND_MEMORY_LIMIT=1G
+MULTICA_FRONTEND_CPU_RESERVATION=0.25
+MULTICA_FRONTEND_MEMORY_RESERVATION=256M
+
+# Logging
+MULTICA_LOG_MAX_SIZE=100m
+MULTICA_LOG_MAX_FILE=3
+
+# Timezone
+TZ=UTC
diff --git a/builds/multica/README.md b/builds/multica/README.md
new file mode 100644
index 0000000..9e79cbf
--- /dev/null
+++ b/builds/multica/README.md
@@ -0,0 +1,77 @@
+# Multica
+
+[English](./README.md) | [中文](./README.zh.md)
+
+Multica is an open-source managed agents platform that turns coding agents into real teammates. Assign tasks, track progress, and compound reusable skills — works with Claude Code, Codex, OpenClaw, and OpenCode. This Compose setup builds the Go backend and Next.js frontend from source, starts PostgreSQL with pgvector, and exposes both services.
+
+## Services
+
+- **multica-backend**: Go backend (Chi router, sqlc, gorilla/websocket) with auto-migration on startup
+- **multica-frontend**: Next.js 16 web application (App Router, standalone output)
+- **multica-postgres**: PostgreSQL 17 with pgvector extension
+
+## Quick Start
+
+1. Copy the example environment file:
+
+ ```bash
+ cp .env.example .env
+ ```
+
+2. Edit `.env` and change `MULTICA_JWT_SECRET` to a secure random value:
+
+ ```bash
+ MULTICA_JWT_SECRET=$(openssl rand -base64 32)
+ ```
+
+3. Start the stack (first run builds images from source — this takes several minutes):
+
+ ```bash
+ docker compose up -d
+ ```
+
+4. Open Multica:
+
+ - Frontend: <http://localhost:3000>
+ - Backend API: <http://localhost:8080>
+
+## Default Ports
+
+| Service | Port | Description |
+| -------- | ---- | ---------------------- |
+| Frontend | 3000 | Web UI |
+| Backend | 8080 | REST API and WebSocket |
+| Postgres | 5432 | Internal only |
+
+## Important Environment Variables
+
+| Variable | Description | Default |
+| -------------------------------- | ------------------------------------------ | ------------------------- |
+| `MULTICA_VERSION` | Git ref used for source builds | `v0.1.32` |
+| `MULTICA_BACKEND_PORT_OVERRIDE` | Host port for the backend API | `8080` |
+| `MULTICA_FRONTEND_PORT_OVERRIDE` | Host port for the web UI | `3000` |
+| `MULTICA_JWT_SECRET` | JWT signing secret (change for production) | `change-me-in-production` |
+| `MULTICA_POSTGRES_PASSWORD` | PostgreSQL password | `multica` |
+| `MULTICA_FRONTEND_ORIGIN` | Frontend URL for CORS and cookies | `http://localhost:3000` |
+| `MULTICA_GOOGLE_CLIENT_ID` | Google OAuth client ID (optional) | - |
+| `MULTICA_GOOGLE_CLIENT_SECRET` | Google OAuth client secret (optional) | - |
+| `MULTICA_RESEND_API_KEY` | Resend API key for email (optional) | - |
+| `TZ` | Container timezone | `UTC` |
+
+## Storage
+
+| Volume | Description |
+| ---------------- | --------------- |
+| `multica_pgdata` | PostgreSQL data |
+
+## Security Notes
+
+- Always change `MULTICA_JWT_SECRET` before exposing the service.
+- Change `MULTICA_POSTGRES_PASSWORD` for production deployments.
+- Google OAuth and email (Resend) are optional; the platform works without them.
+- The first build downloads the full Multica repository from GitHub and builds Docker images, so it requires internet access and may take several minutes.
+
+## References
+
+- [Multica Repository](https://github.com/multica-ai/multica)
+- [Self-Hosting Guide](https://github.com/multica-ai/multica/blob/main/SELF_HOSTING.md)
diff --git a/builds/multica/README.zh.md b/builds/multica/README.zh.md
new file mode 100644
index 0000000..2d1c407
--- /dev/null
+++ b/builds/multica/README.zh.md
@@ -0,0 +1,77 @@
+# Multica
+
+[English](./README.md) | [中文](./README.zh.md)
+
+Multica 是一个开源的托管 Agent 平台,能将编码 Agent 变成真正的团队成员。分配任务、跟踪进度、积累可复用技能——支持 Claude Code、Codex、OpenClaw 和 OpenCode。此 Compose 配置从源码构建 Go 后端和 Next.js 前端,启动带有 pgvector 扩展的 PostgreSQL,并暴露两个服务。
+
+## 服务
+
+- **multica-backend**:Go 后端(Chi 路由、sqlc、gorilla/websocket),启动时自动执行数据库迁移
+- **multica-frontend**:Next.js 16 Web 应用(App Router,standalone 输出)
+- **multica-postgres**:PostgreSQL 17,包含 pgvector 扩展
+
+## 快速开始
+
+1. 复制环境变量示例文件:
+
+ ```bash
+ cp .env.example .env
+ ```
+
+2. 编辑 `.env`,将 `MULTICA_JWT_SECRET` 修改为安全的随机值:
+
+ ```bash
+ MULTICA_JWT_SECRET=$(openssl rand -base64 32)
+ ```
+
+3. 启动服务(首次运行会从源码构建镜像,需要几分钟):
+
+ ```bash
+ docker compose up -d
+ ```
+
+4. 打开 Multica:
+
+ - 前端界面:<http://localhost:3000>
+ - 后端 API:<http://localhost:8080>
+
+## 默认端口
+
+| 服务 | 端口 | 说明 |
+| -------- | ---- | --------------------- |
+| Frontend | 3000 | Web 界面 |
+| Backend | 8080 | REST API 和 WebSocket |
+| Postgres | 5432 | 仅内部访问 |
+
+## 关键环境变量
+
+| 变量 | 说明 | 默认值 |
+| -------------------------------- | ---------------------------------- | ------------------------- |
+| `MULTICA_VERSION` | 用于源码构建的 Git 引用 | `v0.1.32` |
+| `MULTICA_BACKEND_PORT_OVERRIDE` | 后端 API 对外端口 | `8080` |
+| `MULTICA_FRONTEND_PORT_OVERRIDE` | Web 界面对外端口 | `3000` |
+| `MULTICA_JWT_SECRET` | JWT 签名密钥(生产环境必须修改) | `change-me-in-production` |
+| `MULTICA_POSTGRES_PASSWORD` | PostgreSQL 密码 | `multica` |
+| `MULTICA_FRONTEND_ORIGIN` | 前端 URL,用于 CORS 和 Cookie 设置 | `http://localhost:3000` |
+| `MULTICA_GOOGLE_CLIENT_ID` | Google OAuth 客户端 ID(可选) | - |
+| `MULTICA_GOOGLE_CLIENT_SECRET` | Google OAuth 客户端密钥(可选) | - |
+| `MULTICA_RESEND_API_KEY` | Resend 邮件服务的 API Key(可选) | - |
+| `TZ` | 容器时区 | `UTC` |
+
+## 存储
+
+| 卷 | 说明 |
+| ---------------- | --------------- |
+| `multica_pgdata` | PostgreSQL 数据 |
+
+## 安全说明
+
+- 在对外暴露服务前,务必修改 `MULTICA_JWT_SECRET`。
+- 生产环境部署时请修改 `MULTICA_POSTGRES_PASSWORD`。
+- Google OAuth 和邮件服务(Resend)均为可选配置,平台在没有它们的情况下也能正常运行。
+- 首次构建需要从 GitHub 下载完整的 Multica 仓库并构建 Docker 镜像,因此需要联网,可能需要几分钟。
+
+## 参考资料
+
+- [Multica 仓库](https://github.com/multica-ai/multica)
+- [自托管指南](https://github.com/multica-ai/multica/blob/main/SELF_HOSTING.md)
diff --git a/builds/multica/docker-compose.yaml b/builds/multica/docker-compose.yaml
new file mode 100644
index 0000000..1fe2ecf
--- /dev/null
+++ b/builds/multica/docker-compose.yaml
@@ -0,0 +1,109 @@
+x-defaults: &defaults
+ restart: unless-stopped
+ logging:
+ driver: json-file
+ options:
+ max-size: ${MULTICA_LOG_MAX_SIZE:-100m}
+ max-file: '${MULTICA_LOG_MAX_FILE:-3}'
+
+services:
+ multica-postgres:
+ <<: *defaults
+ image: ${GLOBAL_REGISTRY:-}pgvector/pgvector:${MULTICA_PGVECTOR_VERSION:-pg17}
+ environment:
+ - TZ=${TZ:-UTC}
+ - POSTGRES_DB=${MULTICA_POSTGRES_DB:-multica}
+ - POSTGRES_USER=${MULTICA_POSTGRES_USER:-multica}
+ - POSTGRES_PASSWORD=${MULTICA_POSTGRES_PASSWORD:-multica}
+ volumes:
+ - multica_pgdata:/var/lib/postgresql/data
+ healthcheck:
+ test: [CMD-SHELL, pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+ start_period: 10s
+ deploy:
+ resources:
+ limits:
+ cpus: ${MULTICA_POSTGRES_CPU_LIMIT:-1.00}
+ memory: ${MULTICA_POSTGRES_MEMORY_LIMIT:-1G}
+ reservations:
+ cpus: ${MULTICA_POSTGRES_CPU_RESERVATION:-0.25}
+ memory: ${MULTICA_POSTGRES_MEMORY_RESERVATION:-256M}
+
+ multica-backend:
+ <<: *defaults
+ build:
+ context: https://github.com/multica-ai/multica.git#${MULTICA_VERSION:-v0.1.32}
+ dockerfile: Dockerfile
+ depends_on:
+ multica-postgres:
+ condition: service_healthy
+ ports:
+ - '${MULTICA_BACKEND_PORT_OVERRIDE:-8080}:8080'
+ environment:
+ - TZ=${TZ:-UTC}
+ - DATABASE_URL=postgres://${MULTICA_POSTGRES_USER:-multica}:${MULTICA_POSTGRES_PASSWORD:-multica}@multica-postgres:5432/${MULTICA_POSTGRES_DB:-multica}?sslmode=disable
+ - PORT=8080
+ - JWT_SECRET=${MULTICA_JWT_SECRET:-change-me-in-production}
+ - FRONTEND_ORIGIN=${MULTICA_FRONTEND_ORIGIN:-http://localhost:3000}
+ - CORS_ALLOWED_ORIGINS=${MULTICA_CORS_ALLOWED_ORIGINS:-}
+ - MULTICA_APP_URL=${MULTICA_APP_URL:-http://localhost:3000}
+ - RESEND_API_KEY=${MULTICA_RESEND_API_KEY:-}
+ - RESEND_FROM_EMAIL=${MULTICA_RESEND_FROM_EMAIL:-noreply@multica.ai}
+ - GOOGLE_CLIENT_ID=${MULTICA_GOOGLE_CLIENT_ID:-}
+ - GOOGLE_CLIENT_SECRET=${MULTICA_GOOGLE_CLIENT_SECRET:-}
+ - GOOGLE_REDIRECT_URI=${MULTICA_GOOGLE_REDIRECT_URI:-http://localhost:3000/auth/callback}
+ - COOKIE_DOMAIN=${MULTICA_COOKIE_DOMAIN:-}
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - wget --no-verbose --tries=1 --spider http://127.0.0.1:8080/ || exit 1
+ interval: 30s
+ timeout: 10s
+ retries: 5
+ start_period: 60s
+ deploy:
+ resources:
+ limits:
+ cpus: ${MULTICA_BACKEND_CPU_LIMIT:-2.00}
+ memory: ${MULTICA_BACKEND_MEMORY_LIMIT:-2G}
+ reservations:
+ cpus: ${MULTICA_BACKEND_CPU_RESERVATION:-0.50}
+ memory: ${MULTICA_BACKEND_MEMORY_RESERVATION:-512M}
+
+ multica-frontend:
+ <<: *defaults
+ build:
+ context: https://github.com/multica-ai/multica.git#${MULTICA_VERSION:-v0.1.32}
+ dockerfile: Dockerfile.web
+ args:
+ REMOTE_API_URL: http://multica-backend:8080
+ NEXT_PUBLIC_GOOGLE_CLIENT_ID: ${MULTICA_GOOGLE_CLIENT_ID:-}
+ depends_on:
+ - multica-backend
+ ports:
+ - '${MULTICA_FRONTEND_PORT_OVERRIDE:-3000}:3000'
+ environment:
+ - TZ=${TZ:-UTC}
+ - HOSTNAME=0.0.0.0
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - wget --no-verbose --tries=1 --spider http://127.0.0.1:3000/ || exit 1
+ interval: 30s
+ timeout: 10s
+ retries: 5
+ start_period: 60s
+ deploy:
+ resources:
+ limits:
+ cpus: ${MULTICA_FRONTEND_CPU_LIMIT:-1.00}
+ memory: ${MULTICA_FRONTEND_MEMORY_LIMIT:-1G}
+ reservations:
+ cpus: ${MULTICA_FRONTEND_CPU_RESERVATION:-0.25}
+ memory: ${MULTICA_FRONTEND_MEMORY_RESERVATION:-256M}
+
+volumes:
+ multica_pgdata: