Merge pull request #2 from Sun-ZhenXing/dev

feat: add opik & agentgateway
This commit is contained in:
Alex Sun
2026-02-28 17:35:04 +08:00
committed by GitHub
27 changed files with 1169 additions and 31 deletions

1
.gitignore vendored
View File

@@ -3,7 +3,6 @@ node_modules/
# Ignore Helm chart default values files
*-values.yaml
values-*.yaml
# Env
.env

82
src/_template/crd.mk Normal file
View File

@@ -0,0 +1,82 @@
# CRD (Custom Resource Definition) Installation Template
# This file provides common targets for installing CRDs before deploying Helm charts.
#
# Usage:
# include ../_template/crd.mk
#
# Required variables:
# HELM_RELEASE_NAME - The name of the Helm release
# HELM_NAMESPACE - The namespace for the deployment
# HELM_CHART_VERSION - The version of the Helm chart (also used for the CRD chart)
#
# Optional variables:
# CRD_HELM_CHART_REPO - The Helm chart repository for CRDs (if using Helm to install CRDs)
# CRD_HELM_RELEASE_NAME - The release name for CRD installation (defaults to $(HELM_RELEASE_NAME)-crds)
# CRD_HELM_NAMESPACE - The namespace for CRD installation (defaults to $(HELM_NAMESPACE))
# CRD_KUBECTL_URLS - Space-separated list of URLs to apply via kubectl
# CRD_INSTALL_GATEWAY_API - Set to "true" to install Gateway API CRDs
# GATEWAY_API_VERSION - Version of Gateway API to install (defaults to v1.4.0)
#
# NOTE(review): GATEWAY_API_VERSION is also defaulted in gateway.mk; both use ?=
# so whichever include runs first wins — keep the two defaults in sync.
CRD_HELM_RELEASE_NAME ?= $(HELM_RELEASE_NAME)-crds
CRD_HELM_NAMESPACE ?= $(HELM_NAMESPACE)
GATEWAY_API_VERSION ?= v1.4.0
# Install CRDs via kubectl apply.
# Applies every manifest URL listed in CRD_KUBECTL_URLS, in order; no-op when unset.
# Fix: abort on the first failed apply — previously a mid-loop kubectl failure was
# swallowed (only the last iteration's exit status reached make), so make could
# report success after a partial install.
.PHONY: install-crds-kubectl
install-crds-kubectl:
ifdef CRD_KUBECTL_URLS
	@echo "Installing CRDs from URLs..."
	@for url in $(CRD_KUBECTL_URLS); do \
		echo "Applying $$url..."; \
		kubectl apply -f $$url || exit 1; \
	done
else
	@echo "CRD_KUBECTL_URLS not set, skipping kubectl CRD installation."
endif
# Install Gateway API CRDs.
# Opt-in via CRD_INSTALL_GATEWAY_API=true; applies the upstream "standard"
# channel manifest for $(GATEWAY_API_VERSION). ifeq is evaluated at parse time,
# so the flag must be set before this file is included (or on the command line).
.PHONY: install-crds-gateway-api
install-crds-gateway-api:
ifeq ($(CRD_INSTALL_GATEWAY_API),true)
	@echo "Installing Gateway API CRDs (version: $(GATEWAY_API_VERSION))..."
	kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/$(GATEWAY_API_VERSION)/standard-install.yaml
else
	@echo "CRD_INSTALL_GATEWAY_API not set to 'true', skipping Gateway API CRD installation."
endif
# Install CRDs via Helm.
# Installs/upgrades the dedicated CRD chart ($(CRD_HELM_CHART_REPO)) as release
# $(CRD_HELM_RELEASE_NAME); no-op when CRD_HELM_CHART_REPO is unset.
# The CRD chart is pinned to the same HELM_CHART_VERSION as the main chart
# (the --version flag is omitted entirely when HELM_CHART_VERSION is empty).
.PHONY: install-crds-helm
install-crds-helm:
ifdef CRD_HELM_CHART_REPO
	@echo "Installing CRDs via Helm..."
	helm upgrade -i --create-namespace \
		--namespace $(CRD_HELM_NAMESPACE) \
		$(if $(HELM_CHART_VERSION),--version $(HELM_CHART_VERSION),) \
		$(CRD_HELM_RELEASE_NAME) $(CRD_HELM_CHART_REPO)
else
	@echo "CRD_HELM_CHART_REPO not set, skipping Helm CRD installation."
endif
# Install all CRDs (kubectl + Gateway API + Helm).
# Each prerequisite is individually a no-op when its configuration variable is unset.
.PHONY: install-crds
install-crds: install-crds-kubectl install-crds-gateway-api install-crds-helm
	@echo "All CRDs installed successfully."
# Install CRDs and then the main chart.
# NOTE(review): assumes an `install` target is provided by another include
# (presumably base.mk) — confirm before using this template standalone.
.PHONY: install-all
install-all: install-crds install
# Verify CRD installation.
# Fix: the previous loop only echoed a name derived from the URL basename and
# never queried the cluster, so "verification" always passed. Now each manifest
# URL is checked with `kubectl get -f`, which resolves the resources declared in
# the manifest and reports whether they exist in the cluster (best-effort: a
# missing resource prints a warning instead of failing the target).
.PHONY: verify-crds
verify-crds:
ifdef CRD_KUBECTL_URLS
	@echo "Verifying CRDs..."
	@for url in $(CRD_KUBECTL_URLS); do \
		echo "Checking resources from $$url..."; \
		kubectl get -f $$url -o name >/dev/null 2>&1 || echo "Resources from $$url not found"; \
	done
endif
ifeq ($(CRD_INSTALL_GATEWAY_API),true)
	@echo "Verifying Gateway API CRDs..."
	kubectl get crd | grep gateway.networking.k8s.io || echo "Gateway API CRDs not found"
endif

88
src/_template/gateway.mk Normal file
View File

@@ -0,0 +1,88 @@
# Gateway Service Installation Template
# This file provides common targets for deploying Gateway API based services.
# It extends crd.mk with Gateway-specific verification and utilities.
#
# Usage:
# include ../_template/crd.mk
# include ../_template/gateway.mk
#
# Required variables (inherited from crd.mk):
# HELM_RELEASE_NAME - The name of the Helm release
# HELM_NAMESPACE - The namespace for the deployment
# HELM_CHART_VERSION - The version of the Helm chart
#
# Additional required variables:
# GATEWAY_CLASS_NAME - The name of the GatewayClass (e.g., "kgateway", "agentgateway")
# CRD_HELM_CHART_REPO - The Helm chart repository for CRDs
# HELM_APPLICATION_NAME - Used by verify/port-forward targets as the
#                         pod label selector (app=$(HELM_APPLICATION_NAME))
#
# Optional variables:
# GATEWAY_API_VERSION - Version of Gateway API (defaults to v1.4.0; also defaulted in crd.mk)
# ENABLE_GATEWAY_VERIFY - Set to "true" to enable gateway verification (defaults to true)
GATEWAY_API_VERSION ?= v1.4.0
ENABLE_GATEWAY_VERIFY ?= true
# Verify GatewayClass installation.
# Best-effort: a missing GatewayClass prints a warning instead of failing make.
.PHONY: verify-gatewayclass
verify-gatewayclass:
ifeq ($(ENABLE_GATEWAY_VERIFY),true)
	@echo "Verifying GatewayClass: $(GATEWAY_CLASS_NAME)..."
	kubectl get gatewayclass $(GATEWAY_CLASS_NAME) || echo "GatewayClass $(GATEWAY_CLASS_NAME) not found"
else
	@echo "Gateway verification disabled."
endif
# Verify Gateway installation.
# Lists Gateway resources in the deployment namespace; stderr is suppressed so
# an absent CRD shows the friendly message rather than a kubectl error.
.PHONY: verify-gateway
verify-gateway:
ifeq ($(ENABLE_GATEWAY_VERIFY),true)
	@echo "Verifying Gateways in namespace: $(HELM_NAMESPACE)..."
	kubectl get gateway -n $(HELM_NAMESPACE) 2>/dev/null || echo "No Gateways found in $(HELM_NAMESPACE)"
else
	@echo "Gateway verification disabled."
endif
# Verify HTTPRoutes.
# Lists HTTPRoute resources in the deployment namespace; stderr is suppressed so
# an absent CRD shows the friendly message rather than a kubectl error.
.PHONY: verify-httproutes
verify-httproutes:
ifeq ($(ENABLE_GATEWAY_VERIFY),true)
	@echo "Verifying HTTPRoutes in namespace: $(HELM_NAMESPACE)..."
	kubectl get httproute -n $(HELM_NAMESPACE) 2>/dev/null || echo "No HTTPRoutes found in $(HELM_NAMESPACE)"
else
	@echo "Gateway verification disabled."
endif
# Full verification including Gateway API resources:
# CRDs (from crd.mk), GatewayClass, Gateways, and HTTPRoutes.
.PHONY: verify-gateway-all
verify-gateway-all: verify-crds verify-gatewayclass verify-gateway verify-httproutes
	@echo "Gateway verification complete."
# Top-level verify target: CRDs + GatewayClass + pod status.
# NOTE(review): the original comment claimed this overrides a `verify` target
# from crd.mk, but crd.mk defines no such target — it presumably overrides the
# one from base.mk; confirm against base.mk.
# Fix: added the disabled-branch message — previously this target was a silent
# no-op when ENABLE_GATEWAY_VERIFY was false, unlike every sibling verify target.
.PHONY: verify
verify: verify-crds verify-gatewayclass
ifeq ($(ENABLE_GATEWAY_VERIFY),true)
	@echo "Verifying $(HELM_APPLICATION_NAME) installation..."
	kubectl get pods -n $(HELM_NAMESPACE)
	kubectl get gatewayclass $(GATEWAY_CLASS_NAME) 2>/dev/null || echo "GatewayClass $(GATEWAY_CLASS_NAME) not ready yet"
else
	@echo "Gateway verification disabled."
endif
# Local port to bind when port-forwarding (the pod-side port stays 8080).
GATEWAY_PORT ?= 8080

# Port forward to the gateway service.
# Picks the first pod matching label app=$(HELM_APPLICATION_NAME) and forwards
# $(GATEWAY_PORT) -> 8080. Generalized: the local port was hard-coded to 8080;
# it is now overridable (e.g. `make port-forward-gateway GATEWAY_PORT=9090`)
# with the same default, so existing usage is unchanged.
.PHONY: port-forward-gateway
port-forward-gateway:
	@echo "Port forwarding to gateway service..."
	@POD_NAME=$$(kubectl get pods -n $(HELM_NAMESPACE) -l app=$(HELM_APPLICATION_NAME) -o jsonpath='{.items[0].metadata.name}' 2>/dev/null); \
	if [ -n "$$POD_NAME" ]; then \
		echo "Forwarding to pod: $$POD_NAME"; \
		kubectl port-forward -n $(HELM_NAMESPACE) $$POD_NAME $(GATEWAY_PORT):8080; \
	else \
		echo "No gateway pod found with label app=$(HELM_APPLICATION_NAME)"; \
	fi
# Get gateway status.
# Dumps the GatewayClass and all Gateways in the deployment namespace as YAML
# for debugging; missing resources print a message instead of failing make.
.PHONY: gateway-status
gateway-status:
	@echo "GatewayClass status:"
	kubectl get gatewayclass $(GATEWAY_CLASS_NAME) -o yaml 2>/dev/null || echo "GatewayClass not found"
	@echo ""
	@echo "Gateways in $(HELM_NAMESPACE):"
	kubectl get gateway -n $(HELM_NAMESPACE) -o yaml 2>/dev/null || echo "No Gateways found"

100
src/_template/operator.mk Normal file
View File

@@ -0,0 +1,100 @@
# Kubernetes Operator Installation Template
# This file provides common targets for deploying services using the Operator pattern.
#
# Usage:
# include ../_template/operator.mk
#
# Required variables:
# HELM_RELEASE_NAME - The name of the Helm release
# HELM_APPLICATION_NAME - The name of the application
# HELM_NAMESPACE - The namespace for the deployment
# HELM_CHART_REPO - The Helm chart repository
#
# Optional variables:
# OPERATOR_RELEASE_NAME - The release name for the operator (defaults to $(HELM_RELEASE_NAME)-operator)
# OPERATOR_NAMESPACE - The namespace for the operator (defaults to $(HELM_NAMESPACE)-system)
# OPERATOR_CHART_REPO - The Helm chart repository for the operator (if different from main chart)
# OPERATOR_CHART_VERSION - The version of the operator chart
# OPERATOR_VALUES_FILE - The values file for the operator
# CLUSTER_RELEASE_NAME - The release name for the cluster/resource
# CLUSTER_VALUES_FILE - The values file for the cluster/resource
# WAIT_FOR_CRD - Set to "true" to wait for CRDs to be ready
# CRD_WAIT_TIMEOUT - Timeout for waiting for CRDs (defaults to 60s)
OPERATOR_RELEASE_NAME ?= $(HELM_RELEASE_NAME)-operator
OPERATOR_NAMESPACE ?= $(HELM_NAMESPACE)-system
CLUSTER_RELEASE_NAME ?= $(HELM_RELEASE_NAME)-cluster
CRD_WAIT_TIMEOUT ?= 60s
# Install the operator.
# Installs/upgrades the operator chart into its own namespace; no-op when
# OPERATOR_CHART_REPO is unset. --version / --values flags are omitted entirely
# when their variables are empty.
.PHONY: install-operator
install-operator:
ifdef OPERATOR_CHART_REPO
	@echo "Installing operator: $(OPERATOR_RELEASE_NAME)..."
	helm upgrade $(OPERATOR_RELEASE_NAME) $(OPERATOR_CHART_REPO) \
		--install \
		--namespace $(OPERATOR_NAMESPACE) \
		--create-namespace \
		$(if $(OPERATOR_CHART_VERSION),--version $(OPERATOR_CHART_VERSION),) \
		$(if $(OPERATOR_VALUES_FILE),--values $(OPERATOR_VALUES_FILE),)
else
	@echo "OPERATOR_CHART_REPO not set, skipping operator installation."
endif
# Wait for CRDs to be ready.
# Fix: the recipe was a fixed `sleep 5`, which both ignored the documented
# CRD_WAIT_TIMEOUT variable and raced slow operators. Now waits until every CRD
# in the cluster reaches the Established condition, honoring the timeout.
# Best-effort: a timeout prints a warning and continues, preserving the
# previous non-failing behavior of this target.
.PHONY: wait-for-crds
wait-for-crds:
ifeq ($(WAIT_FOR_CRD),true)
	@echo "Waiting for CRDs to be ready (timeout: $(CRD_WAIT_TIMEOUT))..."
	@kubectl wait --for=condition=Established crd --all --timeout=$(CRD_WAIT_TIMEOUT) || \
		echo "Warning: some CRDs are not Established yet; continuing."
	@echo "CRDs should be ready now."
else
	@echo "WAIT_FOR_CRD not set to 'true', skipping CRD wait."
endif
# Install the cluster/resource using the operator.
# Installs the main chart ($(HELM_CHART_REPO)) as release $(CLUSTER_RELEASE_NAME)
# in the application namespace. Values file selection: CLUSTER_VALUES_FILE is
# preferred, falling back to HELM_VALUES_FILE, falling back to chart defaults.
.PHONY: install-cluster
install-cluster:
	@echo "Installing cluster: $(CLUSTER_RELEASE_NAME)..."
	helm upgrade $(CLUSTER_RELEASE_NAME) $(HELM_CHART_REPO) \
		--install \
		--namespace $(HELM_NAMESPACE) \
		--create-namespace \
		$(if $(HELM_CHART_VERSION),--version $(HELM_CHART_VERSION),) \
		$(if $(CLUSTER_VALUES_FILE),--values $(CLUSTER_VALUES_FILE),$(if $(HELM_VALUES_FILE),--values $(HELM_VALUES_FILE),))
# Install operator and cluster.
# Ordered: operator first, then CRD wait, then the cluster resource that
# depends on those CRDs.
.PHONY: install-all
install-all: install-operator wait-for-crds install-cluster
# Uninstall the cluster only
.PHONY: uninstall-cluster
uninstall-cluster:
	helm uninstall $(CLUSTER_RELEASE_NAME) --namespace $(HELM_NAMESPACE)
# Uninstall the operator only
.PHONY: uninstall-operator
uninstall-operator:
	helm uninstall $(OPERATOR_RELEASE_NAME) --namespace $(OPERATOR_NAMESPACE)
# Uninstall everything.
# NOTE(review): cluster is removed before the operator — presumably so the
# operator is still running to process the cluster CR's finalizers; confirm.
.PHONY: uninstall-all
uninstall-all: uninstall-cluster uninstall-operator
# Verify operator installation.
# Lists operator pods and greps installed CRDs for the application name.
.PHONY: verify-operator
verify-operator:
	@echo "Verifying operator installation..."
	kubectl get pods -n $(OPERATOR_NAMESPACE)
	kubectl get crd | grep $(HELM_APPLICATION_NAME) || echo "No CRDs found for $(HELM_APPLICATION_NAME)"
# Verify cluster installation.
# NOTE(review): `kubectl get $(HELM_APPLICATION_NAME)` assumes the application
# name matches a CR kind/resource name registered by the operator — that holds
# for e.g. "kafka" but not for every operator; confirm per consumer Makefile.
.PHONY: verify-cluster
verify-cluster:
	@echo "Verifying cluster installation..."
	kubectl get pods -n $(HELM_NAMESPACE)
	kubectl get $(HELM_APPLICATION_NAME) -n $(HELM_NAMESPACE) 2>/dev/null || echo "No $(HELM_APPLICATION_NAME) resources found"
# Verify everything
.PHONY: verify
verify: verify-operator verify-cluster

23
src/agentgateway/Makefile Normal file
View File

@@ -0,0 +1,23 @@
# agentgateway deployment configuration.
# Charts are pulled from the agentgateway OCI registry; CRDs ship in a separate
# chart and Gateway API CRDs are installed via the crd.mk template.
HELM_RELEASE_NAME ?= agentgateway
HELM_APPLICATION_NAME ?= agentgateway
HELM_NAMESPACE ?= agentgateway-system
HELM_DIR ?= ./helm
# NOTE(review): chart version is v2.2.0 while values.yaml pins image tags
# v2.2.1 — confirm the intended pairing.
HELM_CHART_VERSION ?= v2.2.0
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?= cr.agentgateway.dev
HELM_OCI_NAMESPACE ?= charts
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
# CRD configuration
# Gateway API CRDs plus the agentgateway-crds chart (consumed by crd.mk).
CRD_INSTALL_GATEWAY_API = true
CRD_HELM_CHART_REPO = oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/agentgateway-crds
CRD_HELM_RELEASE_NAME = agentgateway-crds
# Gateway configuration
GATEWAY_CLASS_NAME = agentgateway
# Templates: base install/uninstall targets, then CRD and Gateway helpers.
include ../_template/base.mk
include ../_template/crd.mk
include ../_template/gateway.mk

View File

@@ -0,0 +1,98 @@
# agentgateway
## Introduction
agentgateway is a cloud-native API gateway designed for AI workloads. It provides a Kubernetes-native way to manage traffic, secure APIs, and observe your AI services. Built on the Kubernetes Gateway API, agentgateway enables seamless integration with AI agents and services.
## Prerequisites
Before installing agentgateway, ensure you have:
1. A Kubernetes cluster (1.25+)
2. `kubectl` installed
3. `helm` installed (3.8+ for OCI support)
## Installation
### Quick Install (includes CRDs)
To install agentgateway with all required CRDs:
```bash
make install-all
```
### Step-by-Step Install
1. Install Gateway API CRDs:
```bash
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
```
2. Install agentgateway CRDs:
```bash
make install-crds
```
3. Install agentgateway:
```bash
make install
```
## Usage
After installation, verify the deployment:
```bash
# Check if agentgateway pods are running
kubectl get pods -n agentgateway-system
# Verify GatewayClass is created
kubectl get gatewayclass agentgateway
# View agentgateway services
kubectl get svc -n agentgateway-system
```
## Configuration
The default configuration includes:
- Gateway controller for managing Gateway API resources
- Control plane components for configuration management
- Support for AI workload routing
You can customize the installation by modifying `values.yaml` before running `make install`.
## Gateway API
agentgateway supports the Kubernetes Gateway API standard. You can create Gateway and HTTPRoute resources to configure routing:
```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-gateway
namespace: agentgateway-system
spec:
gatewayClassName: agentgateway
listeners:
- name: http
protocol: HTTP
port: 80
```
## Uninstall
To uninstall agentgateway:
```bash
make uninstall
```
## Documentation
For more information, visit the [official documentation](https://agentgateway.dev/docs/kubernetes/latest/).

View File

@@ -0,0 +1,98 @@
# agentgateway
## 简介
agentgateway 是一个专为 AI 工作负载设计的云原生 API 网关。它提供了一种 Kubernetes 原生的方式来管理流量、保护 API 和观测 AI 服务。agentgateway 基于 Kubernetes Gateway API 构建,可实现与 AI 代理和服务的无缝集成。
## 前置条件
在安装 agentgateway 之前,请确保您已具备:
1. Kubernetes 集群 (1.25+)
2. 已安装 `kubectl`
3. 已安装 `helm` (3.8+ 以支持 OCI)
## 安装
### 快速安装(包含 CRDs)
要安装 agentgateway 及其所有必需的 CRDs
```bash
make install-all
```
### 分步安装
1. 安装 Gateway API CRDs
```bash
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
```
2. 安装 agentgateway CRDs
```bash
make install-crds
```
3. 安装 agentgateway
```bash
make install
```
## 使用
安装完成后,验证部署状态:
```bash
# 检查 agentgateway pod 是否运行
kubectl get pods -n agentgateway-system
# 验证 GatewayClass 是否已创建
kubectl get gatewayclass agentgateway
# 查看 agentgateway 服务
kubectl get svc -n agentgateway-system
```
## 配置
默认配置包括:
- 用于管理 Gateway API 资源的网关控制器
- 用于配置管理的控制平面组件
- 支持 AI 工作负载路由
您可以在运行 `make install` 之前修改 `values.yaml` 来自定义安装。
## Gateway API
agentgateway 支持 Kubernetes Gateway API 标准。您可以创建 Gateway 和 HTTPRoute 资源来配置路由:
```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-gateway
namespace: agentgateway-system
spec:
gatewayClassName: agentgateway
listeners:
- name: http
protocol: HTTP
port: 80
```
## 卸载
卸载 agentgateway
```bash
make uninstall
```
## 文档
更多信息请访问[官方文档](https://agentgateway.dev/docs/kubernetes/latest/)。

View File

@@ -0,0 +1,72 @@
# Default values for agentgateway
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Controller configuration
controller:
image:
repository: ghcr.io/kgateway-dev/agentgateway
tag: v2.2.1
pullPolicy: IfNotPresent
resources:
limits:
cpu: 1000m
memory: 1Gi
requests:
cpu: 100m
memory: 256Mi
# Enable experimental Gateway API features
extraEnv:
KGW_ENABLE_GATEWAY_API_EXPERIMENTAL_FEATURES: 'false'
# Gateway proxy configuration
gatewayProxy:
image:
repository: ghcr.io/kgateway-dev/agentgateway-proxy
tag: v2.2.1
pullPolicy: IfNotPresent
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
# Service configuration
service:
type: LoadBalancer
httpPort: 80
httpsPort: 443
# RBAC configuration
rbac:
create: true
# Service account configuration
serviceAccount:
create: true
annotations: {}
# Pod security context
podSecurityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
# Security context for containers
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
# Node selector
nodeSelector: {}
# Tolerations
tolerations: []
# Affinity
affinity: {}

View File

@@ -10,6 +10,21 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= k8ssandra
HELM_REPO_URL ?= https://helm.k8ssandra.io/stable
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/k8ssandra-operator
# Operator configuration
OPERATOR_RELEASE_NAME ?= k8ssandra-operator
OPERATOR_NAMESPACE ?= k8ssandra-operator
OPERATOR_CHART_REPO ?= $(HELM_REPO_NAME)/k8ssandra-operator
OPERATOR_CHART_VERSION ?=
OPERATOR_VALUES_FILE ?= ./values.yaml
# Cluster configuration
CLUSTER_RELEASE_NAME ?= cassandra-cluster
CLUSTER_CHART_REPO ?= $(HELM_REPO_NAME)/k8ssandra
CLUSTER_VALUES_FILE ?= ./cluster-values.yaml
# Enable CRD waiting
WAIT_FOR_CRD ?= true
include ../_template/base.mk
include ../_template/operator.mk

View File

@@ -0,0 +1,41 @@
# K8ssandra Cluster Configuration
# https://github.com/k8ssandra/k8ssandra-operator
# Cluster name
cassandra:
clusterName: cassandra-cluster
datacenters:
- name: dc1
size: 3
racks:
- name: rack1
- name: rack2
- name: rack3
storage:
storageClassName: standard
size: 10Gi
resources:
requests:
cpu: 1000m
memory: 4Gi
limits:
cpu: 2000m
memory: 4Gi
# Stargate configuration
stargate:
enabled: false
size: 1
heapSize: 256Mi
# Reaper configuration
reaper:
enabled: false
# Medusa backup configuration
medusa:
enabled: false
# Prometheus monitoring
monitoring:
enabled: false

View File

@@ -1,5 +1,5 @@
HELM_RELEASE_NAME ?= duckdb
HELM_APPLICATION_NAME ?= duckdb
HELM_APPLICATION_NAME ?= jupyterhub
HELM_NAMESPACE ?= duckdb
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
@@ -10,6 +10,6 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= jupyterhub
HELM_REPO_URL ?= https://hub.jupyter.org/helm-chart/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/jupyterhub
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

View File

@@ -10,6 +10,21 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= flink-operator
HELM_REPO_URL ?= https://downloads.apache.org/flink/flink-kubernetes-operator-1.9.0/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/flink-operator
# Operator configuration
OPERATOR_RELEASE_NAME ?= flink-operator
OPERATOR_NAMESPACE ?= flink-operator
OPERATOR_CHART_REPO ?= $(HELM_REPO_NAME)/flink-operator
OPERATOR_CHART_VERSION ?=
OPERATOR_VALUES_FILE ?= ./values.yaml
# Cluster configuration (Flink uses FlinkDeployment CR, installed via kubectl or separate chart)
CLUSTER_RELEASE_NAME ?= flink-cluster
CLUSTER_CHART_REPO ?= $(HELM_REPO_NAME)/flink-cluster
CLUSTER_VALUES_FILE ?= ./cluster-values.yaml
# Enable CRD waiting
WAIT_FOR_CRD ?= true
include ../_template/base.mk
include ../_template/operator.mk

View File

@@ -0,0 +1,33 @@
# Flink Cluster Configuration (FlinkDeployment CR)
# https://github.com/apache/flink-kubernetes-operator
# Flink cluster name
nameOverride: flink-cluster
# Flink version
flinkVersion: v1.19
# Job configuration
job:
jarURI: local:///opt/flink/examples/streaming/StateMachineExample.jar
parallelism: 2
upgradeMode: stateful
state: running
# TaskManager configuration
taskManager:
resource:
memory: 2048m
cpu: 1
replicas: 2
# JobManager configuration
jobManager:
resource:
memory: 1024m
cpu: 0.5
replicas: 1
# Service configuration
service:
type: ClusterIP

View File

@@ -1,5 +1,5 @@
HELM_RELEASE_NAME ?= gitea-runner
HELM_APPLICATION_NAME ?= gitea-runner
HELM_APPLICATION_NAME ?= actions
HELM_NAMESPACE ?= gitea-runner
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
@@ -10,6 +10,6 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= gitea
HELM_REPO_URL ?= https://dl.gitea.com/charts
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/actions
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

View File

@@ -1,5 +1,5 @@
HELM_RELEASE_NAME ?= strimzi-kafka-operator
HELM_APPLICATION_NAME ?= strimzi-kafka-operator
HELM_RELEASE_NAME ?= kafka
HELM_APPLICATION_NAME ?= kafka
HELM_NAMESPACE ?= kafka
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?= 0.50.0
@@ -8,6 +8,28 @@ HELM_OCI_REGISTRY ?= docker.io
HELM_OCI_NAMESPACE ?=
HELM_REPO_NAME ?= strimzi
HELM_REPO_URL ?= https://strimzi.io/charts/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Operator configuration (Strimzi only has operator, cluster is created via CRDs)
OPERATOR_RELEASE_NAME ?= strimzi-kafka-operator
OPERATOR_NAMESPACE ?= strimzi-operator
OPERATOR_CHART_REPO ?= $(HELM_REPO_NAME)/strimzi-kafka-operator
OPERATOR_CHART_VERSION ?= $(HELM_CHART_VERSION)
OPERATOR_VALUES_FILE ?= ./values.yaml
# For Strimzi, we only install the operator
# Kafka clusters are created using Kafka CRDs after operator is installed
include ../_template/base.mk
include ../_template/operator.mk
# Override install target to only install operator
.PHONY: install
install: install-operator
# Override uninstall target to only uninstall operator
.PHONY: uninstall
uninstall: uninstall-operator
# Override verify target
.PHONY: verify
verify: verify-operator

View File

@@ -2,7 +2,7 @@ HELM_RELEASE_NAME ?= kgateway
HELM_APPLICATION_NAME ?= kgateway
HELM_NAMESPACE ?= kgateway-system
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_CHART_VERSION ?= v2.2.0
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?= cr.kgateway.dev
HELM_OCI_NAMESPACE ?= kgateway-dev/charts
@@ -10,4 +10,14 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
# CRD configuration
CRD_INSTALL_GATEWAY_API = true
CRD_HELM_CHART_REPO = oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/kgateway-crds
CRD_HELM_RELEASE_NAME = kgateway-crds
# Gateway configuration
GATEWAY_CLASS_NAME = kgateway
include ../_template/base.mk
include ../_template/crd.mk
include ../_template/gateway.mk

View File

@@ -4,22 +4,55 @@
kgateway is a cloud-native API gateway built on Envoy Proxy. It provides a Kubernetes-native way to manage traffic, secure APIs, and observe your services. Formerly known as Gloo Gateway, kgateway offers advanced routing capabilities, traffic management, and extensibility through WebAssembly (Wasm) filters.
## Prerequisites
Before installing kgateway, ensure you have:
1. A Kubernetes cluster (1.25+)
2. `kubectl` installed
3. `helm` installed (3.8+ for OCI support)
## Installation
To install kgateway, run:
### Quick Install (includes CRDs)
To install kgateway with all required CRDs:
```bash
make install
make install-all
```
### Step-by-Step Install
1. Install Kubernetes Gateway API CRDs:
```bash
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
```
2. Install kgateway CRDs:
```bash
make install-crds
```
3. Install kgateway:
```bash
make install
```
## Usage
After installation, you can configure kgateway using Kubernetes Gateway API resources:
After installation, verify the deployment:
```bash
# Check if kgateway pods are running
kubectl get pods -n kgateway-system
# Verify GatewayClass is created
kubectl get gatewayclass kgateway
# View kgateway services
kubectl get svc -n kgateway-system
```

View File

@@ -4,22 +4,55 @@
kgateway 是一个基于 Envoy Proxy 的云原生 API 网关。它提供了一种 Kubernetes 原生的方式来管理流量、保护 API 和观测服务。kgateway 前身为 Gloo Gateway提供高级路由功能、流量管理和通过 WebAssembly (Wasm) 过滤器的可扩展性。
## 前置条件
在安装 kgateway 之前,请确保您已具备:
1. Kubernetes 集群 (1.25+)
2. 已安装 `kubectl`
3. 已安装 `helm` (3.8+ 以支持 OCI)
## 安装
安装 kgateway
### 快速安装(包含 CRDs)
要安装 kgateway 及其所有必需的 CRDs
```bash
make install
make install-all
```
### 分步安装
1. 安装 Kubernetes Gateway API CRDs
```bash
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
```
2. 安装 kgateway CRDs
```bash
make install-crds
```
3. 安装 kgateway
```bash
make install
```
## 使用
安装完成后,您可以使用 Kubernetes Gateway API 资源来配置 kgateway
安装完成后,验证部署状态
```bash
# 检查 kgateway pod 是否运行
kubectl get pods -n kgateway-system
# 验证 GatewayClass 是否已创建
kubectl get gatewayclass kgateway
# 查看 kgateway 服务
kubectl get svc -n kgateway-system
```

View File

@@ -1,5 +1,5 @@
HELM_RELEASE_NAME ?= mysql
HELM_APPLICATION_NAME ?= mysql-innodbcluster
HELM_APPLICATION_NAME ?= mysql
HELM_NAMESPACE ?= mysql
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
@@ -10,6 +10,21 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= mysql-operator
HELM_REPO_URL ?= https://mysql.github.io/mysql-operator/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/mysql-innodbcluster
# Operator configuration
OPERATOR_RELEASE_NAME ?= mysql-operator
OPERATOR_NAMESPACE ?= mysql-operator
OPERATOR_CHART_REPO ?= $(HELM_REPO_NAME)/mysql-operator
OPERATOR_CHART_VERSION ?=
OPERATOR_VALUES_FILE ?=
# Cluster configuration
CLUSTER_RELEASE_NAME ?= mysql-cluster
CLUSTER_CHART_REPO ?= $(HELM_REPO_NAME)/mysql-innodbcluster
CLUSTER_VALUES_FILE ?= ./values.yaml
# Enable CRD waiting
WAIT_FOR_CRD ?= true
include ../_template/base.mk
include ../_template/operator.mk

View File

@@ -1,5 +1,5 @@
HELM_RELEASE_NAME ?= nebula
HELM_APPLICATION_NAME ?= nebula-cluster
HELM_APPLICATION_NAME ?= nebula
HELM_NAMESPACE ?= nebula
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
@@ -10,6 +10,21 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= nebula-operator
HELM_REPO_URL ?= https://vesoft-inc.github.io/nebula-operator/charts
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Operator configuration
OPERATOR_RELEASE_NAME ?= nebula-operator
OPERATOR_NAMESPACE ?= nebula-operator-system
OPERATOR_CHART_REPO ?= $(HELM_REPO_NAME)/nebula-operator
OPERATOR_CHART_VERSION ?=
OPERATOR_VALUES_FILE ?=
# Cluster configuration
CLUSTER_RELEASE_NAME ?= nebula-cluster
CLUSTER_CHART_REPO ?= $(HELM_REPO_NAME)/nebula-cluster
CLUSTER_VALUES_FILE ?= ./values.yaml
# Enable CRD waiting
WAIT_FOR_CRD ?= true
include ../_template/base.mk
include ../_template/operator.mk

22
src/opik/Makefile Normal file
View File

@@ -0,0 +1,22 @@
# Opik deployment configuration (chart from the comet-ml Helm repo).
HELM_RELEASE_NAME ?= opik
HELM_APPLICATION_NAME ?= opik
HELM_NAMESPACE ?= opik
HELM_DIR ?= ./helm
# Empty chart version = latest chart published in the repo.
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_REPO_NAME ?= opik
HELM_REPO_URL ?= https://comet-ml.github.io/opik
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Version for all Opik components
# Override with e.g. `make install VERSION=1.0.0` to pin every component image.
VERSION ?= latest
# NOTE(review): assumes base.mk forwards HELM_INSTALL_ARGS to `helm upgrade` —
# confirm against base.mk.
HELM_INSTALL_ARGS ?= \
	--set component.backend.image.tag=$(VERSION) \
	--set component.python-backend.image.tag=$(VERSION) \
	--set component.python-backend.env.PYTHON_CODE_EXECUTOR_IMAGE_TAG="$(VERSION)" \
	--set component.frontend.image.tag=$(VERSION)
include ../_template/base.mk

127
src/opik/README.md Normal file
View File

@@ -0,0 +1,127 @@
# Opik
## Introduction
Opik is an open-source LLM evaluation framework by Comet that helps developers track, evaluate, and optimize their LLM applications. It provides comprehensive observability for LLM calls, prompt management, and evaluation metrics.
## Installation
To install Opik, run:
```bash
make install
```
By default, this will install the latest version of Opik. To install a specific version:
```bash
VERSION=1.0.0 make install
```
## Usage
After installation, verify the deployment:
```bash
kubectl get pods -n opik
```
To access Opik, port-forward the frontend service:
```bash
kubectl port-forward svc/opik-frontend 5173:5173 -n opik
```
Then access at <http://localhost:5173>
## Configuration
### Using External ClickHouse
To use an external ClickHouse installation instead of the built-in one:
```yaml
component:
backend:
waitForClickhouse:
clickhouse:
host: your-clickhouse-host
port: 8123
protocol: http
env:
ANALYTICS_DB_MIGRATIONS_URL: 'jdbc:clickhouse://your-clickhouse-host:8123'
ANALYTICS_DB_HOST: your-clickhouse-host
ANALYTICS_DB_DATABASE_NAME: opik
ANALYTICS_DB_MIGRATIONS_USER: opik
ANALYTICS_DB_USERNAME: opik
ANALYTICS_DB_MIGRATIONS_PASS: your-password
ANALYTICS_DB_PASS: your-password
clickhouse:
enabled: false
```
### Configuring S3 Storage
To use AWS S3 for storage:
```yaml
component:
backend:
env:
S3_BUCKET: your-bucket-name
S3_REGION: us-east-1
AWS_ACCESS_KEY_ID: your-access-key
AWS_SECRET_ACCESS_KEY: your-secret-key
```
### Enabling Ingress
To expose Opik via Ingress:
```yaml
component:
frontend:
ingress:
enabled: true
ingressClassName: nginx
hosts:
- host: opik.example.com
paths:
- path: /
port: 5173
pathType: Prefix
```
## Uninstallation
Before uninstalling, remove the finalizer on the ClickHouse resource:
```bash
kubectl patch -n opik chi opik-clickhouse --type json --patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]'
```
Then uninstall:
```bash
make uninstall
```
## Version Compatibility
Ensure your Python SDK version matches your Kubernetes deployment version:
```bash
pip show opik
```
To update the Python SDK:
```bash
pip install --upgrade opik==<version>
```
## Documentation
- [Opik Documentation](https://www.comet.com/docs/opik/)
- [Helm Chart Documentation](https://comet-ml.github.io/opik/)

127
src/opik/README.zh.md Normal file
View File

@@ -0,0 +1,127 @@
# Opik
## 简介
Opik 是 Comet 开发的开源 LLM 评估框架,帮助开发者跟踪、评估和优化他们的 LLM 应用程序。它为 LLM 调用、提示管理和评估指标提供全面的可观测性。
## 安装
要安装 Opik请运行
```bash
make install
```
默认情况下,这将安装最新版本的 Opik。要安装特定版本
```bash
VERSION=1.0.0 make install
```
## 使用
安装后,验证部署:
```bash
kubectl get pods -n opik
```
要访问 Opik请端口转发前端服务
```bash
kubectl port-forward svc/opik-frontend 5173:5173 -n opik
```
然后在 <http://localhost:5173> 访问
## 配置
### 使用外部 ClickHouse
要使用外部 ClickHouse 安装而不是内置的:
```yaml
component:
backend:
waitForClickhouse:
clickhouse:
host: your-clickhouse-host
port: 8123
protocol: http
env:
ANALYTICS_DB_MIGRATIONS_URL: 'jdbc:clickhouse://your-clickhouse-host:8123'
ANALYTICS_DB_HOST: your-clickhouse-host
ANALYTICS_DB_DATABASE_NAME: opik
ANALYTICS_DB_MIGRATIONS_USER: opik
ANALYTICS_DB_USERNAME: opik
ANALYTICS_DB_MIGRATIONS_PASS: your-password
ANALYTICS_DB_PASS: your-password
clickhouse:
enabled: false
```
### 配置 S3 存储
要使用 AWS S3 进行存储:
```yaml
component:
backend:
env:
S3_BUCKET: your-bucket-name
S3_REGION: us-east-1
AWS_ACCESS_KEY_ID: your-access-key
AWS_SECRET_ACCESS_KEY: your-secret-key
```
### 启用 Ingress
要通过 Ingress 暴露 Opik
```yaml
component:
frontend:
ingress:
enabled: true
ingressClassName: nginx
hosts:
- host: opik.example.com
paths:
- path: /
port: 5173
pathType: Prefix
```
## 卸载
在卸载之前,请移除 ClickHouse 资源上的 finalizer
```bash
kubectl patch -n opik chi opik-clickhouse --type json --patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]'
```
然后卸载:
```bash
make uninstall
```
## 版本兼容性
确保你的 Python SDK 版本与 Kubernetes 部署版本匹配:
```bash
pip show opik
```
要更新 Python SDK
```bash
pip install --upgrade opik==<version>
```
## 文档
- [Opik 文档](https://www.comet.com/docs/opik/)
- [Helm Chart 文档](https://comet-ml.github.io/opik/)

50
src/opik/values.yaml Normal file
View File

@@ -0,0 +1,50 @@
# Opik Helm Chart Values
# Documentation: https://www.comet.com/docs/opik/self-host/kubernetes/
component:
backend:
image:
tag: latest
env:
OPIK_USAGE_REPORT_ENABLED: 'false'
# S3_BUCKET: ""
# S3_REGION: ""
# AWS_ACCESS_KEY_ID: ""
# AWS_SECRET_ACCESS_KEY: ""
python-backend:
image:
tag: latest
env:
PYTHON_CODE_EXECUTOR_IMAGE_TAG: latest
frontend:
image:
tag: latest
ingress:
enabled: false
# ingressClassName: nginx
# annotations: {}
# hosts:
# - host: opik.example.com
# paths:
# - path: /
# port: 5173
# pathType: Prefix
# tls:
# enabled: true
# hosts:
# - opik.example.com
# secretName: opik-tls
# ClickHouse configuration
clickhouse:
enabled: true
# replicasCount: 1
# service:
# serviceTemplate: clickhouse-cluster-svc-lb-template
# annotations: {}
# ZooKeeper configuration (required for ClickHouse replication)
zookeeper:
enabled: true

View File

@@ -1,8 +1,8 @@
HELM_RELEASE_NAME ?= phoenix
HELM_APPLICATION_NAME ?= phoenix
HELM_APPLICATION_NAME ?= phoenix-helm
HELM_NAMESPACE ?= phoenix
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?= 4.0.37
HELM_CHART_VERSION ?= 5.0.5
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?= docker.io
HELM_OCI_NAMESPACE ?= arizephoenix
@@ -10,6 +10,6 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/phoenix-helm
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

View File

@@ -1,6 +1,6 @@
HELM_RELEASE_NAME ?= rabbitmq-cluster-operator
HELM_APPLICATION_NAME ?= rabbitmq-cluster-operator
HELM_NAMESPACE ?= rabbitmq-cluster-operator
HELM_RELEASE_NAME ?= rabbitmq
HELM_APPLICATION_NAME ?= rabbitmq
HELM_NAMESPACE ?= rabbitmq
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?= 0.2.0
HELM_VALUES_FILE ?= ./values.yaml
@@ -8,8 +8,28 @@ HELM_OCI_REGISTRY ?= docker.io
HELM_OCI_NAMESPACE ?= cloudpirates
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
# Operator configuration
OPERATOR_RELEASE_NAME ?= rabbitmq-cluster-operator
OPERATOR_NAMESPACE ?= rabbitmq-operator
OPERATOR_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/rabbitmq-cluster-operator
OPERATOR_CHART_VERSION ?= $(HELM_CHART_VERSION)
OPERATOR_VALUES_FILE ?= ./values.yaml
# For RabbitMQ Cluster Operator, we only install the operator
# RabbitMQ clusters are created using RabbitmqCluster CRDs after operator is installed
include ../_template/base.mk
include ../_template/operator.mk
# Override install target to only install operator
.PHONY: install
install: install-operator
# Override uninstall target to only uninstall operator
.PHONY: uninstall
uninstall: uninstall-operator
# Override verify target
.PHONY: verify
verify: verify-operator

View File

@@ -10,6 +10,6 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= goodrain
HELM_REPO_URL ?= https://openchart.goodrain.com/goodrain/rainbond
HELM_CHART_REPO ?= goodrain/rainbond
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk