feat: add portainer/...

This commit is contained in:
Sun-ZhenXing
2026-02-22 22:12:04 +08:00
parent bfa41fb903
commit 2544127de2
72 changed files with 1925 additions and 309 deletions

View File

@@ -2,13 +2,7 @@ import antfu from '@antfu/eslint-config'
export default antfu({
// Enable YAML support
yaml: {
overrides: {
// For Helm values files, allow flexible string styles
'yaml/quotes': ['error', { prefer: 'single', avoidEscape: true }],
// 'yaml/plain-scalar': 'off',
},
},
yaml: true,
// Disable other language support we don't need
typescript: false,
vue: false,
@@ -26,10 +20,6 @@ export default antfu({
'**/node_modules/**',
'**/.git/**',
],
}, {
// Disable sort keys rule for JSON files
files: ['**/package.json'],
rules: {
'jsonc/sort-keys': 'off',
},
// Lint readme files
markdown: true,
})

View File

@@ -1,19 +1,23 @@
{
"name": "helm-anything",
"version": "1.0.0",
"packageManager": "pnpm@10.25.0",
"description": "Helm command templates for quick service deployment",
"author": "",
"license": "ISC",
"keywords": [
"helm",
"kubernetes",
"deployment"
],
"main": "index.js",
"scripts": {
"lint": "eslint .",
"lint:fix": "eslint . --fix",
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": ["helm", "kubernetes", "deployment"],
"author": "",
"license": "ISC",
"packageManager": "pnpm@10.25.0",
"devDependencies": {
"@antfu/eslint-config": "^7.4.3",
"eslint": "^10.0.0"
"eslint": "^10.0.1"
}
}

477
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,3 +1,12 @@
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
LOGS_TAIL ?= 100
.PHONY: repo-add
repo-add:
ifdef HELM_REPO_NAME
@@ -19,7 +28,8 @@ install:
--namespace $(HELM_NAMESPACE) \
--create-namespace \
$(if $(HELM_CHART_VERSION),--version $(HELM_CHART_VERSION),) \
$(if $(HELM_INSTALL_ARGS),$(HELM_INSTALL_ARGS),) \
--values $(HELM_VALUES_FILE) > output.log 2>&1 || (cat output.log && exit 1)
.PHONY: uninstall
uninstall:
@@ -27,7 +37,9 @@ uninstall:
.PHONY: values
values:
helm show values $(HELM_CHART_REPO) > $(HELM_APPLICATION_NAME)-values.yaml
helm show values $(HELM_CHART_REPO) \
$(if $(HELM_CHART_VERSION),--version $(HELM_CHART_VERSION),) \
> $(HELM_APPLICATION_NAME)-values.yaml
.PHONY: versions
versions:
@@ -61,4 +73,4 @@ helm-push:
.PHONY: logs
logs:
kubectl logs -n $(HELM_NAMESPACE) -l app=$(HELM_APPLICATION_NAME) --tail=100 --follow
kubectl logs -n $(HELM_NAMESPACE) -l app=$(HELM_APPLICATION_NAME) --tail=$(LOGS_TAIL) --follow

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= apache-airflow
HELM_REPO_URL ?= https://airflow.apache.org
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= argo
HELM_REPO_URL ?= https://argoproj.github.io/argo-helm
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -9,6 +9,5 @@ HELM_OCI_NAMESPACE ?=
HELM_REPO_NAME ?= altinity
HELM_REPO_URL ?= https://helm.altinity.com
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= jupyterhub
HELM_REPO_URL ?= https://hub.jupyter.org/helm-chart/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/jupyterhub
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -9,6 +9,5 @@ HELM_OCI_NAMESPACE ?=
HELM_REPO_NAME ?= elastic
HELM_REPO_URL ?= https://helm.elastic.co
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= flink-operator
HELM_REPO_URL ?= https://downloads.apache.org/flink/flink-kubernetes-operator-1.9.0/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/flink-operator
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= gitea
HELM_REPO_URL ?= https://dl.gitea.com/charts
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/actions
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= gitea
HELM_REPO_URL ?= https://dl.gitea.com/charts
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/gitea
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= gitlab
HELM_REPO_URL ?= https://charts.gitlab.io
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/gitlab-runner
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= gitlab
HELM_REPO_URL ?= https://charts.gitlab.io
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/gitlab
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= grafana
HELM_REPO_URL ?= https://grafana.github.io/helm-charts
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/grafana
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= harbor
HELM_REPO_URL ?= https://helm.goharbor.io
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/harbor
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= kubeblocks
HELM_REPO_URL ?= https://kubeblocks.io/charts
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/hbase-cluster
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= jenkins
HELM_REPO_URL ?= https://charts.jenkins.io
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

15
src/k3k/Makefile Normal file
View File

@@ -0,0 +1,15 @@
# Helm deployment configuration for K3k (virtual K3s clusters inside Kubernetes).
# Every knob uses ?= so it can be overridden from the environment or the make CLI.
HELM_RELEASE_NAME ?= k3k
HELM_APPLICATION_NAME ?= k3k
HELM_NAMESPACE ?= k3k
HELM_DIR ?= ./helm
# Empty means "install the latest available chart version".
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
# OCI registry credentials — only needed for the OCI push/pull workflows.
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
# Chart source: the Rancher-maintained k3k Helm repository.
HELM_REPO_NAME ?= k3k
HELM_REPO_URL ?= https://rancher.github.io/k3k
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Shared targets (install, uninstall, values, versions, logs, ...) come from the template.
include ../_template/base.mk

115
src/k3k/README.md Normal file
View File

@@ -0,0 +1,115 @@
# K3k
## Introduction
K3k (Kubernetes in Kubernetes) is a tool that empowers you to create and manage isolated K3s clusters within your existing Kubernetes environment. It enables efficient multi-tenancy, streamlined experimentation, and robust resource isolation, minimizing infrastructure costs by allowing you to run multiple lightweight Kubernetes clusters on the same physical host.
## Features
- **Shared Mode**: Optimized resource utilization by sharing underlying resources
- **Virtual Mode**: Complete isolation with dedicated K3s server pods
- **Rancher Integration**: Seamlessly integrates with Rancher for simplified cluster management
- **Resource Isolation**: Define resource limits and quotas for each embedded cluster
- **Lightweight**: Leverages the lightweight nature of K3s for fast cluster provisioning
## Prerequisites
- A working Kubernetes cluster (host cluster)
- Storage provider configured (or use ephemeral/static storage)
- kubectl configured to access the host cluster
## Installation
To install K3k controller, run:
```bash
make install
```
## Usage
### Install k3kcli (Optional)
Download and install the k3k CLI tool:
```bash
# Linux amd64
wget -qO k3kcli https://github.com/rancher/k3k/releases/latest/download/k3kcli-linux-amd64
chmod +x k3kcli
sudo mv k3kcli /usr/local/bin
```
### Create a K3k Cluster
Using k3kcli:
```bash
k3kcli cluster create my-cluster
```
Using kubectl (create a Cluster CR):
```bash
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: my-cluster
namespace: k3k
spec:
mode: shared
servers: 1
agents: 0
version: v1.28.4+k3s1
EOF
```
### Get Cluster Kubeconfig
```bash
# Using k3kcli
k3kcli kubeconfig generate --name my-cluster --namespace k3k
# Or directly from secret
kubectl get secret my-cluster-kubeconfig -n k3k -o jsonpath='{.data.value}' | base64 -d > my-cluster.yaml
```
### Delete a K3k Cluster
```bash
# Using k3kcli
k3kcli cluster delete my-cluster
# Or using kubectl
kubectl delete cluster my-cluster -n k3k
```
## Cluster Modes
### Shared Mode
- Server components run as pods in the host cluster
- Efficient resource sharing
- Suitable for development and testing
### Virtual Mode
- Each cluster has dedicated K3s server pods
- Complete network and resource isolation
- Suitable for multi-tenant environments
## Configuration
Edit `values.yaml` to configure:
- Controller resources
- Feature gates
- Image versions
- RBAC settings
## Important Notes
1. K3k creates virtual Kubernetes clusters within your existing cluster
2. The host cluster must have sufficient resources for virtual clusters
3. Storage classes must be available for persistent workloads
4. Consider network policies for isolation between virtual clusters

115
src/k3k/README.zh.md Normal file
View File

@@ -0,0 +1,115 @@
# K3k
## 简介
K3k（Kubernetes in Kubernetes）是一个工具，让您能够在现有 Kubernetes 环境中创建和管理隔离的 K3s 集群。它实现了高效的多租户、简化的实验环境和强大的资源隔离，通过在同一物理主机上运行多个轻量级 Kubernetes 集群来最小化基础设施成本。
## 功能
- **共享模式**: 通过共享底层资源优化资源利用率
- **虚拟模式**: 使用专用 K3s server pod 实现完全隔离
- **Rancher 集成**: 与 Rancher 无缝集成以简化集群管理
- **资源隔离**: 为每个嵌入式集群定义资源限制和配额
- **轻量级**: 利用 K3s 的轻量级特性实现快速集群配置
## 前置条件
- 一个正常工作的 Kubernetes 集群(主机集群)
- 配置了存储提供商(或使用临时/静态存储)
- 配置了 kubectl 以访问主机集群
## 安装
安装 K3k 控制器:
```bash
make install
```
## 使用
### 安装 k3kcli（可选）
下载并安装 k3k CLI 工具:
```bash
# Linux amd64
wget -qO k3kcli https://github.com/rancher/k3k/releases/latest/download/k3kcli-linux-amd64
chmod +x k3kcli
sudo mv k3kcli /usr/local/bin
```
### 创建 K3k 集群
使用 k3kcli：
```bash
k3kcli cluster create my-cluster
```
使用 kubectl（创建 Cluster CR）：
```bash
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: my-cluster
namespace: k3k
spec:
mode: shared
servers: 1
agents: 0
version: v1.28.4+k3s1
EOF
```
### 获取集群 Kubeconfig
```bash
# 使用 k3kcli
k3kcli kubeconfig generate --name my-cluster --namespace k3k
# 或直接从 secret 获取
kubectl get secret my-cluster-kubeconfig -n k3k -o jsonpath='{.data.value}' | base64 -d > my-cluster.yaml
```
### 删除 K3k 集群
```bash
# 使用 k3kcli
k3kcli cluster delete my-cluster
# 或使用 kubectl
kubectl delete cluster my-cluster -n k3k
```
## 集群模式
### 共享模式
- Server 组件作为 pod 运行在主机集群中
- 高效的资源共享
- 适用于开发和测试
### 虚拟模式
- 每个集群都有专用的 K3s server pod
- 完整的网络和资源隔离
- 适用于多租户环境
## 配置
编辑 `values.yaml` 以配置:
- 控制器资源
- 功能门
- 镜像版本
- RBAC 设置
## 重要提示
1. K3k 在现有集群中创建虚拟 Kubernetes 集群
2. 主机集群必须有足够的资源来运行虚拟集群
3. 持久化工作负载必须有可用的存储类
4. 考虑使用网络策略来实现虚拟集群之间的隔离

70
src/k3k/values.yaml Normal file
View File

@@ -0,0 +1,70 @@
# Default values for K3k.
# This is a YAML-formatted file.
# Number of K3k controller replicas
replicaCount: 1
image:
repository: rancher/k3k
tag: v1.0.4
pullPolicy: IfNotPresent
# Controller configuration
controller:
enabled: true
# Feature gates to enable
featureGates: {}
# VirtualCluster: true
# SharedCluster: true
# Webhook configuration
webhook:
enabled: true
port: 9443
# Resource limits and requests
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
# Service account configuration
serviceAccount:
create: true
name: ''
annotations: {}
# RBAC configuration
rbac:
create: true
# Pod security context
podSecurityContext:
runAsNonRoot: true
runAsUser: 1000
# Security context
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
# Node selector
nodeSelector: {}
# Tolerations
tolerations: []
# Affinity
affinity: {}
# Extra environment variables
extraEnv: []
# Extra arguments
extraArgs: []

View File

@@ -9,6 +9,5 @@ HELM_OCI_NAMESPACE ?=
HELM_REPO_NAME ?= strimzi
HELM_REPO_URL ?= https://strimzi.io/charts/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -9,6 +9,5 @@ HELM_OCI_NAMESPACE ?=
HELM_REPO_NAME ?= elastic
HELM_REPO_URL ?= https://helm.elastic.co
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= kong
HELM_REPO_URL ?= https://charts.konghq.com
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/kong
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -9,6 +9,5 @@ HELM_OCI_NAMESPACE ?=
HELM_REPO_NAME ?= langfuse
HELM_REPO_URL ?= https://langfuse.github.io/langfuse-k8s
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= litellm
HELM_REPO_URL ?= https://berriai.github.io/litellm-helm
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/litellm-helm
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= grafana
HELM_REPO_URL ?= https://grafana.github.io/helm-charts
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/loki
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -9,6 +9,5 @@ HELM_OCI_NAMESPACE ?=
HELM_REPO_NAME ?= milvus
HELM_REPO_URL ?= https://zilliztech.github.io/milvus-helm
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= community-charts
HELM_REPO_URL ?= https://community-charts.github.io
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/mlflow
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= mysql-operator
HELM_REPO_URL ?= https://mysql.github.io/mysql-operator/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/mysql-innodbcluster
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= nacos
HELM_REPO_URL ?= https://nacos-charts.storage.googleapis.com
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/nacos
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= neo4j
HELM_REPO_URL ?= https://helm.neo4j.com/neo4j
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/neo4j
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= sonatype
HELM_REPO_URL ?= https://sonatype.github.io/helm3-charts/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= open-webui
HELM_REPO_URL ?= https://helm.openwebui.com
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/open-webui
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -9,6 +9,5 @@ HELM_OCI_NAMESPACE ?=
HELM_REPO_NAME ?= open-telemetry
HELM_REPO_URL ?= https://open-telemetry.github.io/opentelemetry-helm-charts
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/phoenix-helm
HELM_LANE ?=
include ../_template/base.mk

15
src/portainer/Makefile Normal file
View File

@@ -0,0 +1,15 @@
# Helm deployment configuration for Portainer (container-management UI).
# Every knob uses ?= so it can be overridden from the environment or the make CLI.
HELM_RELEASE_NAME ?= portainer
HELM_APPLICATION_NAME ?= portainer
HELM_NAMESPACE ?= portainer
HELM_DIR ?= ./helm
# Empty means "install the latest available chart version".
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
# OCI registry credentials — only needed for the OCI push/pull workflows.
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
# Chart source: the official Portainer Kubernetes chart repository.
HELM_REPO_NAME ?= portainer
HELM_REPO_URL ?= https://portainer.github.io/k8s/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Shared targets (install, uninstall, values, versions, logs, ...) come from the template.
include ../_template/base.mk

77
src/portainer/README.md Normal file
View File

@@ -0,0 +1,77 @@
# Portainer
## Introduction
Portainer is a lightweight management UI that allows you to easily manage your Docker and Kubernetes environments. It provides a simple and intuitive interface for deploying and managing containerized applications, as well as managing resources such as networks, volumes, and images.
## Installation
To install Portainer, run:
```bash
make install
```
## Usage
After installation, access Portainer:
### Using NodePort (default)
Portainer will be available on port `30777` for HTTP and `30779` for HTTPS.
```bash
# Port forward to access web UI
kubectl port-forward svc/portainer -n portainer 30777:9000 30779:9443
```
Then open `https://localhost:30779` in your browser.
### Using LoadBalancer
If you configured `service.type=LoadBalancer`, get the external IP:
```bash
kubectl get svc -n portainer
```
Then access Portainer at `https://<EXTERNAL-IP>:9443`.
### Using Ingress
If you configured ingress, access Portainer at your configured hostname.
## Initial Setup
1. Open Portainer in your browser
2. Create an admin user on the initial setup page
3. Select "Kubernetes" as the environment type
4. Start managing your cluster
## Configuration
The default configuration uses NodePort service type. You can customize:
- `service.type`: Change to `LoadBalancer` or `ClusterIP` with ingress
- `tls.force`: Enable/disable TLS
- `persistence.storageClass`: Specify a storage class
- `resources`: Adjust CPU/memory limits
## Features
- **Container Management**: Deploy, manage, and monitor containers
- **Image Management**: Pull, push, and manage container images
- **Network Management**: Create and manage Docker networks
- **Volume Management**: Manage persistent storage
- **Multi-Environment**: Manage multiple Kubernetes clusters
- **RBAC**: Role-based access control for teams
## Customization
Edit `values.yaml` to configure:
- Ingress settings
- Resource limits
- Persistence options
- TLS configuration
- Feature flags

View File

@@ -0,0 +1,77 @@
# Portainer
## 简介
Portainer 是一个轻量级的管理 UI，可让您轻松管理 Docker 和 Kubernetes 环境。它提供了一个简单直观的界面，用于部署和管理容器化应用程序，以及管理网络、卷和镜像等资源。
## 安装
安装 Portainer
```bash
make install
```
## 使用
安装完成后,访问 Portainer
### 使用 NodePort（默认）
Portainer 将在端口 `30777`（HTTP）和 `30779`（HTTPS）上可用。
```bash
# 端口转发以访问 Web UI
kubectl port-forward svc/portainer -n portainer 30777:9000 30779:9443
```
然后在浏览器中打开 `https://localhost:30779`
### 使用 LoadBalancer
如果您配置了 `service.type=LoadBalancer`,获取外部 IP
```bash
kubectl get svc -n portainer
```
然后在 `https://<EXTERNAL-IP>:9443` 访问 Portainer。
### 使用 Ingress
如果您配置了 ingress，请在您配置的主机名访问 Portainer。
## 初始设置
1. 在浏览器中打开 Portainer
2. 在初始设置页面创建管理员用户
3. 选择 "Kubernetes" 作为环境类型
4. 开始管理您的集群
## 配置
默认配置使用 NodePort 服务类型。您可以自定义:
- `service.type`: 更改为 `LoadBalancer` 或配合 ingress 使用 `ClusterIP`
- `tls.force`: 启用/禁用 TLS
- `persistence.storageClass`: 指定存储类
- `resources`: 调整 CPU/内存限制
## 功能
- **容器管理**: 部署、管理和监控容器
- **镜像管理**: 拉取、推送和管理容器镜像
- **网络管理**: 创建和管理 Docker 网络
- **卷管理**: 管理持久化存储
- **多环境**: 管理多个 Kubernetes 集群
- **RBAC**: 团队的基于角色的访问控制
## 自定义
编辑 `values.yaml` 以配置:
- Ingress 设置
- 资源限制
- 持久化选项
- TLS 配置
- 功能标志

65
src/portainer/values.yaml Normal file
View File

@@ -0,0 +1,65 @@
# Default values for Portainer.
# This is a YAML-formatted file.
replicaCount: 1
image:
repository: portainer/portainer-ce
tag: lts
pullPolicy: IfNotPresent
service:
type: NodePort
httpPort: 9000
httpsPort: 9443
httpNodePort: 30777
httpsNodePort: 30779
# Enable persistence using Persistent Volume Claims
persistence:
enabled: true
size: 10Gi
# storageClass: ""
# existingClaim: ""
# Resource limits and requests
resources:
limits:
cpu: 1000m
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
# Ingress configuration
ingress:
enabled: false
# ingressClassName: nginx
# annotations:
# nginx.ingress.kubernetes.io/backend-protocol: HTTPS
# hosts:
# - host: portainer.example.com
# paths:
# - path: /
# pathType: Prefix
# tls:
# - secretName: portainer-tls
# hosts:
# - portainer.example.com
# TLS configuration
tls:
force: true
# Feature flags
feature:
flags: ''
# Node selector
nodeSelector: {}
# Tolerations
tolerations: []
# Affinity
affinity: {}

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= prometheus-community
HELM_REPO_URL ?= https://prometheus-community.github.io/helm-charts
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/kube-prometheus-stack
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= apache
HELM_REPO_URL ?= https://pulsar.apache.org/charts
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/pulsar
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

15
src/rainbond/Makefile Normal file
View File

@@ -0,0 +1,15 @@
# Helm deployment configuration for Rainbond (cloud-native application platform).
# Every knob uses ?= so it can be overridden from the environment or the make CLI.
HELM_RELEASE_NAME ?= rainbond
HELM_APPLICATION_NAME ?= rainbond
# Rainbond's documented install namespace.
HELM_NAMESPACE ?= rbd-system
HELM_DIR ?= ./helm
# Empty means "install the latest available chart version".
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
# OCI registry credentials — only needed for the OCI push/pull workflows.
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
# Chart source: the goodrain-hosted Rainbond chart repository.
HELM_REPO_NAME ?= goodrain
HELM_REPO_URL ?= https://openchart.goodrain.com/goodrain/rainbond
# Compose from the variables (default still resolves to goodrain/rainbond) so that
# overriding HELM_REPO_NAME / HELM_APPLICATION_NAME works like in sibling Makefiles.
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Shared targets (install, uninstall, values, versions, logs, ...) come from the template.
include ../_template/base.mk

67
src/rainbond/README.md Normal file
View File

@@ -0,0 +1,67 @@
# Rainbond
## Introduction
Rainbond is a cloud-native application management platform that enables easy deployment, management, and scaling of applications on Kubernetes. It provides a complete PaaS (Platform as a Service) solution with:
- **Application Management**: Easy deployment and lifecycle management of applications
- **Service Mesh**: Built-in service discovery and traffic management
- **Multi-tenant**: Support for multiple teams and users
- **CI/CD Integration**: Automated build and deployment pipelines
- **Marketplace**: Pre-built application templates and components
## Installation
To install Rainbond, run:
```bash
make install
```
This will deploy Rainbond in the `rbd-system` namespace.
## Usage
After installation, verify the deployment:
```bash
kubectl get pods -n rbd-system
```
To access the Rainbond console, you need to get the gateway IP:
```bash
kubectl get pod -n rbd-system -l name=rbd-gateway -o wide
```
Then visit: `http://<GATEWAY_NODE_IP>:7070`
Or if you have configured an ingress:
```bash
kubectl get ingress -n rbd-system
```
## Configuration
You can customize the installation by modifying the `values.yaml` file:
- `Cluster.gatewayIngressIPs`: Set the gateway ingress IPs
- `Cluster.nodesForGateway`: Configure nodes for gateway
- `pvc.storageClassName`: Set the storage class for PVCs
- `resources`: Configure resource limits and requests
## Uninstall
To uninstall Rainbond:
```bash
make uninstall
```
## Documentation
For more information, please refer to the official documentation:
- [Rainbond Documentation](https://www.rainbond.com/docs/)
- [Helm Chart Repository](https://github.com/goodrain/rainbond-chart)

67
src/rainbond/README.zh.md Normal file
View File

@@ -0,0 +1,67 @@
# Rainbond
## 简介
Rainbond 是一个云原生应用管理平台，可以轻松地在 Kubernetes 上部署、管理和扩展应用程序。它提供了一个完整的 PaaS（平台即服务）解决方案，包括：
- **应用管理**:轻松部署和管理应用程序生命周期
- **服务网格**:内置服务发现和流量管理
- **多租户**:支持多个团队和用户
- **CI/CD 集成**:自动化构建和部署流水线
- **应用市场**:预构建的应用模板和组件
## 安装
要安装 Rainbond，请运行：
```bash
make install
```
这将在 `rbd-system` 命名空间中部署 Rainbond。
## 使用
安装完成后,验证部署状态:
```bash
kubectl get pods -n rbd-system
```
要访问 Rainbond 控制台,您需要获取网关 IP
```bash
kubectl get pod -n rbd-system -l name=rbd-gateway -o wide
```
然后访问:`http://<GATEWAY_NODE_IP>:7070`
如果您配置了 ingress
```bash
kubectl get ingress -n rbd-system
```
## 配置
您可以通过修改 `values.yaml` 文件来自定义安装:
- `Cluster.gatewayIngressIPs`:设置网关入口 IP
- `Cluster.nodesForGateway`:配置网关节点
- `pvc.storageClassName`:设置 PVC 的存储类
- `resources`:配置资源限制和请求
## 卸载
要卸载 Rainbond
```bash
make uninstall
```
## 文档
更多信息请参阅官方文档:
- [Rainbond 文档](https://www.rainbond.com/docs/)
- [Helm Chart 仓库](https://github.com/goodrain/rainbond-chart)

78
src/rainbond/values.yaml Normal file
View File

@@ -0,0 +1,78 @@
# Rainbond Helm Chart Values
# https://github.com/goodrain/rainbond-chart
Cluster:
# Gateway ingress IPs
gatewayIngressIPs: []
# Nodes for gateway
nodesForGateway: []
# Containerd runtime path, will auto-detect if not set
containerdRuntimePath: ''
# Enable UI
ui:
enabled: true
# Enable hub
hub:
enabled: true
# Enable region
region:
enabled: true
# Enable console
console:
enabled: true
# Enable chaos
chaos:
enabled: true
# Enable monitor
monitor:
enabled: true
# Enable worker
worker:
enabled: true
# Enable mq
mq:
enabled: true
# Enable eventlog
eventlog:
enabled: true
# Enable webcli
webcli:
enabled: true
# Enable api
api:
enabled: true
# Enable grctl
grctl:
enabled: true
# PVC configuration
pvc:
# Storage class name
storageClassName: ''
# Access mode
accessMode: ReadWriteOnce
# Storage size
storageSize: 10Gi
# Resource configuration
resources:
limits:
cpu: '2'
memory: 4Gi
requests:
cpu: '0.5'
memory: 1Gi

15
src/rancher/Makefile Normal file
View File

@@ -0,0 +1,15 @@
# Helm deployment configuration for Rancher (multi-cluster Kubernetes management).
# Every knob uses ?= so it can be overridden from the environment or the make CLI.
HELM_RELEASE_NAME ?= rancher
HELM_APPLICATION_NAME ?= rancher
# Rancher's documented install namespace.
HELM_NAMESPACE ?= cattle-system
HELM_DIR ?= ./helm
# Empty means "install the latest available chart version".
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
# OCI registry credentials — only needed for the OCI push/pull workflows.
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
# Chart source: the official Rancher stable release channel.
HELM_REPO_NAME ?= rancher-stable
HELM_REPO_URL ?= https://releases.rancher.com/server-charts/stable
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Shared targets (install, uninstall, values, versions, logs, ...) come from the template.
include ../_template/base.mk

88
src/rancher/README.md Normal file
View File

@@ -0,0 +1,88 @@
# Rancher
## Introduction
Rancher is a complete container management platform that makes it easy to run Kubernetes everywhere. It solves three major operational challenges: provisioning Kubernetes clusters on any infrastructure, unifying disparate Kubernetes clusters under centralized authentication and access control, and workload management across clusters.
## Prerequisites
- A Kubernetes cluster (RKE2, K3s, or any certified Kubernetes distribution)
- cert-manager installed (v1.8.0 or higher recommended)
- Default StorageClass configured
## Installation
To install Rancher, run:
```bash
make install
```
## Usage
After installation, access Rancher:
### Get the Rancher URL
```bash
# For LoadBalancer service type
kubectl get svc -n cattle-system
# For NodePort service type
kubectl get svc -n cattle-system
```
### Get the Bootstrap Password
```bash
kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}{{ "\n" }}'
```
### Access the UI
1. Open the Rancher URL in your browser
2. Log in with username `admin` and the bootstrap password
3. Follow the setup wizard to configure Rancher
## Configuration
### SSL/TLS Configuration
Rancher supports multiple SSL/TLS configurations:
- **rancher**: Use Rancher-generated CA certificate (default)
- **letsEncrypt**: Use Let's Encrypt for automatic certificate management
- **secret**: Use your own TLS certificate
### Service Type
- **LoadBalancer**: Recommended for production (requires cloud provider or MetalLB)
- **NodePort**: For local/development environments
- **ClusterIP**: For ingress controller configuration
## Features
- **Multi-Cluster Management**: Manage multiple Kubernetes clusters from a single interface
- **User Management**: Centralized authentication and RBAC
- **App Catalog**: Deploy applications from Helm charts
- **Monitoring**: Built-in monitoring with Prometheus and Grafana
- **Logging**: Centralized logging with Fluentd
- **Istio Service Mesh**: Deploy and manage Istio
- **CI/CD Pipelines**: Integrated CI/CD with Fleet
## Customization
Edit `values.yaml` to configure:
- `hostname`: The FQDN for Rancher
- `ingress.tls.source`: SSL certificate source
- `replicas`: Number of Rancher replicas
- `resources`: CPU/memory limits
- `auditLog`: Audit logging configuration
## Important Notes
1. Rancher should only be installed on a supported Kubernetes cluster
2. The bootstrap password is generated automatically on first install
3. For production, use a proper SSL certificate and hostname
4. Consider using Rancher Backup operator for disaster recovery

88
src/rancher/README.zh.md Normal file
View File

@@ -0,0 +1,88 @@
# Rancher
## 简介
Rancher 是一个完整的容器管理平台，可让您轻松地在任何地方运行 Kubernetes。它解决了三个主要的运营挑战：在任何基础设施上配置 Kubernetes 集群、将不同的 Kubernetes 集群统一到集中式认证和访问控制下，以及跨集群的工作负载管理。
## 前置条件
- Kubernetes 集群（RKE2、K3s 或任何经过认证的 Kubernetes 发行版）
- 已安装 cert-manager（建议 v1.8.0 或更高版本）
- 配置了默认 StorageClass
## 安装
安装 Rancher
```bash
make install
```
## 使用
安装完成后,访问 Rancher
### 获取 Rancher URL
```bash
# 对于 LoadBalancer 服务类型
kubectl get svc -n cattle-system
# 对于 NodePort 服务类型
kubectl get svc -n cattle-system
```
### 获取引导密码
```bash
kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}{{ "\n" }}'
```
### 访问 UI
1. 在浏览器中打开 Rancher URL
2. 使用用户名 `admin` 和引导密码登录
3. 按照设置向导配置 Rancher
## 配置
### SSL/TLS 配置
Rancher 支持多种 SSL/TLS 配置:
- **rancher**: 使用 Rancher 生成的 CA 证书(默认)
- **letsEncrypt**: 使用 Let's Encrypt 进行自动证书管理
- **secret**: 使用您自己的 TLS 证书
### 服务类型
- **LoadBalancer**: 推荐用于生产环境(需要云提供商或 MetalLB
- **NodePort**: 用于本地/开发环境
- **ClusterIP**: 用于 ingress 控制器配置
## 功能
- **多集群管理**: 从单一界面管理多个 Kubernetes 集群
- **用户管理**: 集中式认证和 RBAC
- **应用目录**: 从 Helm charts 部署应用程序
- **监控**: 内置 Prometheus 和 Grafana 监控
- **日志**: 使用 Fluentd 的集中式日志
- **Istio 服务网格**: 部署和管理 Istio
- **CI/CD 流水线**: 与 Fleet 集成的 CI/CD
## 自定义
编辑 `values.yaml` 以配置:
- `hostname`: Rancher 的 FQDN
- `ingress.tls.source`: SSL 证书源
- `replicas`: Rancher 副本数
- `resources`: CPU/内存限制
- `auditLog`: 审计日志配置
## 重要提示
1. Rancher 只能安装在支持的 Kubernetes 集群上
2. 引导密码在安装时自动生成
3. 对于生产环境,使用适当的 SSL 证书和主机名
4. 考虑使用 Rancher Backup operator 进行灾难恢复

75
src/rancher/values.yaml Normal file
View File

@@ -0,0 +1,75 @@
# Default values for Rancher.
# This is a YAML-formatted file.
# Rancher server hostname (required for production)
hostname: ''
# Number of Rancher server replicas
replicas: 3
# Image configuration
image:
repository: rancher/rancher
tag: v2.9.2
pullPolicy: IfNotPresent
# Ingress configuration
ingress:
enabled: true
includeDefaultExtraAnnotations: true
extraAnnotations: {}
# ingressClassName: nginx
tls:
# Options: rancher, letsEncrypt, secret
source: rancher
# secretName: tls-rancher-ingress
# Service configuration
service:
type: ClusterIP
# type: LoadBalancer
# Resource limits and requests
resources:
limits:
cpu: 2000m
memory: 2Gi
requests:
cpu: 500m
memory: 1Gi
# Audit Log configuration
auditLog:
enabled: false
# level: 1
# maxAge: 10
# maxBackup: 10
# maxSize: 100
# path: /var/log/auditlog/rancher-api-audit.log
# Extra environment variables
extraEnv: []
# - name: CATTLE_SERVER_URL
# value: "https://rancher.example.com"
# Bootstrap password (auto-generated if not set)
# bootstrapPassword: "admin"
# Private CA certificates
privateCA: false
# cacerts: ""
# Additional trusted CAs
additionalTrustedCAs: false
# Debug mode
debug: false
# Node selector
nodeSelector: {}
# Tolerations
tolerations: []
# Affinity
affinity: {}

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

15
src/sealos/Makefile Normal file
View File

@@ -0,0 +1,15 @@
# Helm deployment configuration for Sealos.
# Every variable uses ?= so it can be overridden from the environment or the
# make command line; shared install/uninstall targets come from base.mk.
HELM_RELEASE_NAME ?= sealos
HELM_APPLICATION_NAME ?= sealos
HELM_NAMESPACE ?= sealos
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
# OCI registry settings (unused when installing from a classic Helm repo).
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
# Classic Helm repository hosting the chart.
# NOTE(review): seal-io publishes the "Seal" project's charts — confirm it is
# the intended source for a Sealos chart.
HELM_REPO_NAME ?= seal-io
HELM_REPO_URL ?= https://seal-io.github.io/helm-charts
# Derive the chart path from the variables above (same expansion as the
# previous hard-coded "seal-io/sealos", consistent with the other service
# Makefiles such as src/vcluster).
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

80
src/sealos/README.md Normal file
View File

@@ -0,0 +1,80 @@
# Sealos
## Introduction
Sealos is a cloud-native Kubernetes distribution and cluster management platform that provides:
- **Kubernetes Lifecycle Management**: Easy installation and management of Kubernetes clusters
- **Application Management**: Deploy and manage applications with a simple command
- **Cloud Shell**: Built-in cloud shell for cluster operations
- **Multi-cluster Management**: Manage multiple Kubernetes clusters from a single interface
- **App Store**: Built-in marketplace for popular applications
## Installation
To install Sealos, run:
```bash
make install
```
This will deploy Sealos in the `sealos` namespace.
## Prerequisites
- Kubernetes v1.27+
- Helm v3.14+
- Cluster-admin privileges
## Usage
After installation, verify the deployment:
```bash
kubectl get pods -n sealos
```
To access the Sealos console:
```bash
kubectl get svc -n sealos
```
If you are using LoadBalancer service type, get the external IP:
```bash
kubectl get svc sealos -n sealos
```
Or use port-forward for local access:
```bash
kubectl port-forward -n sealos svc/sealos 8080:6443
```
Then visit: `http://localhost:8080`
## Configuration
You can customize the installation by modifying the `values.yaml` file:
- `image.repository` and `image.tag`: Change the Sealos image
- `persistence`: Configure persistent storage
- `service.type`: Change service type (ClusterIP, NodePort, LoadBalancer)
- `auth.enableTLS`: Enable TLS for the API endpoint
- `ingress`: Configure ingress for external access
## Uninstall
To uninstall Sealos:
```bash
make uninstall
```
## Documentation
For more information, please refer to the official documentation:
- [Sealos Documentation](https://sealos.io/docs/)
- [Helm Chart Repository](https://github.com/seal-io/helm-charts)

80
src/sealos/README.zh.md Normal file
View File

@@ -0,0 +1,80 @@
# Sealos
## 简介
Sealos 是一个云原生 Kubernetes 发行版和集群管理平台,提供以下功能:
- **Kubernetes 生命周期管理**:轻松安装和管理 Kubernetes 集群
- **应用管理**:使用简单命令部署和管理应用程序
- **云终端**:内置云终端用于集群操作
- **多集群管理**:从单一界面管理多个 Kubernetes 集群
- **应用商店**:内置流行应用市场
## 安装
要安装 Sealos,请运行:
```bash
make install
```
这将在 `sealos` 命名空间中部署 Sealos。
## 前置条件
- Kubernetes v1.27+
- Helm v3.14+
- 集群管理员权限
## 使用
安装完成后,验证部署状态:
```bash
kubectl get pods -n sealos
```
要访问 Sealos 控制台:
```bash
kubectl get svc -n sealos
```
如果您使用的是 LoadBalancer 服务类型,获取外部 IP:
```bash
kubectl get svc sealos -n sealos
```
或者使用 port-forward 进行本地访问:
```bash
kubectl port-forward -n sealos svc/sealos 8080:6443
```
然后访问:`http://localhost:8080`
## 配置
您可以通过修改 `values.yaml` 文件来自定义安装:
- `image.repository``image.tag`:更改 Sealos 镜像
- `persistence`:配置持久化存储
- `service.type`更改服务类型ClusterIP、NodePort、LoadBalancer
- `auth.enableTLS`:为 API 端点启用 TLS
- `ingress`:配置外部访问的 ingress
## 卸载
要卸载 Sealos:
```bash
make uninstall
```
## 文档
更多信息请参阅官方文档:
- [Sealos 文档](https://sealos.io/docs/)
- [Helm Chart 仓库](https://github.com/seal-io/helm-charts)

55
src/sealos/values.yaml Normal file
View File

@@ -0,0 +1,55 @@
# Sealos Helm Chart Values
# https://github.com/seal-io/helm-charts

# Image configuration
image:
  # NOTE(review): a bare "sealos" resolves to docker.io/library/sealos —
  # confirm this is the intended repository.
  repository: sealos
  # Empty tag defers to the chart's default — presumably the chart
  # appVersion; verify against the chart.
  tag: ''
  pullPolicy: IfNotPresent
# Service configuration
service:
  type: ClusterIP
  port: 6443
# Persistence configuration
persistence:
  enabled: true
  # Empty storageClass uses the cluster's default StorageClass.
  storageClass: ''
  accessMode: ReadWriteOnce
  size: 10Gi
# Authentication configuration
auth:
  # When enableTLS is true, tlsSecret must name an existing TLS secret.
  enableTLS: false
  tlsSecret: ''
# Resource configuration
resources:
  limits:
    cpu: '2'
    memory: 4Gi
  requests:
    cpu: '0.5'
    memory: 1Gi
# Node selector
nodeSelector: {}
# Tolerations
tolerations: []
# Affinity
affinity: {}
# Ingress configuration (disabled by default; service is ClusterIP)
ingress:
  enabled: false
  # Empty className uses the cluster's default IngressClass.
  className: ''
  annotations: {}
  hosts:
    - host: sealos.local
      paths:
        - path: /
          pathType: Prefix
  tls: []

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= hashicorp
HELM_REPO_URL ?= https://helm.releases.hashicorp.com
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk

15
src/vcluster/Makefile Normal file
View File

@@ -0,0 +1,15 @@
# Helm deployment configuration for vCluster.
# Every variable uses ?= so it can be overridden from the environment or the
# make command line; shared install/uninstall targets come from base.mk.
HELM_RELEASE_NAME ?= vcluster
HELM_APPLICATION_NAME ?= vcluster
HELM_NAMESPACE ?= vcluster
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
# OCI registry settings (unused when installing from a classic Helm repo).
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
# Classic Helm repository hosting the loft-sh/vcluster chart.
HELM_REPO_NAME ?= loft-sh
HELM_REPO_URL ?= https://charts.loft.sh
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

125
src/vcluster/README.md Normal file
View File

@@ -0,0 +1,125 @@
# vCluster
## Introduction
vCluster (Virtual Cluster) is an open-source tool that allows you to create and manage fully functional Kubernetes clusters within a namespace of another Kubernetes cluster. It provides strong isolation, reduced operational overhead, and better resource utilization compared to separate physical clusters.
## Features
- **Lightweight**: Run isolated Kubernetes clusters without the overhead of separate control planes
- **Strong Isolation**: Each vCluster has its own API server and control plane
- **Cost Effective**: Share infrastructure while maintaining isolation
- **RBAC & Network Policies**: Full support for native Kubernetes security features
- **Persistent Storage**: Support for persistent volumes and storage classes
- **Multi-Tenancy**: Ideal for development, testing, and CI/CD pipelines
## Prerequisites
- A working Kubernetes cluster (host cluster)
- kubectl configured to access the host cluster
- Default StorageClass configured (for persistent workloads)
## Installation
To install vCluster, run:
```bash
make install
```
## Usage
### Create a Virtual Cluster
After installing vCluster, create a virtual cluster:
```bash
# Create a virtual cluster named 'my-vcluster'
vcluster create my-vcluster
# Or specify a namespace
vcluster create my-vcluster -n vcluster
```
### Connect to a Virtual Cluster
```bash
# Connect to the virtual cluster (updates kubeconfig)
vcluster connect my-vcluster
# Or get the kubeconfig without switching
vcluster connect my-vcluster --update-current=false
```
### List Virtual Clusters
```bash
vcluster list
```
### Delete a Virtual Cluster
```bash
vcluster delete my-vcluster
```
## vCluster CLI Installation
Install the vCluster CLI tool:
```bash
# macOS
curl -L -o vcluster "https://github.com/loft-sh/vcluster/releases/latest/download/vcluster-darwin-amd64" && chmod +x vcluster && sudo mv vcluster /usr/local/bin
# Linux
curl -L -o vcluster "https://github.com/loft-sh/vcluster/releases/latest/download/vcluster-linux-amd64" && chmod +x vcluster && sudo mv vcluster /usr/local/bin
# Windows (PowerShell)
md -Force "$Env:APPDATA\vcluster"; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]'Tls,Tls11,Tls12'; Invoke-WebRequest -URI "https://github.com/loft-sh/vcluster/releases/latest/download/vcluster-windows-amd64.exe" -outfile "$Env:APPDATA\vcluster\vcluster.exe"; $env:Path += ";" + $Env:APPDATA + "\vcluster"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [EnvironmentVariableTarget]::User);
```
## Storage
vCluster supports:
- **Ephemeral Storage**: Data is lost when the vCluster is deleted
- **Persistent Storage**: Use PVCs for persistent workloads
- **HostPath**: Direct access to host node storage (not recommended for production)
## Networking
vCluster creates an isolated network within the host cluster:
- Each vCluster has its own ClusterIP services
- Ingress can be configured to route traffic to vCluster services
- Network policies can be used for additional isolation
## Configuration
Edit `values.yaml` to configure:
- vCluster version (Kubernetes version)
- Resource limits
- Storage backend
- Synced resources
- Isolation settings
## vCluster Platform (Optional)
For additional features like UI, multi-cluster management, and advanced networking:
```bash
helm upgrade vcluster-platform vcluster-platform \
--install \
--repo https://charts.loft.sh \
--namespace vcluster-platform \
--create-namespace
```
## Important Notes
1. vCluster creates lightweight, isolated Kubernetes clusters
2. The host cluster must have sufficient resources
3. Consider using vCluster Platform for enterprise features
4. Persistent storage requires proper StorageClass configuration
5. Network policies can enhance isolation between vClusters

125
src/vcluster/README.zh.md Normal file
View File

@@ -0,0 +1,125 @@
# vCluster
## 简介
vCluster虚拟集群是一个开源工具允许您在另一个 Kubernetes 集群的命名空间中创建和管理功能完整的 Kubernetes 集群。与单独的物理集群相比,它提供了强大的隔离性、减少的运营开销和更好的资源利用率。
## 功能
- **轻量级**: 无需单独控制平面的开销即可运行隔离的 Kubernetes 集群
- **强隔离**: 每个 vCluster 都有自己的 API server 和控制平面
- **成本效益**: 共享基础设施同时保持隔离
- **RBAC 和网络策略**: 完全支持原生 Kubernetes 安全功能
- **持久化存储**: 支持持久卷和存储类
- **多租户**: 适用于开发、测试和 CI/CD 流水线
## 前置条件
- 一个正常工作的 Kubernetes 集群(主机集群)
- 配置了 kubectl 以访问主机集群
- 配置了默认 StorageClass用于持久化工作负载
## 安装
要安装 vCluster,请运行:
```bash
make install
```
## 使用
### 创建虚拟集群
安装 vCluster 后,创建虚拟集群:
```bash
# 创建一个名为 'my-vcluster' 的虚拟集群
vcluster create my-vcluster
# 或指定命名空间
vcluster create my-vcluster -n vcluster
```
### 连接到虚拟集群
```bash
# 连接到虚拟集群(更新 kubeconfig
vcluster connect my-vcluster
# 或不切换的情况下获取 kubeconfig
vcluster connect my-vcluster --update-current=false
```
### 列出虚拟集群
```bash
vcluster list
```
### 删除虚拟集群
```bash
vcluster delete my-vcluster
```
## vCluster CLI 安装
安装 vCluster CLI 工具:
```bash
# macOS
curl -L -o vcluster "https://github.com/loft-sh/vcluster/releases/latest/download/vcluster-darwin-amd64" && chmod +x vcluster && sudo mv vcluster /usr/local/bin
# Linux
curl -L -o vcluster "https://github.com/loft-sh/vcluster/releases/latest/download/vcluster-linux-amd64" && chmod +x vcluster && sudo mv vcluster /usr/local/bin
# Windows (PowerShell)
md -Force "$Env:APPDATA\vcluster"; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]'Tls,Tls11,Tls12'; Invoke-WebRequest -URI "https://github.com/loft-sh/vcluster/releases/latest/download/vcluster-windows-amd64.exe" -outfile "$Env:APPDATA\vcluster\vcluster.exe"; $env:Path += ";" + $Env:APPDATA + "\vcluster"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [EnvironmentVariableTarget]::User);
```
## 存储
vCluster 支持:
- **临时存储**: vCluster 删除时数据丢失
- **持久化存储**: 对持久化工作负载使用 PVC
- **HostPath**: 直接访问主机节点存储(不推荐用于生产环境)
## 网络
vCluster 在主机集群中创建隔离的网络:
- 每个 vCluster 都有自己的 ClusterIP 服务
- 可以配置 Ingress 将流量路由到 vCluster 服务
- 可以使用网络策略进行额外的隔离
## 配置
编辑 `values.yaml` 以配置:
- vCluster 版本Kubernetes 版本)
- 资源限制
- 存储后端
- 同步资源
- 隔离设置
## vCluster Platform可选
对于额外的功能,如 UI、多集群管理和高级网络
```bash
helm upgrade vcluster-platform vcluster-platform \
--install \
--repo https://charts.loft.sh \
--namespace vcluster-platform \
--create-namespace
```
## 重要提示
1. vCluster 创建轻量级、隔离的 Kubernetes 集群
2. 主机集群必须有足够的资源
3. 考虑使用 vCluster Platform 获取企业级功能
4. 持久化存储需要正确的 StorageClass 配置
5. 网络策略可以增强 vCluster 之间的隔离

128
src/vcluster/values.yaml Normal file
View File

@@ -0,0 +1,128 @@
# Default values for vCluster.
# This is a YAML-formatted file.

# vCluster image (the embedded Kubernetes distribution and version)
vcluster:
  image: rancher/k3s:v1.30.2-k3s2
# Control plane configuration
controlPlane:
  distro:
    k3s:
      enabled: true
      # k3s version (kept in sync with vcluster.image above)
      image: rancher/k3s:v1.30.2-k3s2
  # Backing store configuration
  backingStore:
    etcd:
      enabled: true
    # embeddedEtcd:
    #   enabled: false
  # CoreDNS deployed inside the virtual cluster
  coredns:
    enabled: true
    # replicas: 1
# Sync configuration
sync:
  # Resources synced FROM the virtual cluster TO the host cluster
  toHost:
    pods:
      enabled: true
    secrets:
      enabled: true
      # all: false syncs only secrets actually referenced by synced workloads
      all: false
    configmaps:
      enabled: true
      all: false
    endpoints:
      enabled: true
    persistentVolumeClaims:
      enabled: true
    ingresses:
      enabled: true
    services:
      enabled: true
  # Resources synced FROM the host cluster INTO the virtual cluster
  fromHost:
    nodes:
      enabled: false
      # selector: ""
    events:
      enabled: true
    storageClasses:
      enabled: true
# Networking configuration
networking:
  # Mirror selected services between host and virtual cluster
  replicateServices:
    fromHost: []
    toHost: []
# Advanced cluster settings
advanced:
  proxyKubelets:
    byHostname: false
    byIP: false
# Resource limits for the vCluster control-plane pod
resources:
  limits:
    cpu: 1000m
    memory: 1Gi
  requests:
    cpu: 200m
    memory: 256Mi
# RBAC configuration
rbac:
  clusterRole:
    create: true
# Service account (empty name lets the chart generate one)
serviceAccount:
  create: true
  name: ''
# Security context
security:
  podSecurityStandard: baseline
  # Pod- and container-level security contexts (chart defaults when empty)
  podSecurityContext: {}
  containerSecurityContext: {}
# Persistence for the control-plane data (empty storageClass uses the default)
persistence:
  enabled: true
  size: 5Gi
  # storageClass: ""
# Service configuration
service:
  type: ClusterIP
# Ingress configuration (disabled by default)
ingress:
  enabled: false
  # host: vcluster.example.com
  # annotations: {}
  # tls: []
# Multi-namespace mode
multiNamespaceMode:
  enabled: false
# Isolation settings
isolation:
  enabled: false
  podSecurityStandard: baseline
  # nodeProxyPermission:
  #   enabled: true
# Telemetry (set disabled: true to opt out)
telemetry:
  disabled: false

View File

@@ -11,6 +11,5 @@ HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
HELM_LANE ?=
include ../_template/base.mk