init repo

This commit is contained in:
Sun-ZhenXing
2026-02-16 09:12:50 +08:00
commit 3360ca01a2
71 changed files with 1934 additions and 0 deletions

14
src/kafka/Makefile Normal file
View File

@@ -0,0 +1,14 @@
# Helm deployment configuration for the Strimzi Kafka operator.
# Every variable uses ?= so it can be overridden from the environment or
# the make command line; the actual targets come from ../_template/base.mk.

# Helm release name and chart/application name (both the upstream operator chart).
HELM_RELEASE_NAME ?= strimzi-kafka-operator
HELM_APPLICATION_NAME ?= strimzi-kafka-operator
# Kubernetes namespace the operator is installed into.
HELM_NAMESPACE ?= kafka
# Local chart/materials directory — presumably consumed by base.mk; TODO confirm.
HELM_DIR ?= ./helm
# Pinned chart version pulled from the Strimzi repository.
# NOTE(review): Strimzi 0.50.x is KRaft-only; verify cluster manifests in this
# directory do not rely on ZooKeeper mode.
HELM_CHART_VERSION ?= 0.50.0
# Values file applied on install/upgrade (see ./values.yaml).
HELM_VALUES_FILE ?= ./values.yaml
# OCI registry coordinates; namespace intentionally empty by default.
HELM_OCI_REGISTRY ?= docker.io
HELM_OCI_NAMESPACE ?=
# Classic (HTTP) Helm repository coordinates.
HELM_REPO_NAME ?= strimzi
HELM_REPO_URL ?= https://strimzi.io/charts/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Optional lane/environment selector — assumed to be read by base.mk; TODO confirm.
HELM_LANE ?=

# Shared install/upgrade/uninstall targets live in the template.
include ../_template/base.mk

27
src/kafka/README.md Normal file
View File

@@ -0,0 +1,27 @@
# Kafka
## Introduction
Apache Kafka is an open-source distributed event streaming platform used for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications.
## Installation
To install Kafka, run:
```bash
make install
```
## Usage
After installation, verify the deployment:
```bash
kubectl get pods -n kafka
```
To produce messages, use the Kafka console producer (consume from the same topic analogously with `kafka-console-consumer.sh`):
```bash
kubectl -n kafka exec -it kafka-cluster-kafka-0 -- kafka-console-producer.sh --broker-list kafka-cluster-kafka-bootstrap:9092 --topic test
```

27
src/kafka/README.zh.md Normal file
View File

@@ -0,0 +1,27 @@
# Kafka
## 简介
Apache Kafka 是一个开源的分布式事件流平台,用于高性能数据管道、流分析、数据集成和关键任务应用。
## 安装
要安装 Kafka,请运行:
```bash
make install
```
## 使用
安装后,验证部署:
```bash
kubectl get pods -n kafka
```
要生产消息,可使用 Kafka 控制台生产者(消费同一主题可类似地使用 `kafka-console-consumer.sh`):
```bash
kubectl -n kafka exec -it kafka-cluster-kafka-0 -- kafka-console-producer.sh --broker-list kafka-cluster-kafka-bootstrap:9092 --topic test
```

View File

@@ -0,0 +1,68 @@
# Strimzi-managed Kafka cluster definition.
# NOTE(review): this spec uses ZooKeeper mode with Kafka 3.6.0, but the
# Makefile pins the Strimzi operator chart to 0.50.0, which supports only
# KRaft-based clusters (KafkaNodePool) and a newer Kafka version range —
# confirm compatibility or migrate this resource to KRaft before deploying.
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
  name: kafka-cluster
spec:
  kafka:
    version: 3.6.0
    replicas: 3
    listeners:
      # Plaintext listener for in-cluster clients.
      - name: plain
        port: 9092
        type: internal
        tls: false
      # TLS-encrypted listener for in-cluster clients.
      - name: tls
        port: 9093
        type: internal
        tls: true
    config:
      # Replication factors of 3 with min ISR 2 tolerate a single broker failure.
      offsets.topic.replication.factor: 3
      transaction.state.log.replication.factor: 3
      transaction.state.log.min.isr: 2
      default.replication.factor: 3
      min.insync.replicas: 2
      inter.broker.protocol.version: "3.6"
    storage:
      type: jbod
      volumes:
        - id: 0
          type: persistent-claim
          size: 100Gi
          # Keep the PVC (and its data) if the cluster resource is deleted.
          deleteClaim: false
    resources:
      requests:
        memory: 2Gi
        cpu: "1"
      limits:
        memory: 2Gi
        cpu: "2"
  # ZooKeeper ensemble — see the KRaft compatibility note at the top of this file.
  zookeeper:
    replicas: 3
    storage:
      type: persistent-claim
      size: 10Gi
      deleteClaim: false
    resources:
      requests:
        memory: 1Gi
        cpu: "0.5"
      limits:
        memory: 1Gi
        cpu: "1"
  # Entity operator runs the topic and user operators alongside the cluster.
  entityOperator:
    topicOperator:
      resources:
        requests:
          memory: 512Mi
          cpu: "0.2"
        limits:
          memory: 512Mi
          cpu: "0.5"
    userOperator:
      resources:
        requests:
          memory: 512Mi
          cpu: "0.2"
        limits:
          memory: 512Mi
          cpu: "0.5"

12
src/kafka/values.yaml Normal file
View File

@@ -0,0 +1,12 @@
# Helm values for the strimzi-kafka-operator chart (see Makefile HELM_VALUES_FILE).

# Extra namespaces the operator watches in addition to its own;
# empty list means it watches only the install namespace.
watchNamespaces: []
# Resource requests/limits for the operator deployment itself.
resources:
  limits:
    cpu: 500m
    memory: 512Mi
  requests:
    cpu: 100m
    memory: 128Mi
# Operator log verbosity.
logLevel: INFO
# Do not have the chart create a NetworkPolicy for the operator.
generateNetworkPolicy: false