feat: add more services

Sun-ZhenXing
2025-10-02 17:46:58 +08:00
parent 30014852ca
commit f330e00fa0
24 changed files with 1489 additions and 0 deletions

src/kafka/README.md Normal file

@@ -0,0 +1,93 @@
# Apache Kafka
[English](./README.md) | [中文](./README.zh.md)
This service deploys Apache Kafka, a distributed streaming platform, along with Zookeeper and an optional Kafka UI.
## Services
- `zookeeper`: Zookeeper service for Kafka coordination.
- `kafka`: The Kafka broker service.
- `kafka-ui`: Optional web UI for Kafka management (profile: `ui`).
## Environment Variables
| Variable Name                      | Description                                           | Default Value   |
| ---------------------------------- | ----------------------------------------------------- | --------------- |
| `KAFKA_VERSION`                    | Kafka image version                                   | `7.8.0`         |
| `KAFKA_UI_VERSION`                 | Kafka UI image version                                | `latest`        |
| `ZOOKEEPER_CLIENT_PORT_OVERRIDE`   | Host port mapping for Zookeeper (maps to port 2181)   | `2181`          |
| `KAFKA_BROKER_PORT_OVERRIDE`       | Host port mapping for Kafka (maps to port 9092)       | `9092`          |
| `KAFKA_JMX_PORT_OVERRIDE`          | Host port mapping for JMX (maps to port 9999)         | `9999`          |
| `KAFKA_UI_PORT_OVERRIDE`           | Host port mapping for Kafka UI (maps to port 8080)    | `8080`          |
| `KAFKA_NUM_PARTITIONS`             | Default number of partitions for auto-created topics  | `3`             |
| `KAFKA_DEFAULT_REPLICATION_FACTOR` | Default replication factor                            | `1`             |
| `KAFKA_AUTO_CREATE_TOPICS_ENABLE`  | Enable automatic topic creation                       | `true`          |
| `KAFKA_DELETE_TOPIC_ENABLE`        | Enable topic deletion                                 | `true`          |
| `KAFKA_LOG_RETENTION_HOURS`        | Log retention time in hours                           | `168`           |
| `KAFKA_LOG_SEGMENT_BYTES`          | Log segment size in bytes                             | `1073741824`    |
| `KAFKA_HEAP_OPTS`                  | JVM heap options for Kafka                            | `-Xmx1G -Xms1G` |
| `KAFKA_UI_READONLY`                | Set Kafka UI to read-only mode                        | `false`         |
Please modify the `.env` file as needed for your use case.
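For example, a minimal `.env` might look like this (every value below is illustrative, not a project default):
```bash
# Example .env — illustrative values only
KAFKA_VERSION=7.8.0
# Move the UI off the default port 8080
KAFKA_UI_PORT_OVERRIDE=8081
KAFKA_NUM_PARTITIONS=6
# Keep logs for 3 days instead of the 7-day default
KAFKA_LOG_RETENTION_HOURS=72
KAFKA_HEAP_OPTS=-Xmx2G -Xms2G
```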
## Volumes
- `zookeeper_data`: Zookeeper data directory.
- `zookeeper_log`: Zookeeper log directory.
- `kafka_data`: Kafka data directory.
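These named volumes persist across `docker compose down`. As a rough sketch, one way to snapshot `kafka_data` is a throwaway container (note that Compose usually prefixes volume names with the project name, so check `docker volume ls` first; the archive path is just an example):
```bash
# Archive the kafka_data volume into the current directory
docker run --rm \
  -v kafka_data:/data:ro \
  -v "$(pwd)":/backup \
  alpine tar czf /backup/kafka_data.tar.gz -C /data .
```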
## Usage
1. Start Kafka with Zookeeper:
   ```bash
   docker compose up -d
   ```
2. Start with Kafka UI (optional):
   ```bash
   docker compose --profile ui up -d
   ```
3. Access the Kafka UI at `http://localhost:8080` (if enabled).
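To verify that the services came up, check their state and follow the broker log:
```bash
docker compose ps             # kafka should eventually report healthy
docker compose logs -f kafka  # Ctrl+C stops following
```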
## Testing Kafka
1. Create a topic:
   ```bash
   docker exec kafka kafka-topics --create --topic test-topic --bootstrap-server localhost:9092 --partitions 3 --replication-factor 1
   ```
2. List topics:
   ```bash
   docker exec kafka kafka-topics --list --bootstrap-server localhost:9092
   ```
3. Produce messages:
   ```bash
   docker exec -it kafka kafka-console-producer --topic test-topic --bootstrap-server localhost:9092
   ```
4. Consume messages:
   ```bash
   docker exec -it kafka kafka-console-consumer --topic test-topic --from-beginning --bootstrap-server localhost:9092
   ```
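For a quick non-interactive smoke test, pipe a message through the same tools against the `test-topic` created above:
```bash
# Produce a single message without an interactive session
echo "hello kafka" | docker exec -i kafka \
  kafka-console-producer --topic test-topic --bootstrap-server localhost:9092

# Read it back and exit after the first message
docker exec kafka kafka-console-consumer --topic test-topic \
  --from-beginning --max-messages 1 --bootstrap-server localhost:9092
```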
## Configuration
- Kafka is configured for single-node deployment by default.
- Clients inside the compose network should bootstrap via `kafka:29092`; clients on the host use `localhost:9092`.
- For production, consider raising the replication factor and reviewing the other defaults.
- Custom Kafka configuration can be added via environment variables, as shown below.
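For example, any variable from the table above can be overridden for a single launch without editing any file (the value here is illustrative):
```bash
# Shorten log retention to 24 hours for this run
KAFKA_LOG_RETENTION_HOURS=24 docker compose up -d
```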
## Security Notes
- This configuration is intended for development and testing; do not expose it to untrusted networks (see the note below).
- For production, enable SSL/SASL authentication.
- Secure Zookeeper communication.
- Update the Kafka version regularly to pick up security patches.
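Until authentication is configured, a low-effort mitigation is to publish the ports on loopback only; the existing `*_PORT_OVERRIDE` variables accept a host-IP prefix, which Compose expands to e.g. `127.0.0.1:9092:9092`:
```bash
# Bind the broker and UI to 127.0.0.1 so other hosts cannot reach them
KAFKA_BROKER_PORT_OVERRIDE=127.0.0.1:9092 \
KAFKA_UI_PORT_OVERRIDE=127.0.0.1:8080 \
docker compose --profile ui up -d
```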


@@ -0,0 +1,122 @@
x-default: &default
  restart: unless-stopped
  volumes:
    - &localtime /etc/localtime:/etc/localtime:ro
    - &timezone /etc/timezone:/etc/timezone:ro
  logging:
    driver: json-file
    options:
      max-size: 100m
services:
  # Zookeeper for Kafka coordination
  zookeeper:
    <<: *default
    image: confluentinc/cp-zookeeper:${KAFKA_VERSION:-7.8.0}
    container_name: zookeeper
    ports:
      - "${ZOOKEEPER_CLIENT_PORT_OVERRIDE:-2181}:2181"
    volumes:
      - *localtime
      - *timezone
      - zookeeper_data:/var/lib/zookeeper/data
      - zookeeper_log:/var/lib/zookeeper/log
    environment:
      - ZOOKEEPER_CLIENT_PORT=2181
      - ZOOKEEPER_TICK_TIME=2000
      - ZOOKEEPER_SYNC_LIMIT=5
      - ZOOKEEPER_INIT_LIMIT=10
      - ZOOKEEPER_MAX_CLIENT_CNXNS=60
      - ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT=3
      - ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL=24
    deploy:
      resources:
        limits:
          cpus: '1.00'
          memory: 1G
        reservations:
          cpus: '0.25'
          memory: 256M
  # Kafka broker
  kafka:
    <<: *default
    image: confluentinc/cp-kafka:${KAFKA_VERSION:-7.8.0}
    container_name: kafka
    depends_on:
      - zookeeper
    ports:
      - "${KAFKA_BROKER_PORT_OVERRIDE:-9092}:9092"
      - "${KAFKA_JMX_PORT_OVERRIDE:-9999}:9999"
    volumes:
      - *localtime
      - *timezone
      - kafka_data:/var/lib/kafka/data
    environment:
      - KAFKA_BROKER_ID=1
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      # PLAINTEXT serves containers on the compose network (kafka:29092);
      # PLAINTEXT_HOST serves host clients via the published port (localhost:9092).
      # Advertising only localhost:9092 would break in-network clients such as kafka-ui.
      - KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:29092,PLAINTEXT_HOST://0.0.0.0:9092
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
      - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      - KAFKA_INTER_BROKER_LISTENER_NAME=PLAINTEXT
      - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
      - KAFKA_TRANSACTION_STATE_LOG_MIN_ISR=1
      - KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1
      - KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS=0
      - KAFKA_NUM_PARTITIONS=${KAFKA_NUM_PARTITIONS:-3}
      - KAFKA_DEFAULT_REPLICATION_FACTOR=${KAFKA_DEFAULT_REPLICATION_FACTOR:-1}
      - KAFKA_AUTO_CREATE_TOPICS_ENABLE=${KAFKA_AUTO_CREATE_TOPICS_ENABLE:-true}
      - KAFKA_DELETE_TOPIC_ENABLE=${KAFKA_DELETE_TOPIC_ENABLE:-true}
      - KAFKA_LOG_RETENTION_HOURS=${KAFKA_LOG_RETENTION_HOURS:-168}
      - KAFKA_LOG_SEGMENT_BYTES=${KAFKA_LOG_SEGMENT_BYTES:-1073741824}
      - KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS=300000
      - KAFKA_JMX_PORT=9999
      - KAFKA_JMX_HOSTNAME=localhost
      - KAFKA_HEAP_OPTS=${KAFKA_HEAP_OPTS:--Xmx1G -Xms1G}
    deploy:
      resources:
        limits:
          cpus: '2.00'
          memory: 2G
        reservations:
          cpus: '0.50'
          memory: 1G
    healthcheck:
      test: ["CMD-SHELL", "kafka-broker-api-versions --bootstrap-server localhost:9092"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 60s
  # Kafka UI (optional)
  kafka-ui:
    <<: *default
    image: provectuslabs/kafka-ui:${KAFKA_UI_VERSION:-latest}
    container_name: kafka-ui
    depends_on:
      - kafka
      - zookeeper
    ports:
      - "${KAFKA_UI_PORT_OVERRIDE:-8080}:8080"
    volumes:
      - *localtime
      - *timezone
    environment:
      - KAFKA_CLUSTERS_0_NAME=local
      # Bootstrap via the internal listener so the advertised metadata is reachable
      - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:29092
      - KAFKA_CLUSTERS_0_ZOOKEEPER=zookeeper:2181
      - KAFKA_CLUSTERS_0_READONLY=${KAFKA_UI_READONLY:-false}
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 512M
        reservations:
          cpus: '0.10'
          memory: 128M
    profiles:
      - ui
volumes:
  zookeeper_data:
  zookeeper_log:
  kafka_data: