Compare commits

..

6 Commits

Author SHA1 Message Date
Alex Sun
72700c4db0 Merge pull request #2 from Sun-ZhenXing/dev
feat: add opik & agentgateway
2026-02-28 17:35:04 +08:00
Sun-ZhenXing
343aac9aed feat: update Makefiles to use dynamic chart repository names and URLs 2026-02-27 18:07:15 +08:00
Sun-ZhenXing
4773d8bc7c feat: add opik 2026-02-27 09:36:17 +08:00
Sun-ZhenXing
ce2e1c4f4f feat: add agentgateway template and update kgateway template 2026-02-25 14:54:54 +08:00
Alex Sun
d39d336e2c Merge pull request #1 from Sun-ZhenXing/dev
feat: add more services
2026-02-23 18:11:32 +08:00
Sun-ZhenXing
530e6d7f5d feat: add more services 2026-02-23 18:10:02 +08:00
70 changed files with 4054 additions and 23 deletions

1
.gitignore vendored
View File

@@ -3,7 +3,6 @@ node_modules/
# Ignore Helm chart default values files
*-values.yaml
values-*.yaml
# Env
.env

View File

@@ -1,5 +1,6 @@
{
"recommendations": [
"yzhang.markdown-all-in-one",
"dbaeumer.vscode-eslint"
]
}

21
LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 Alex Sun
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -14,10 +14,18 @@
"scripts": {
"lint": "eslint .",
"lint:fix": "eslint . --fix",
"test": "echo \"Error: no test specified\" && exit 1"
"prepare": "simple-git-hooks"
},
"devDependencies": {
"@antfu/eslint-config": "^7.4.3",
"eslint": "^10.0.1"
"eslint": "^10.0.1",
"lint-staged": "^16.2.7",
"simple-git-hooks": "^2.13.1"
},
"simple-git-hooks": {
"pre-commit": "pnpm lint-staged"
},
"lint-staged": {
"*": "pnpm lint:fix"
}
}

274
pnpm-lock.yaml generated
View File

@@ -14,6 +14,12 @@ importers:
eslint:
specifier: ^10.0.1
version: 10.0.1
lint-staged:
specifier: ^16.2.7
version: 16.2.7
simple-git-hooks:
specifier: ^2.13.1
version: 2.13.1
packages:
@@ -337,6 +343,18 @@ packages:
ajv@6.14.0:
resolution: {integrity: sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==}
ansi-escapes@7.3.0:
resolution: {integrity: sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg==}
engines: {node: '>=18'}
ansi-regex@6.2.2:
resolution: {integrity: sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==}
engines: {node: '>=12'}
ansi-styles@6.2.3:
resolution: {integrity: sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==}
engines: {node: '>=12'}
ansis@4.2.0:
resolution: {integrity: sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig==}
engines: {node: '>=14'}
@@ -361,6 +379,10 @@ packages:
resolution: {integrity: sha512-fy6KJm2RawA5RcHkLa1z/ScpBeA762UF9KmZQxwIbDtRJrgLzM10depAiEQ+CXYcoiqW1/m96OAAoke2nE9EeA==}
engines: {node: 18 || 20 || >=22}
braces@3.0.3:
resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==}
engines: {node: '>=8'}
browserslist@4.28.1:
resolution: {integrity: sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==}
engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
@@ -394,6 +416,21 @@ packages:
resolution: {integrity: sha512-GfisEZEJvzKrmGWkvfhgzcz/BllN1USeqD2V6tg14OAOgaCD2Z/PUEuxnAZ/nPvmaHRG7a8y77p1T/IRQ4D1Hw==}
engines: {node: '>=4'}
cli-cursor@5.0.0:
resolution: {integrity: sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==}
engines: {node: '>=18'}
cli-truncate@5.1.1:
resolution: {integrity: sha512-SroPvNHxUnk+vIW/dOSfNqdy1sPEFkrTk6TUtqLCnBlo3N7TNYYkzzN7uSD6+jVjrdO4+p8nH7JzH6cIvUem6A==}
engines: {node: '>=20'}
colorette@2.0.20:
resolution: {integrity: sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==}
commander@14.0.3:
resolution: {integrity: sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==}
engines: {node: '>=20'}
comment-parser@1.4.1:
resolution: {integrity: sha512-buhp5kePrmda3vhc5B9t7pUQXAb2Tnd0qgpkIhPhkHXxJpiPJ11H0ZEU0oBpJ2QztSbzG/ZxMj/CHsYJqRHmyg==}
engines: {node: '>= 12.0.0'}
@@ -453,6 +490,9 @@ packages:
electron-to-chromium@1.5.302:
resolution: {integrity: sha512-sM6HAN2LyK82IyPBpznDRqlTQAtuSaO+ShzFiWTvoMJLHyZ+Y39r8VMfHzwbU8MVBzQ4Wdn85+wlZl2TLGIlwg==}
emoji-regex@10.6.0:
resolution: {integrity: sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==}
empathic@2.0.0:
resolution: {integrity: sha512-i6UzDscO/XfAcNYD75CfICkmfLedpyPDdozrLMmQc5ORaQcdMoc21OnlEylMIqI7U8eniKrPMxxtj8k0vhmJhA==}
engines: {node: '>=14'}
@@ -465,6 +505,10 @@ packages:
resolution: {integrity: sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==}
engines: {node: '>=0.12'}
environment@1.1.0:
resolution: {integrity: sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==}
engines: {node: '>=18'}
escalade@3.2.0:
resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==}
engines: {node: '>=6'}
@@ -682,6 +726,9 @@ packages:
resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==}
engines: {node: '>=0.10.0'}
eventemitter3@5.0.4:
resolution: {integrity: sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==}
exsolve@1.0.8:
resolution: {integrity: sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==}
@@ -710,6 +757,10 @@ packages:
resolution: {integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==}
engines: {node: '>=16.0.0'}
fill-range@7.1.1:
resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==}
engines: {node: '>=8'}
find-up-simple@1.0.1:
resolution: {integrity: sha512-afd4O7zpqHeRyg4PfDQsXmlDe2PfdHtJt6Akt8jOWaApLOZk5JXs6VMR29lz03pRe9mpykrRCYIYxaJYcfpncQ==}
engines: {node: '>=18'}
@@ -729,6 +780,10 @@ packages:
resolution: {integrity: sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==}
engines: {node: '>=0.4.x'}
get-east-asian-width@1.5.0:
resolution: {integrity: sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==}
engines: {node: '>=18'}
get-tsconfig@4.13.6:
resolution: {integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==}
@@ -787,10 +842,18 @@ packages:
resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==}
engines: {node: '>=0.10.0'}
is-fullwidth-code-point@5.1.0:
resolution: {integrity: sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==}
engines: {node: '>=18'}
is-glob@4.0.3:
resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
engines: {node: '>=0.10.0'}
is-number@7.0.0:
resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
engines: {node: '>=0.12.0'}
isexe@2.0.0:
resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==}
@@ -827,6 +890,15 @@ packages:
resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==}
engines: {node: '>= 0.8.0'}
lint-staged@16.2.7:
resolution: {integrity: sha512-lDIj4RnYmK7/kXMya+qJsmkRFkGolciXjrsZ6PC25GdTfWOAWetR0ZbsNXRAj1EHHImRSalc+whZFg56F5DVow==}
engines: {node: '>=20.17'}
hasBin: true
listr2@9.0.5:
resolution: {integrity: sha512-ME4Fb83LgEgwNw96RKNvKV4VTLuXfoKudAmm2lP8Kk87KaMK0/Xrx/aAkMWmT8mDb+3MlFDspfbCs7adjRxA2g==}
engines: {node: '>=20.0.0'}
local-pkg@1.1.2:
resolution: {integrity: sha512-arhlxbFRmoQHl33a0Zkle/YWlmNwoyt6QNZEIJcqNbdrsix5Lvc4HyyI3EnwxTYlZYc32EbYrQ8SzEZ7dqgg9A==}
engines: {node: '>=14'}
@@ -835,6 +907,10 @@ packages:
resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==}
engines: {node: '>=10'}
log-update@6.1.0:
resolution: {integrity: sha512-9ie8ItPR6tjY5uYJh8K/Zrv/RMZ5VOlOWvtZdEHYSTFKZfIBPQa9tOAEeAWhd+AnIneLJ22w5fjOYtoutpWq5w==}
engines: {node: '>=18'}
longest-streak@3.1.0:
resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==}
@@ -967,6 +1043,14 @@ packages:
micromark@4.0.2:
resolution: {integrity: sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==}
micromatch@4.0.8:
resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==}
engines: {node: '>=8.6'}
mimic-function@5.0.1:
resolution: {integrity: sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA==}
engines: {node: '>=18'}
minimatch@10.2.2:
resolution: {integrity: sha512-+G4CpNBxa5MprY+04MbgOw1v7So6n5JY166pFi9KfYwT78fxScCeSNQSNzp6dpPSW2rONOps6Ocam1wFhCgoVw==}
engines: {node: 18 || 20 || >=22}
@@ -981,6 +1065,10 @@ packages:
ms@2.1.3:
resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==}
nano-spawn@2.0.0:
resolution: {integrity: sha512-tacvGzUY5o2D8CBh2rrwxyNojUsZNU2zjNTzKQrkgGJQTbGAfArVWXSKMBokBeeg6C7OLRGUEyoFlYbfeWQIqw==}
engines: {node: '>=20.17'}
nanoid@3.3.11:
resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==}
engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
@@ -1002,6 +1090,10 @@ packages:
object-deep-merge@2.0.0:
resolution: {integrity: sha512-3DC3UMpeffLTHiuXSy/UG4NOIYTLlY9u3V82+djSCLYClWobZiS4ivYzpIUWrRY/nfsJ8cWsKyG3QfyLePmhvg==}
onetime@7.0.0:
resolution: {integrity: sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==}
engines: {node: '>=18'}
optionator@0.9.4:
resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==}
engines: {node: '>= 0.8.0'}
@@ -1041,10 +1133,19 @@ packages:
picocolors@1.1.1:
resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==}
picomatch@2.3.1:
resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==}
engines: {node: '>=8.6'}
picomatch@4.0.3:
resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==}
engines: {node: '>=12'}
pidtree@0.6.0:
resolution: {integrity: sha512-eG2dWTVw5bzqGRztnHExczNxt5VGsE6OwTeCG3fdUf9KBsZzO3R5OIIIzWR+iZA0NtZ+RDVdaoE2dK1cn6jH4g==}
engines: {node: '>=0.10'}
hasBin: true
pkg-types@1.3.1:
resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==}
@@ -1100,6 +1201,13 @@ packages:
resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
restore-cursor@5.1.0:
resolution: {integrity: sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==}
engines: {node: '>=18'}
rfdc@1.4.1:
resolution: {integrity: sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==}
scslre@0.3.0:
resolution: {integrity: sha512-3A6sD0WYP7+QrjbfNA2FN3FsOaGGFoekCVgTyypy53gPxhbkCIjtO6YWgdrfM+n/8sI8JeXZOIxsHjMTNxQ4nQ==}
engines: {node: ^14.0.0 || >=16.0.0}
@@ -1117,9 +1225,21 @@ packages:
resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==}
engines: {node: '>=8'}
signal-exit@4.1.0:
resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==}
engines: {node: '>=14'}
simple-git-hooks@2.13.1:
resolution: {integrity: sha512-WszCLXwT4h2k1ufIXAgsbiTOazqqevFCIncOuUBZJ91DdvWcC5+OFkluWRQPrcuSYd8fjq+o2y1QfWqYMoAToQ==}
hasBin: true
sisteransi@1.0.5:
resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==}
slice-ansi@7.1.2:
resolution: {integrity: sha512-iOBWFgUX7caIZiuutICxVgX1SdxwAVFFKwt1EvMYYec/NWO5meOJ6K5uQxhrYBdQJne4KxiqZc+KptFOWFSI9w==}
engines: {node: '>=18'}
source-map-js@1.2.1:
resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==}
engines: {node: '>=0.10.0'}
@@ -1133,6 +1253,22 @@ packages:
spdx-license-ids@3.0.23:
resolution: {integrity: sha512-CWLcCCH7VLu13TgOH+r8p1O/Znwhqv/dbb6lqWy67G+pT1kHmeD/+V36AVb/vq8QMIQwVShJ6Ssl5FPh0fuSdw==}
string-argv@0.3.2:
resolution: {integrity: sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==}
engines: {node: '>=0.6.19'}
string-width@7.2.0:
resolution: {integrity: sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==}
engines: {node: '>=18'}
string-width@8.2.0:
resolution: {integrity: sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw==}
engines: {node: '>=20'}
strip-ansi@7.1.2:
resolution: {integrity: sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==}
engines: {node: '>=12'}
strip-indent@4.1.1:
resolution: {integrity: sha512-SlyRoSkdh1dYP0PzclLE7r0M9sgbFKKMFXpFRUMNuKhQSbC6VQIGzq3E0qsfvGJaUFJPGv6Ws1NZ/haTAjfbMA==}
engines: {node: '>=12'}
@@ -1153,6 +1289,10 @@ packages:
resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==}
engines: {node: '>=12.0.0'}
to-regex-range@5.0.1:
resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
engines: {node: '>=8.0'}
to-valid-identifier@1.0.0:
resolution: {integrity: sha512-41wJyvKep3yT2tyPqX/4blcfybknGB4D+oETKLs7Q76UiPqRpUJK3hr1nxelyYO0PHKVzJwlu0aCeEAsGI6rpw==}
engines: {node: '>=20'}
@@ -1223,6 +1363,10 @@ packages:
resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==}
engines: {node: '>=0.10.0'}
wrap-ansi@9.0.2:
resolution: {integrity: sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==}
engines: {node: '>=18'}
xml-name-validator@4.0.0:
resolution: {integrity: sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==}
engines: {node: '>=12'}
@@ -1599,6 +1743,14 @@ snapshots:
json-schema-traverse: 0.4.1
uri-js: 4.4.1
ansi-escapes@7.3.0:
dependencies:
environment: 1.1.0
ansi-regex@6.2.2: {}
ansi-styles@6.2.3: {}
ansis@4.2.0: {}
are-docs-informative@0.0.2: {}
@@ -1613,6 +1765,10 @@ snapshots:
dependencies:
balanced-match: 4.0.4
braces@3.0.3:
dependencies:
fill-range: 7.1.1
browserslist@4.28.1:
dependencies:
baseline-browser-mapping: 2.10.0
@@ -1639,6 +1795,19 @@ snapshots:
dependencies:
escape-string-regexp: 1.0.5
cli-cursor@5.0.0:
dependencies:
restore-cursor: 5.1.0
cli-truncate@5.1.1:
dependencies:
slice-ansi: 7.1.2
string-width: 8.2.0
colorette@2.0.20: {}
commander@14.0.3: {}
comment-parser@1.4.1: {}
comment-parser@1.4.5: {}
@@ -1681,6 +1850,8 @@ snapshots:
electron-to-chromium@1.5.302: {}
emoji-regex@10.6.0: {}
empathic@2.0.0: {}
enhanced-resolve@5.19.0:
@@ -1690,6 +1861,8 @@ snapshots:
entities@7.0.1: {}
environment@1.1.0: {}
escalade@3.2.0: {}
escape-string-regexp@1.0.5: {}
@@ -1983,6 +2156,8 @@ snapshots:
esutils@2.0.3: {}
eventemitter3@5.0.4: {}
exsolve@1.0.8: {}
fast-deep-equal@3.1.3: {}
@@ -2003,6 +2178,10 @@ snapshots:
dependencies:
flat-cache: 4.0.1
fill-range@7.1.1:
dependencies:
to-regex-range: 5.0.1
find-up-simple@1.0.1: {}
find-up@5.0.0:
@@ -2019,6 +2198,8 @@ snapshots:
format@0.2.2: {}
get-east-asian-width@1.5.0: {}
get-tsconfig@4.13.6:
dependencies:
resolve-pkg-maps: 1.0.0
@@ -2057,10 +2238,16 @@ snapshots:
is-extglob@2.1.1: {}
is-fullwidth-code-point@5.1.0:
dependencies:
get-east-asian-width: 1.5.0
is-glob@4.0.3:
dependencies:
is-extglob: 2.1.1
is-number@7.0.0: {}
isexe@2.0.0: {}
jsdoc-type-pratt-parser@7.0.0: {}
@@ -2091,6 +2278,25 @@ snapshots:
prelude-ls: 1.2.1
type-check: 0.4.0
lint-staged@16.2.7:
dependencies:
commander: 14.0.3
listr2: 9.0.5
micromatch: 4.0.8
nano-spawn: 2.0.0
pidtree: 0.6.0
string-argv: 0.3.2
yaml: 2.8.2
listr2@9.0.5:
dependencies:
cli-truncate: 5.1.1
colorette: 2.0.20
eventemitter3: 5.0.4
log-update: 6.1.0
rfdc: 1.4.1
wrap-ansi: 9.0.2
local-pkg@1.1.2:
dependencies:
mlly: 1.8.0
@@ -2101,6 +2307,14 @@ snapshots:
dependencies:
p-locate: 5.0.0
log-update@6.1.0:
dependencies:
ansi-escapes: 7.3.0
cli-cursor: 5.0.0
slice-ansi: 7.1.2
strip-ansi: 7.1.2
wrap-ansi: 9.0.2
longest-streak@3.1.0: {}
magic-string@0.30.21:
@@ -2420,6 +2634,13 @@ snapshots:
transitivePeerDependencies:
- supports-color
micromatch@4.0.8:
dependencies:
braces: 3.0.3
picomatch: 2.3.1
mimic-function@5.0.1: {}
minimatch@10.2.2:
dependencies:
brace-expansion: 5.0.3
@@ -2437,6 +2658,8 @@ snapshots:
ms@2.1.3: {}
nano-spawn@2.0.0: {}
nanoid@3.3.11: {}
natural-compare@1.4.0: {}
@@ -2451,6 +2674,10 @@ snapshots:
object-deep-merge@2.0.0: {}
onetime@7.0.0:
dependencies:
mimic-function: 5.0.1
optionator@0.9.4:
dependencies:
deep-is: 0.1.4
@@ -2486,8 +2713,12 @@ snapshots:
picocolors@1.1.1: {}
picomatch@2.3.1: {}
picomatch@4.0.3: {}
pidtree@0.6.0: {}
pkg-types@1.3.1:
dependencies:
confbox: 0.1.8
@@ -2542,6 +2773,13 @@ snapshots:
resolve-pkg-maps@1.0.0: {}
restore-cursor@5.1.0:
dependencies:
onetime: 7.0.0
signal-exit: 4.1.0
rfdc@1.4.1: {}
scslre@0.3.0:
dependencies:
'@eslint-community/regexpp': 4.12.2
@@ -2556,8 +2794,17 @@ snapshots:
shebang-regex@3.0.0: {}
signal-exit@4.1.0: {}
simple-git-hooks@2.13.1: {}
sisteransi@1.0.5: {}
slice-ansi@7.1.2:
dependencies:
ansi-styles: 6.2.3
is-fullwidth-code-point: 5.1.0
source-map-js@1.2.1: {}
spdx-exceptions@2.5.0: {}
@@ -2569,6 +2816,23 @@ snapshots:
spdx-license-ids@3.0.23: {}
string-argv@0.3.2: {}
string-width@7.2.0:
dependencies:
emoji-regex: 10.6.0
get-east-asian-width: 1.5.0
strip-ansi: 7.1.2
string-width@8.2.0:
dependencies:
get-east-asian-width: 1.5.0
strip-ansi: 7.1.2
strip-ansi@7.1.2:
dependencies:
ansi-regex: 6.2.2
strip-indent@4.1.1: {}
synckit@0.11.12:
@@ -2584,6 +2848,10 @@ snapshots:
fdir: 6.5.0(picomatch@4.0.3)
picomatch: 4.0.3
to-regex-range@5.0.1:
dependencies:
is-number: 7.0.0
to-valid-identifier@1.0.0:
dependencies:
'@sindresorhus/base62': 1.0.0
@@ -2659,6 +2927,12 @@ snapshots:
word-wrap@1.2.5: {}
wrap-ansi@9.0.2:
dependencies:
ansi-styles: 6.2.3
string-width: 7.2.0
strip-ansi: 7.1.2
xml-name-validator@4.0.0: {}
yaml-eslint-parser@2.0.0:

82
src/_template/crd.mk Normal file
View File

@@ -0,0 +1,82 @@
# CRD (Custom Resource Definition) Installation Template
# This file provides common targets for installing CRDs before deploying Helm charts.
#
# Usage:
# include ../_template/crd.mk
#
# Required variables:
# HELM_RELEASE_NAME - The name of the Helm release
# HELM_NAMESPACE - The namespace for the deployment
# HELM_CHART_VERSION - The version of the Helm chart
#
# Optional variables:
# CRD_HELM_CHART_REPO - The Helm chart repository for CRDs (if using Helm to install CRDs)
# CRD_HELM_RELEASE_NAME - The release name for CRD installation (defaults to $(HELM_RELEASE_NAME)-crds)
# CRD_HELM_NAMESPACE - The namespace for CRD installation (defaults to $(HELM_NAMESPACE))
# CRD_KUBECTL_URLS - Space-separated list of URLs to apply via kubectl
# CRD_INSTALL_GATEWAY_API - Set to "true" to install Gateway API CRDs
# GATEWAY_API_VERSION - Version of Gateway API to install (defaults to v1.4.0)
# `?=` only assigns when the including Makefile (or environment) has not
# already set the variable, so every default below is overridable.
CRD_HELM_RELEASE_NAME ?= $(HELM_RELEASE_NAME)-crds
CRD_HELM_NAMESPACE ?= $(HELM_NAMESPACE)
GATEWAY_API_VERSION ?= v1.4.0
# Install CRDs via kubectl apply
# `ifdef` is a make-level conditional: the branch is chosen when the Makefile
# is parsed, based on whether CRD_KUBECTL_URLS is defined at all.
.PHONY: install-crds-kubectl
install-crds-kubectl:
ifdef CRD_KUBECTL_URLS
@echo "Installing CRDs from URLs..."
@for url in $(CRD_KUBECTL_URLS); do \
echo "Applying $$url..."; \
kubectl apply -f $$url; \
done
else
@echo "CRD_KUBECTL_URLS not set, skipping kubectl CRD installation."
endif
# Install Gateway API CRDs
# Only runs when CRD_INSTALL_GATEWAY_API is exactly the string "true".
.PHONY: install-crds-gateway-api
install-crds-gateway-api:
ifeq ($(CRD_INSTALL_GATEWAY_API),true)
@echo "Installing Gateway API CRDs (version: $(GATEWAY_API_VERSION))..."
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/$(GATEWAY_API_VERSION)/standard-install.yaml
else
@echo "CRD_INSTALL_GATEWAY_API not set to 'true', skipping Gateway API CRD installation."
endif
# Install CRDs via Helm
# Reuses HELM_CHART_VERSION for the CRD chart; the $(if ...) drops --version
# entirely when HELM_CHART_VERSION is empty.
.PHONY: install-crds-helm
install-crds-helm:
ifdef CRD_HELM_CHART_REPO
@echo "Installing CRDs via Helm..."
helm upgrade -i --create-namespace \
--namespace $(CRD_HELM_NAMESPACE) \
$(if $(HELM_CHART_VERSION),--version $(HELM_CHART_VERSION),) \
$(CRD_HELM_RELEASE_NAME) $(CRD_HELM_CHART_REPO)
else
@echo "CRD_HELM_CHART_REPO not set, skipping Helm CRD installation."
endif
# Install all CRDs (kubectl + Gateway API + Helm)
# Each prerequisite is a no-op when its configuration variable is unset.
.PHONY: install-crds
install-crds: install-crds-kubectl install-crds-gateway-api install-crds-helm
@echo "All CRDs installed successfully."
# Install CRDs and then the main chart
# NOTE(review): `install` is not defined in this file; presumably provided by
# the including Makefile (e.g. via base.mk) -- confirm.
.PHONY: install-all
install-all: install-crds install
# Verify CRD installation
# NOTE(review): the loop below only derives a name from each URL's basename
# and echoes it -- no `kubectl get crd` is performed, so nothing is actually
# checked against the cluster. Also, a URL basename need not match the CRD's
# resource name. Confirm whether a real lookup was intended.
.PHONY: verify-crds
verify-crds:
ifdef CRD_KUBECTL_URLS
@echo "Verifying CRDs..."
@for url in $(CRD_KUBECTL_URLS); do \
crd_name=$$(basename $$url | sed 's/\.yaml$$//'); \
echo "Checking CRD: $$crd_name..."; \
done
endif
ifeq ($(CRD_INSTALL_GATEWAY_API),true)
@echo "Verifying Gateway API CRDs..."
kubectl get crd | grep gateway.networking.k8s.io || echo "Gateway API CRDs not found"
endif

88
src/_template/gateway.mk Normal file
View File

@@ -0,0 +1,88 @@
# Gateway Service Installation Template
# This file provides common targets for deploying Gateway API based services.
# It extends crd.mk with Gateway-specific verification and utilities.
#
# Usage:
# include ../_template/crd.mk
# include ../_template/gateway.mk
#
# Required variables (inherited from crd.mk):
# HELM_RELEASE_NAME - The name of the Helm release
# HELM_NAMESPACE - The namespace for the deployment
# HELM_CHART_VERSION - The version of the Helm chart
#
# Additional required variables:
# GATEWAY_CLASS_NAME - The name of the GatewayClass (e.g., "kgateway", "agentgateway")
# CRD_HELM_CHART_REPO - The Helm chart repository for CRDs
#
# Optional variables:
# GATEWAY_API_VERSION - Version of Gateway API (defaults to v1.4.0)
# ENABLE_GATEWAY_VERIFY - Set to "true" to enable gateway verification (defaults to true)
# Same default as in crd.mk; `?=` makes the duplicate definition harmless.
GATEWAY_API_VERSION ?= v1.4.0
ENABLE_GATEWAY_VERIFY ?= true
# Verify GatewayClass installation
# All verification targets are best-effort: failures fall through to an echo
# rather than failing the make run.
.PHONY: verify-gatewayclass
verify-gatewayclass:
ifeq ($(ENABLE_GATEWAY_VERIFY),true)
@echo "Verifying GatewayClass: $(GATEWAY_CLASS_NAME)..."
kubectl get gatewayclass $(GATEWAY_CLASS_NAME) || echo "GatewayClass $(GATEWAY_CLASS_NAME) not found"
else
@echo "Gateway verification disabled."
endif
# Verify Gateway installation
.PHONY: verify-gateway
verify-gateway:
ifeq ($(ENABLE_GATEWAY_VERIFY),true)
@echo "Verifying Gateways in namespace: $(HELM_NAMESPACE)..."
kubectl get gateway -n $(HELM_NAMESPACE) 2>/dev/null || echo "No Gateways found in $(HELM_NAMESPACE)"
else
@echo "Gateway verification disabled."
endif
# Verify HTTPRoutes
.PHONY: verify-httproutes
verify-httproutes:
ifeq ($(ENABLE_GATEWAY_VERIFY),true)
@echo "Verifying HTTPRoutes in namespace: $(HELM_NAMESPACE)..."
kubectl get httproute -n $(HELM_NAMESPACE) 2>/dev/null || echo "No HTTPRoutes found in $(HELM_NAMESPACE)"
else
@echo "Gateway verification disabled."
endif
# Full verification including Gateway API resources
.PHONY: verify-gateway-all
verify-gateway-all: verify-crds verify-gatewayclass verify-gateway verify-httproutes
@echo "Gateway verification complete."
# Define `verify` to run CRD and gateway checks.
# NOTE(review): crd.mk itself defines no `verify` target, so this is not an
# override of crd.mk; if base.mk defines `verify`, including this file after
# it replaces that recipe -- confirm include order in consumers.
# NOTE(review): assumes HELM_APPLICATION_NAME is set by the including Makefile.
.PHONY: verify
verify: verify-crds verify-gatewayclass
ifeq ($(ENABLE_GATEWAY_VERIFY),true)
@echo "Verifying $(HELM_APPLICATION_NAME) installation..."
kubectl get pods -n $(HELM_NAMESPACE)
kubectl get gatewayclass $(GATEWAY_CLASS_NAME) 2>/dev/null || echo "GatewayClass $(GATEWAY_CLASS_NAME) not ready yet"
endif
# Port forward to the gateway service
# Picks the first pod labelled app=$(HELM_APPLICATION_NAME) and forwards
# local port 8080 to pod port 8080 (both hard-coded).
.PHONY: port-forward-gateway
port-forward-gateway:
@echo "Port forwarding to gateway service..."
@POD_NAME=$$(kubectl get pods -n $(HELM_NAMESPACE) -l app=$(HELM_APPLICATION_NAME) -o jsonpath='{.items[0].metadata.name}' 2>/dev/null); \
if [ -n "$$POD_NAME" ]; then \
echo "Forwarding to pod: $$POD_NAME"; \
kubectl port-forward -n $(HELM_NAMESPACE) $$POD_NAME 8080:8080; \
else \
echo "No gateway pod found with label app=$(HELM_APPLICATION_NAME)"; \
fi
# Get gateway status
# Dumps full YAML of the GatewayClass and all Gateways in the namespace.
.PHONY: gateway-status
gateway-status:
@echo "GatewayClass status:"
kubectl get gatewayclass $(GATEWAY_CLASS_NAME) -o yaml 2>/dev/null || echo "GatewayClass not found"
@echo ""
@echo "Gateways in $(HELM_NAMESPACE):"
kubectl get gateway -n $(HELM_NAMESPACE) -o yaml 2>/dev/null || echo "No Gateways found"

100
src/_template/operator.mk Normal file
View File

@@ -0,0 +1,100 @@
# Kubernetes Operator Installation Template
# This file provides common targets for deploying services using the Operator pattern.
#
# Usage:
# include ../_template/operator.mk
#
# Required variables:
# HELM_RELEASE_NAME - The name of the Helm release
# HELM_APPLICATION_NAME - The name of the application
# HELM_NAMESPACE - The namespace for the deployment
# HELM_CHART_REPO - The Helm chart repository
#
# Optional variables:
# OPERATOR_RELEASE_NAME - The release name for the operator (defaults to $(HELM_RELEASE_NAME)-operator)
# OPERATOR_NAMESPACE - The namespace for the operator (defaults to $(HELM_NAMESPACE)-system)
# OPERATOR_CHART_REPO - The Helm chart repository for the operator (if different from main chart)
# OPERATOR_CHART_VERSION - The version of the operator chart
# OPERATOR_VALUES_FILE - The values file for the operator
# CLUSTER_RELEASE_NAME - The release name for the cluster/resource
# CLUSTER_VALUES_FILE - The values file for the cluster/resource
# WAIT_FOR_CRD - Set to "true" to wait for CRDs to be ready
# CRD_WAIT_TIMEOUT - Timeout for waiting for CRDs (defaults to 60s)
# `?=` only assigns when the including Makefile has not already set the variable.
OPERATOR_RELEASE_NAME ?= $(HELM_RELEASE_NAME)-operator
OPERATOR_NAMESPACE ?= $(HELM_NAMESPACE)-system
CLUSTER_RELEASE_NAME ?= $(HELM_RELEASE_NAME)-cluster
CRD_WAIT_TIMEOUT ?= 60s
# Install the operator chart (no-op unless OPERATOR_CHART_REPO is set).
.PHONY: install-operator
install-operator:
ifdef OPERATOR_CHART_REPO
	@echo "Installing operator: $(OPERATOR_RELEASE_NAME)..."
	helm upgrade $(OPERATOR_RELEASE_NAME) $(OPERATOR_CHART_REPO) \
		--install \
		--namespace $(OPERATOR_NAMESPACE) \
		--create-namespace \
		$(if $(OPERATOR_CHART_VERSION),--version $(OPERATOR_CHART_VERSION),) \
		$(if $(OPERATOR_VALUES_FILE),--values $(OPERATOR_VALUES_FILE),)
else
	@echo "OPERATOR_CHART_REPO not set, skipping operator installation."
endif
# Wait for the operator's CRDs to be usable before creating custom resources.
# Fix: the original hard-coded `sleep 5` and never used CRD_WAIT_TIMEOUT even
# though it echoed it. `kubectl wait` honors the documented timeout; kept
# non-fatal (|| echo) to preserve the original's never-fail behavior.
.PHONY: wait-for-crds
wait-for-crds:
ifeq ($(WAIT_FOR_CRD),true)
	@echo "Waiting for CRDs to be ready (timeout: $(CRD_WAIT_TIMEOUT))..."
	@kubectl wait --for=condition=Established crd --all --timeout=$(CRD_WAIT_TIMEOUT) \
		|| echo "Warning: some CRDs are not Established yet; continuing."
	@echo "CRDs should be ready now."
else
	@echo "WAIT_FOR_CRD not set to 'true', skipping CRD wait."
endif
# Install the cluster/resource using the operator's CRDs.
# CLUSTER_VALUES_FILE takes precedence over HELM_VALUES_FILE when both are set.
.PHONY: install-cluster
install-cluster:
	@echo "Installing cluster: $(CLUSTER_RELEASE_NAME)..."
	helm upgrade $(CLUSTER_RELEASE_NAME) $(HELM_CHART_REPO) \
		--install \
		--namespace $(HELM_NAMESPACE) \
		--create-namespace \
		$(if $(HELM_CHART_VERSION),--version $(HELM_CHART_VERSION),) \
		$(if $(CLUSTER_VALUES_FILE),--values $(CLUSTER_VALUES_FILE),$(if $(HELM_VALUES_FILE),--values $(HELM_VALUES_FILE),))
# Install operator first, wait for its CRDs, then create the cluster resource.
.PHONY: install-all
install-all: install-operator wait-for-crds install-cluster
# Uninstall the cluster only (leaves the operator running).
.PHONY: uninstall-cluster
uninstall-cluster:
	helm uninstall $(CLUSTER_RELEASE_NAME) --namespace $(HELM_NAMESPACE)
# Uninstall the operator only.
.PHONY: uninstall-operator
uninstall-operator:
	helm uninstall $(OPERATOR_RELEASE_NAME) --namespace $(OPERATOR_NAMESPACE)
# Uninstall everything (cluster first so the operator can finalize its resources).
.PHONY: uninstall-all
uninstall-all: uninstall-cluster uninstall-operator
# Verify operator installation.
# Best-effort: the grep misses CRDs whose names do not contain
# $(HELM_APPLICATION_NAME).
.PHONY: verify-operator
verify-operator:
	@echo "Verifying operator installation..."
	kubectl get pods -n $(OPERATOR_NAMESPACE)
	kubectl get crd | grep $(HELM_APPLICATION_NAME) || echo "No CRDs found for $(HELM_APPLICATION_NAME)"
# Verify cluster installation.
# NOTE(review): assumes a resource kind named $(HELM_APPLICATION_NAME) exists
# in the cluster -- confirm for each consumer of this template.
.PHONY: verify-cluster
verify-cluster:
	@echo "Verifying cluster installation..."
	kubectl get pods -n $(HELM_NAMESPACE)
	kubectl get $(HELM_APPLICATION_NAME) -n $(HELM_NAMESPACE) 2>/dev/null || echo "No $(HELM_APPLICATION_NAME) resources found"
# Verify everything.
.PHONY: verify
verify: verify-operator verify-cluster

23
src/agentgateway/Makefile Normal file
View File

@@ -0,0 +1,23 @@
# Deploys agentgateway from its OCI-hosted Helm chart; shared logic comes
# from the included templates at the bottom.
# `?=` defaults: overridable from the environment or the make command line.
HELM_RELEASE_NAME ?= agentgateway
HELM_APPLICATION_NAME ?= agentgateway
HELM_NAMESPACE ?= agentgateway-system
HELM_DIR ?= ./helm
# NOTE(review): chart version is v2.2.0 while values.yaml in this directory
# pins image tags v2.2.1 -- confirm the mismatch is intentional.
HELM_CHART_VERSION ?= v2.2.0
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?= cr.agentgateway.dev
HELM_OCI_NAMESPACE ?= charts
# Registry credentials; left empty for anonymous access.
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
# CRD configuration
# Consumed by crd.mk: installs Gateway API CRDs plus the agentgateway CRD chart.
CRD_INSTALL_GATEWAY_API = true
CRD_HELM_CHART_REPO = oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/agentgateway-crds
CRD_HELM_RELEASE_NAME = agentgateway-crds
# Gateway configuration
# Consumed by gateway.mk for GatewayClass verification.
GATEWAY_CLASS_NAME = agentgateway
# base.mk is not shown here; presumably it provides install/uninstall -- confirm.
include ../_template/base.mk
include ../_template/crd.mk
include ../_template/gateway.mk

View File

@@ -0,0 +1,98 @@
# agentgateway
## Introduction
agentgateway is a cloud-native API gateway designed for AI workloads. It provides a Kubernetes-native way to manage traffic, secure APIs, and observe your AI services. Built on the Kubernetes Gateway API, agentgateway enables seamless integration with AI agents and services.
## Prerequisites
Before installing agentgateway, ensure you have:
1. A Kubernetes cluster (1.25+)
2. `kubectl` installed
3. `helm` installed (3.8+ for OCI support)
## Installation
### Quick Install (includes CRDs)
To install agentgateway with all required CRDs:
```bash
make install-all
```
### Step-by-Step Install
1. Install Gateway API CRDs:
```bash
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
```
2. Install agentgateway CRDs:
```bash
make install-crds
```
3. Install agentgateway:
```bash
make install
```
## Usage
After installation, verify the deployment:
```bash
# Check if agentgateway pods are running
kubectl get pods -n agentgateway-system
# Verify GatewayClass is created
kubectl get gatewayclass agentgateway
# View agentgateway services
kubectl get svc -n agentgateway-system
```
## Configuration
The default configuration includes:
- Gateway controller for managing Gateway API resources
- Control plane components for configuration management
- Support for AI workload routing
You can customize the installation by modifying `values.yaml` before running `make install`.
## Gateway API
agentgateway supports the Kubernetes Gateway API standard. You can create Gateway and HTTPRoute resources to configure routing:
```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-gateway
namespace: agentgateway-system
spec:
gatewayClassName: agentgateway
listeners:
- name: http
protocol: HTTP
port: 80
```
## Uninstall
To uninstall agentgateway:
```bash
make uninstall
```
## Documentation
For more information, visit the [official documentation](https://agentgateway.dev/docs/kubernetes/latest/).

View File

@@ -0,0 +1,98 @@
# agentgateway
## 简介
agentgateway 是一个专为 AI 工作负载设计的云原生 API 网关。它提供了一种 Kubernetes 原生的方式来管理流量、保护 API 和观测 AI 服务。agentgateway 基于 Kubernetes Gateway API 构建,可实现与 AI 代理和服务的无缝集成。
## 前置条件
在安装 agentgateway 之前,请确保您已具备:
1. Kubernetes 集群 (1.25+)
2. 已安装 `kubectl`
3. 已安装 `helm` (3.8+ 以支持 OCI)
## 安装
### 快速安装(包含 CRDs
要安装 agentgateway 及其所有必需的 CRDs
```bash
make install-all
```
### 分步安装
1. 安装 Gateway API CRDs
```bash
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
```
2. 安装 agentgateway CRDs
```bash
make install-crds
```
3. 安装 agentgateway
```bash
make install
```
## 使用
安装完成后,验证部署状态:
```bash
# 检查 agentgateway pod 是否运行
kubectl get pods -n agentgateway-system
# 验证 GatewayClass 是否已创建
kubectl get gatewayclass agentgateway
# 查看 agentgateway 服务
kubectl get svc -n agentgateway-system
```
## 配置
默认配置包括:
- 用于管理 Gateway API 资源的网关控制器
- 用于配置管理的控制平面组件
- 支持 AI 工作负载路由
您可以在运行 `make install` 之前修改 `values.yaml` 来自定义安装。
## Gateway API
agentgateway 支持 Kubernetes Gateway API 标准。您可以创建 Gateway 和 HTTPRoute 资源来配置路由:
```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-gateway
namespace: agentgateway-system
spec:
gatewayClassName: agentgateway
listeners:
- name: http
protocol: HTTP
port: 80
```
## 卸载
卸载 agentgateway
```bash
make uninstall
```
## 文档
更多信息请访问[官方文档](https://agentgateway.dev/docs/kubernetes/latest/)。

View File

@@ -0,0 +1,72 @@
# Default values for agentgateway.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Controller configuration: the control-plane deployment that watches
# Gateway API resources and programs the proxies.
controller:
  image:
    repository: ghcr.io/kgateway-dev/agentgateway
    tag: v2.2.1
    pullPolicy: IfNotPresent
  resources:
    limits:
      cpu: 1000m
      memory: 1Gi
    requests:
      cpu: 100m
      memory: 256Mi
  # Extra environment variables for the controller container.
  # Value is quoted so it stays a string (env vars must be strings);
  # set to 'true' to enable experimental Gateway API features.
  extraEnv:
    KGW_ENABLE_GATEWAY_API_EXPERIMENTAL_FEATURES: 'false'

# Gateway proxy configuration: the data-plane pods that carry traffic.
gatewayProxy:
  image:
    repository: ghcr.io/kgateway-dev/agentgateway-proxy
    tag: v2.2.1
    pullPolicy: IfNotPresent
  resources:
    limits:
      cpu: 500m
      memory: 512Mi
    requests:
      cpu: 100m
      memory: 128Mi

# Service configuration for exposing the gateway.
# NOTE(review): LoadBalancer assumes the cluster can provision external
# load balancers; use NodePort/ClusterIP on bare-metal clusters.
service:
  type: LoadBalancer
  httpPort: 80
  httpsPort: 443

# RBAC configuration: create Roles/RoleBindings for the controller.
rbac:
  create: true

# Service account configuration.
serviceAccount:
  create: true
  annotations: {}

# Pod-level security context (applies to all containers in the pod).
podSecurityContext:
  runAsNonRoot: true
  seccompProfile:
    type: RuntimeDefault

# Container-level security context: hardened defaults (no privilege
# escalation, read-only root filesystem, all capabilities dropped).
securityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop:
      - ALL

# Scheduling controls (empty = no constraints).
nodeSelector: {}
tolerations: []
affinity: {}

15
src/apisix/Makefile Normal file
View File

@@ -0,0 +1,15 @@
# Helm deployment settings for Apache APISIX.
# All variables use ?= so they can be overridden from the environment or
# the make command line; shared targets come from ../_template/base.mk.
HELM_RELEASE_NAME ?= apisix
HELM_APPLICATION_NAME ?= apisix
HELM_NAMESPACE ?= apisix
# Local chart directory (used when vendoring the chart).
HELM_DIR ?= ./helm
# Pin a chart version here; empty means "latest".
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
# OCI registry settings (unused here; this chart comes from an HTTP repo).
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
# Classic Helm repository hosting the chart.
HELM_REPO_NAME ?= apisix
HELM_REPO_URL ?= https://apache.github.io/apisix-helm-chart
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

70
src/apisix/README.md Normal file
View File

@@ -0,0 +1,70 @@
# Apache APISIX
## Introduction
Apache APISIX is a dynamic, real-time, high-performance API gateway. It provides rich traffic management features such as load balancing, dynamic upstream, canary release, circuit breaking, authentication, observability, and more.
## Installation
To install APISIX, run:
```bash
make install
```
## Usage
After installation, access APISIX:
```bash
# Port forward to access Admin API
kubectl port-forward svc/apisix-admin -n apisix 9180:9180
# Access Admin API
curl http://localhost:9180/apisix/admin/routes -H 'X-API-Key: edd1c9f034335f136f87ad84b625c8f1'
```
Default Admin API key: `edd1c9f034335f136f87ad84b625c8f1`
## Configuration
The default configuration includes:
- APISIX server with Admin API enabled
- etcd as configuration storage
- Dashboard (if enabled)
- Ingress Controller (if enabled)
## Components
- **APISIX**: Core API gateway server
- **etcd**: Configuration storage backend
- **Dashboard**: Web UI for management (optional)
- **Ingress Controller**: Kubernetes ingress support (optional)
## Creating Routes
Example route configuration:
```bash
curl http://localhost:9180/apisix/admin/routes/1 \
-H 'X-API-Key: edd1c9f034335f136f87ad84b625c8f1' \
-X PUT -d '
{
"uri": "/hello",
"upstream": {
"type": "roundrobin",
"nodes": {
"127.0.0.1:1980": 1
}
}
}'
```
## Uninstall
To uninstall APISIX:
```bash
make uninstall
```

70
src/apisix/README.zh.md Normal file
View File

@@ -0,0 +1,70 @@
# Apache APISIX
## 简介
Apache APISIX 是一个动态、实时、高性能的 API 网关。它提供丰富的流量管理功能,如负载均衡、动态上游、灰度发布、熔断、认证、可观测性等。
## 安装
安装 APISIX
```bash
make install
```
## 使用
安装完成后,访问 APISIX
```bash
# 端口转发以访问 Admin API
kubectl port-forward svc/apisix-admin -n apisix 9180:9180
# 访问 Admin API
curl http://localhost:9180/apisix/admin/routes -H 'X-API-Key: edd1c9f034335f136f87ad84b625c8f1'
```
默认 Admin API 密钥:`edd1c9f034335f136f87ad84b625c8f1`
## 配置
默认配置包括:
- 启用 Admin API 的 APISIX 服务器
- etcd 作为配置存储
- Dashboard如果启用
- Ingress Controller如果启用
## 组件
- **APISIX**: 核心 API 网关服务器
- **etcd**: 配置存储后端
- **Dashboard**: 管理 Web UI可选
- **Ingress Controller**: Kubernetes 入口支持(可选)
## 创建路由
路由配置示例:
```bash
curl http://localhost:9180/apisix/admin/routes/1 \
-H 'X-API-Key: edd1c9f034335f136f87ad84b625c8f1' \
-X PUT -d '
{
"uri": "/hello",
"upstream": {
"type": "roundrobin",
"nodes": {
"127.0.0.1:1980": 1
}
}
}'
```
## 卸载
卸载 APISIX
```bash
make uninstall
```

53
src/apisix/values.yaml Normal file
View File

@@ -0,0 +1,53 @@
# Apache APISIX Configuration
# Chart: https://github.com/apache/apisix-helm-chart

# APISIX server configuration.
apisix:
  enabled: true
  image:
    repository: apache/apisix
    tag: 3.9.0
  replicaCount: 1
  resources:
    requests:
      cpu: 500m
      memory: 512Mi
    limits:
      cpu: 1000m
      memory: 1Gi

# Admin API configuration.
# NOTE(review): these are the well-known upstream default API keys;
# rotate them before exposing the Admin API beyond the cluster.
admin:
  enabled: true
  port: 9180
  credentials:
    admin: edd1c9f034335f136f87ad84b625c8f1
    viewer: 4054f7cf07e344346cd3f287985e76a2

# Proxy (data-plane) listener configuration.
proxy:
  http:
    enabled: true
    port: 9080
  tls:
    enabled: false

# etcd configuration — APISIX stores its runtime configuration in etcd.
etcd:
  enabled: true
  replicaCount: 1
  resources:
    requests:
      cpu: 200m
      memory: 256Mi
    limits:
      cpu: 500m
      memory: 512Mi

# Dashboard web UI (disabled by default).
dashboard:
  enabled: false

# Kubernetes Ingress Controller integration (disabled by default).
ingress-controller:
  enabled: false

30
src/cassandra/Makefile Normal file
View File

@@ -0,0 +1,30 @@
# Helm deployment settings for Apache Cassandra via the K8ssandra operator.
# Variables use ?= so they can be overridden; shared targets come from
# ../_template/base.mk and ../_template/operator.mk.
HELM_RELEASE_NAME ?= cassandra
HELM_APPLICATION_NAME ?= cassandra
HELM_NAMESPACE ?= cassandra
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
# OCI registry settings (unused here; the chart comes from an HTTP repo).
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= k8ssandra
HELM_REPO_URL ?= https://helm.k8ssandra.io/stable
# Operator configuration: the k8ssandra-operator chart manages clusters.
OPERATOR_RELEASE_NAME ?= k8ssandra-operator
OPERATOR_NAMESPACE ?= k8ssandra-operator
OPERATOR_CHART_REPO ?= $(HELM_REPO_NAME)/k8ssandra-operator
OPERATOR_CHART_VERSION ?=
OPERATOR_VALUES_FILE ?= ./values.yaml
# Cluster configuration: the k8ssandra chart creates the actual cluster.
CLUSTER_RELEASE_NAME ?= cassandra-cluster
CLUSTER_CHART_REPO ?= $(HELM_REPO_NAME)/k8ssandra
CLUSTER_VALUES_FILE ?= ./cluster-values.yaml
# Wait for the operator's CRDs to be established before installing clusters.
WAIT_FOR_CRD ?= true
include ../_template/base.mk
include ../_template/operator.mk

82
src/cassandra/README.md Normal file
View File

@@ -0,0 +1,82 @@
# Apache Cassandra (K8ssandra)
## Introduction
Apache Cassandra is an open-source distributed NoSQL database management system designed to handle large amounts of data across many commodity servers. This deployment uses K8ssandra Operator, which provides a Kubernetes-native way to manage Cassandra clusters.
K8ssandra is a cloud-native distribution of Apache Cassandra that runs on Kubernetes. It includes automation for operational tasks such as repairs, backups, and monitoring.
## Installation
To install Cassandra, run:
```bash
make install
```
## Usage
After installation, you can create a Cassandra cluster:
```bash
# Check if operator is running
kubectl get pods -n cassandra
# Create a Cassandra cluster
kubectl apply -f - <<EOF
apiVersion: k8ssandra.io/v1alpha1
kind: K8ssandraCluster
metadata:
name: demo
namespace: cassandra
spec:
cassandra:
serverVersion: "4.0.1"
datacenters:
- metadata:
name: dc1
size: 3
storageConfig:
cassandraDataVolumeClaimSpec:
storageClassName: standard
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 10Gi
config:
jvmOptions:
heapSize: 1Gi
EOF
```
## Configuration
The default configuration includes:
- K8ssandra Operator for managing Cassandra clusters
- Support for Cassandra 4.x
- Medusa for backup management
- Reaper for repair scheduling
- Metrics collection via Prometheus
## Features
- **Automated Repairs**: Reaper handles repair scheduling
- **Backup/Restore**: Medusa provides backup and restore capabilities
- **Monitoring**: Integrated Prometheus metrics
- **Multi-DC Support**: Deploy across multiple data centers
## Connecting to Cassandra
```bash
# Get CQLSH access
kubectl exec -it demo-dc1-default-sts-0 -n cassandra -c cassandra -- cqlsh
```
## Uninstall
To uninstall:
```bash
make uninstall
```

View File

@@ -0,0 +1,82 @@
# Apache Cassandra (K8ssandra)
## 简介
Apache Cassandra 是一个开源的分布式 NoSQL 数据库管理系统,设计用于在大量商用服务器上处理大量数据。此部署使用 K8ssandra Operator它提供了一种 Kubernetes 原生的方式来管理 Cassandra 集群。
K8ssandra 是 Apache Cassandra 的云原生发行版,可在 Kubernetes 上运行。它包括修复、备份和监控等运维任务的自动化。
## 安装
安装 Cassandra
```bash
make install
```
## 使用
安装完成后,您可以创建 Cassandra 集群:
```bash
# 检查 operator 是否运行
kubectl get pods -n cassandra
# 创建 Cassandra 集群
kubectl apply -f - <<EOF
apiVersion: k8ssandra.io/v1alpha1
kind: K8ssandraCluster
metadata:
name: demo
namespace: cassandra
spec:
cassandra:
serverVersion: "4.0.1"
datacenters:
- metadata:
name: dc1
size: 3
storageConfig:
cassandraDataVolumeClaimSpec:
storageClassName: standard
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 10Gi
config:
jvmOptions:
heapSize: 1Gi
EOF
```
## 配置
默认配置包括:
- 用于管理 Cassandra 集群的 K8ssandra Operator
- 支持 Cassandra 4.x
- Medusa 用于备份管理
- Reaper 用于修复调度
- 通过 Prometheus 收集指标
## 功能
- **自动修复**: Reaper 处理修复调度
- **备份/恢复**: Medusa 提供备份和恢复功能
- **监控**: 集成的 Prometheus 指标
- **多 DC 支持**: 跨多个数据中心部署
## 连接 Cassandra
```bash
# 获取 CQLSH 访问
kubectl exec -it demo-dc1-default-sts-0 -n cassandra -c cassandra -- cqlsh
```
## 卸载
卸载:
```bash
make uninstall
```

View File

@@ -0,0 +1,41 @@
# K8ssandra Cluster Configuration
# Operator: https://github.com/k8ssandra/k8ssandra-operator

# Cassandra cluster topology.
cassandra:
  clusterName: cassandra-cluster
  datacenters:
    - name: dc1
      # Total node count for the datacenter, spread across the racks below.
      size: 3
      racks:
        - name: rack1
        - name: rack2
        - name: rack3
      storage:
        # NOTE(review): assumes a StorageClass named "standard" exists in
        # the target cluster — adjust for your environment.
        storageClassName: standard
        size: 10Gi
      resources:
        requests:
          cpu: 1000m
          memory: 4Gi
        limits:
          cpu: 2000m
          memory: 4Gi

# Stargate data API gateway (disabled by default).
stargate:
  enabled: false
  size: 1
  heapSize: 256Mi

# Reaper repair scheduling (disabled by default).
reaper:
  enabled: false

# Medusa backup/restore (disabled by default).
medusa:
  enabled: false

# Prometheus monitoring (disabled by default).
monitoring:
  enabled: false

28
src/cassandra/values.yaml Normal file
View File

@@ -0,0 +1,28 @@
# K8ssandra Operator Configuration
# https://github.com/k8ssandra/k8ssandra-operator

# Operator deployment settings.
replicaCount: 1
resources:
  requests:
    cpu: 100m
    memory: 256Mi
  limits:
    cpu: 500m
    memory: 512Mi

# Medusa backup support (disabled by default).
medusa:
  enabled: false

# Prometheus monitoring (disabled by default).
monitoring:
  enabled: false

# When false the operator watches only its own namespace;
# set true to watch K8ssandraCluster resources cluster-wide.
clusterScoped: false

# Admission webhook (disabled by default).
webhook:
  enabled: false

15
src/dify/Makefile Normal file
View File

@@ -0,0 +1,15 @@
# Helm deployment settings for Dify.
# All variables use ?= so they can be overridden from the environment or
# the make command line; shared targets come from ../_template/base.mk.
HELM_RELEASE_NAME ?= dify
HELM_APPLICATION_NAME ?= dify
HELM_NAMESPACE ?= dify
HELM_DIR ?= ./helm
# Pin a chart version here; empty means "latest".
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
# OCI registry settings (unused here; this chart comes from an HTTP repo).
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
# Classic Helm repository hosting the chart.
HELM_REPO_NAME ?= dify
HELM_REPO_URL ?= https://langgenius.github.io/dify-helm
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

103
src/dify/README.md Normal file
View File

@@ -0,0 +1,103 @@
# Dify
## Introduction
Dify is an open-source LLM (Large Language Model) application development platform. It helps developers build generative AI applications more easily by combining AI workflow, RAG pipeline, agent capabilities, model management, observability features, and more.
This Helm chart deploys Dify on Kubernetes, including:
- API Server
- Web Frontend
- Worker for background tasks
- Weaviate for vector database
- Redis for caching
- PostgreSQL for data persistence
## Installation
To install Dify, run:
```bash
make install
```
## Prerequisites
Dify requires the following components:
- PostgreSQL 14+ (or use built-in)
- Redis 6+ (or use built-in)
- Weaviate vector database (or use built-in)
## Usage
After installation:
```bash
# Check if Dify pods are running
kubectl get pods -n dify
# Port forward to access Dify
kubectl port-forward svc/dify-web -n dify 3000:3000
kubectl port-forward svc/dify-api -n dify 5001:5001
```
Then open <http://localhost:3000> in your browser.
## Configuration
The default configuration includes:
- Dify API server with default settings
- Web frontend
- Background worker
- Built-in Weaviate vector database
- Built-in Redis cache
- Built-in PostgreSQL database
## Environment Variables
Configure Dify using environment variables in values.yaml:
```yaml
api:
env:
- name: APP_API_URL
value: 'http://dify-api:5001'
- name: SECRET_KEY
value: your-secret-key
- name: CONSOLE_API_URL
value: 'http://dify-api:5001'
- name: CONSOLE_WEB_URL
value: 'http://dify-web:3000'
```
## External Dependencies
To use external PostgreSQL/Redis:
```yaml
postgresql:
enabled: false
externalPostgresql:
host: your-postgres-host
port: 5432
database: dify
username: dify
password: your-password
redis:
enabled: false
externalRedis:
host: your-redis-host
port: 6379
password: your-password
```
## Uninstall
To uninstall Dify:
```bash
make uninstall
```

103
src/dify/README.zh.md Normal file
View File

@@ -0,0 +1,103 @@
# Dify
## 简介
Dify 是一个开源的 LLM大语言模型应用开发平台。它通过结合 AI 工作流、RAG 管道、Agent 能力、模型管理、可观测性功能等,帮助开发者更轻松地构建生成式 AI 应用。
此 Helm Chart 在 Kubernetes 上部署 Dify包括
- API 服务器
- Web 前端
- 后台任务 Worker
- Weaviate 向量数据库
- Redis 缓存
- PostgreSQL 数据持久化
## 安装
安装 Dify
```bash
make install
```
## 先决条件
Dify 需要以下组件:
- PostgreSQL 14+(或使用内置的)
- Redis 6+(或使用内置的)
- Weaviate 向量数据库(或使用内置的)
## 使用
安装完成后:
```bash
# 检查 Dify pod 是否运行
kubectl get pods -n dify
# 端口转发以访问 Dify
kubectl port-forward svc/dify-web -n dify 3000:3000
kubectl port-forward svc/dify-api -n dify 5001:5001
```
然后在浏览器中打开 <http://localhost:3000>。
## 配置
默认配置包括:
- 默认设置的 Dify API 服务器
- Web 前端
- 后台 worker
- 内置 Weaviate 向量数据库
- 内置 Redis 缓存
- 内置 PostgreSQL 数据库
## 环境变量
在 values.yaml 中使用环境变量配置 Dify
```yaml
api:
env:
- name: APP_API_URL
value: 'http://dify-api:5001'
- name: SECRET_KEY
value: your-secret-key
- name: CONSOLE_API_URL
value: 'http://dify-api:5001'
- name: CONSOLE_WEB_URL
value: 'http://dify-web:3000'
```
## 外部依赖
使用外部 PostgreSQL/Redis
```yaml
postgresql:
enabled: false
externalPostgresql:
host: your-postgres-host
port: 5432
database: dify
username: dify
password: your-password
redis:
enabled: false
externalRedis:
host: your-redis-host
port: 6379
password: your-password
```
## 卸载
卸载 Dify
```bash
make uninstall
```

119
src/dify/values.yaml Normal file
View File

@@ -0,0 +1,119 @@
# Dify Configuration
# Chart: https://github.com/langgenius/dify-helm

# API server configuration.
api:
  replicaCount: 1
  image:
    repository: langgenius/dify-api
    tag: 0.6.9
  service:
    type: ClusterIP
    port: 5001
  resources:
    requests:
      cpu: 200m
      memory: 512Mi
    limits:
      cpu: 500m
      memory: 1Gi
  env:
    - name: MODE
      value: api
    # NOTE(review): placeholder secret committed to VCS — override with a
    # generated value (e.g. `openssl rand -base64 42`) before real use.
    - name: SECRET_KEY
      value: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiBacb72
    - name: LOG_LEVEL
      value: INFO

# Web frontend configuration.
web:
  replicaCount: 1
  image:
    repository: langgenius/dify-web
    tag: 0.6.9
  service:
    type: ClusterIP
    port: 3000
  resources:
    requests:
      cpu: 100m
      memory: 256Mi
    limits:
      cpu: 200m
      memory: 512Mi
  env:
    - name: CONSOLE_API_URL
      value: http://dify-api:5001
    - name: APP_API_URL
      value: http://dify-api:5001

# Background worker — same image as the API, started in worker mode.
worker:
  replicaCount: 1
  image:
    repository: langgenius/dify-api
    tag: 0.6.9
  resources:
    requests:
      cpu: 100m
      memory: 256Mi
    limits:
      cpu: 200m
      memory: 512Mi
  env:
    - name: MODE
      value: worker

# Built-in PostgreSQL (disable and point Dify at an external instance
# for production-grade persistence).
postgresql:
  enabled: true
  auth:
    username: dify
    password: dify
    database: dify
  primary:
    persistence:
      enabled: true
      size: 10Gi
    resources:
      requests:
        cpu: 200m
        memory: 256Mi
      limits:
        cpu: 500m
        memory: 512Mi

# Built-in Redis cache.
redis:
  enabled: true
  auth:
    enabled: false
  master:
    persistence:
      enabled: true
      size: 5Gi
    resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 200m
        memory: 256Mi

# Built-in Weaviate vector database.
weaviate:
  enabled: true
  persistence:
    enabled: true
    size: 10Gi
  resources:
    requests:
      cpu: 200m
      memory: 512Mi
    limits:
      cpu: 500m
      memory: 1Gi

# Ingress (disabled by default).
ingress:
  enabled: false

View File

@@ -0,0 +1,15 @@
# Helm deployment settings for Docker Registry (twuni chart).
# Note: the release and namespace are "registry" while the chart's
# application name is "docker-registry".
HELM_RELEASE_NAME ?= registry
HELM_APPLICATION_NAME ?= docker-registry
HELM_NAMESPACE ?= registry
HELM_DIR ?= ./helm
# Pin a chart version here; empty means "latest".
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
# OCI registry settings (unused here; this chart comes from an HTTP repo).
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
# Classic Helm repository hosting the chart.
HELM_REPO_NAME ?= twuni
HELM_REPO_URL ?= https://helm.twun.io
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

View File

@@ -0,0 +1,78 @@
# Docker Registry
## Introduction
Docker Registry is a stateless, highly scalable server-side application that stores and lets you distribute Docker images. It is open-source, licensed under the Apache-2.0 license.
This Helm chart deploys a private Docker Registry on Kubernetes, allowing you to store and manage your container images within your own infrastructure.
## Installation
To install Docker Registry, run:
```bash
make install
```
## Usage
After installation, you can push and pull images from the registry:
```bash
# Port forward to access registry locally
kubectl port-forward svc/registry -n registry 5000:5000
# Tag an image for your private registry
docker tag my-image localhost:5000/my-image
# Push to your private registry
docker push localhost:5000/my-image
# Pull from your private registry
docker pull localhost:5000/my-image
```
## Configuration
The default configuration includes:
- In-memory storage (for testing only)
- No authentication
- Service type: ClusterIP
## Storage Options
For production use, configure persistent storage:
```yaml
persistence:
enabled: true
size: 10Gi
storageClass: standard
```
## Authentication
Enable basic authentication:
```yaml
secrets:
htpasswd: |
admin:$2y$05$...
```
## TLS
Enable TLS for secure communication:
```yaml
tlsSecretName: registry-tls
```
## Uninstall
To uninstall Docker Registry:
```bash
make uninstall
```

View File

@@ -0,0 +1,78 @@
# Docker Registry
## 简介
Docker Registry 是一个无状态、高度可扩展的服务器端应用程序,用于存储和分发 Docker 镜像。它是开源的,采用 Apache-2.0 许可证。
此 Helm Chart 在 Kubernetes 上部署一个私有 Docker Registry允许您在自己的基础设施中存储和管理容器镜像。
## 安装
安装 Docker Registry
```bash
make install
```
## 使用
安装完成后,您可以向 registry 推送和拉取镜像:
```bash
# 端口转发以本地访问 registry
kubectl port-forward svc/registry -n registry 5000:5000
# 为私有 registry 标记镜像
docker tag my-image localhost:5000/my-image
# 推送到私有 registry
docker push localhost:5000/my-image
# 从私有 registry 拉取
docker pull localhost:5000/my-image
```
## 配置
默认配置包括:
- 内存存储(仅用于测试)
- 无认证
- 服务类型ClusterIP
## 存储选项
生产使用请配置持久存储:
```yaml
persistence:
enabled: true
size: 10Gi
storageClass: standard
```
## 认证
启用基本认证:
```yaml
secrets:
htpasswd: |
admin:$2y$05$...
```
## TLS
启用 TLS 进行安全通信:
```yaml
tlsSecretName: registry-tls
```
## 卸载
卸载 Docker Registry
```bash
make uninstall
```

View File

@@ -0,0 +1,43 @@
# Docker Registry Configuration
# Chart: https://github.com/twuni/docker-registry.helm

# Registry image.
image:
  repository: registry
  tag: 2.8.3

# Service configuration.
service:
  type: ClusterIP
  port: 5000

# Persistence (disabled by default — use S3 or another backend for production).
persistence:
  enabled: false
  size: 10Gi

# Storage driver. "memory" keeps image data in RAM and loses it on
# restart — testing only; use filesystem (with persistence) or an
# object store in production.
storage: memory

# htpasswd-based authentication; empty string disables auth.
secrets:
  htpasswd: ''

# Name of the TLS secret; empty string disables TLS.
tlsSecretName: ''

# Resource configuration.
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 500m
    memory: 512Mi

# Replicas.
replicaCount: 1

# Ingress (disabled by default).
ingress:
  enabled: false

View File

@@ -1,5 +1,5 @@
HELM_RELEASE_NAME ?= duckdb
HELM_APPLICATION_NAME ?= duckdb
HELM_APPLICATION_NAME ?= jupyterhub
HELM_NAMESPACE ?= duckdb
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
@@ -10,6 +10,6 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= jupyterhub
HELM_REPO_URL ?= https://hub.jupyter.org/helm-chart/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/jupyterhub
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

View File

@@ -10,6 +10,21 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= flink-operator
HELM_REPO_URL ?= https://downloads.apache.org/flink/flink-kubernetes-operator-1.9.0/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/flink-operator
# Operator configuration
OPERATOR_RELEASE_NAME ?= flink-operator
OPERATOR_NAMESPACE ?= flink-operator
OPERATOR_CHART_REPO ?= $(HELM_REPO_NAME)/flink-operator
OPERATOR_CHART_VERSION ?=
OPERATOR_VALUES_FILE ?= ./values.yaml
# Cluster configuration (Flink uses FlinkDeployment CR, installed via kubectl or separate chart)
CLUSTER_RELEASE_NAME ?= flink-cluster
CLUSTER_CHART_REPO ?= $(HELM_REPO_NAME)/flink-cluster
CLUSTER_VALUES_FILE ?= ./cluster-values.yaml
# Enable CRD waiting
WAIT_FOR_CRD ?= true
include ../_template/base.mk
include ../_template/operator.mk

View File

@@ -0,0 +1,33 @@
# Flink Cluster Configuration (FlinkDeployment CR)
# https://github.com/apache/flink-kubernetes-operator
# Flink cluster name
nameOverride: flink-cluster
# Flink version
flinkVersion: v1.19
# Job configuration
job:
jarURI: local:///opt/flink/examples/streaming/StateMachineExample.jar
parallelism: 2
upgradeMode: stateful
state: running
# TaskManager configuration
taskManager:
resource:
memory: 2048m
cpu: 1
replicas: 2
# JobManager configuration
jobManager:
resource:
memory: 1024m
cpu: 0.5
replicas: 1
# Service configuration
service:
type: ClusterIP

View File

@@ -1,5 +1,5 @@
HELM_RELEASE_NAME ?= gitea-runner
HELM_APPLICATION_NAME ?= gitea-runner
HELM_APPLICATION_NAME ?= actions
HELM_NAMESPACE ?= gitea-runner
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
@@ -10,6 +10,6 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= gitea
HELM_REPO_URL ?= https://dl.gitea.com/charts
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/actions
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

13
src/inngest/Makefile Normal file
View File

@@ -0,0 +1,13 @@
HELM_RELEASE_NAME ?= inngest
HELM_APPLICATION_NAME ?= inngest
HELM_NAMESPACE ?= inngest
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?= ghcr.io
HELM_OCI_NAMESPACE ?= inngest/helm-charts
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

83
src/inngest/README.md Normal file
View File

@@ -0,0 +1,83 @@
# Inngest
## Introduction
Inngest is an open-source, event-driven job queue and workflow engine. It allows you to build reliable background jobs, scheduled tasks, and event-driven workflows using a declarative approach.
This Helm chart deploys the Inngest server on Kubernetes, enabling you to self-host your event processing infrastructure.
## Installation
To install Inngest, run:
```bash
make install
```
## Usage
After installation:
```bash
# Check if Inngest server is running
kubectl get pods -n inngest
# Port forward to access the dashboard
kubectl port-forward svc/inngest -n inngest 8288:8288
```
Then open <http://localhost:8288> in your browser to access the Inngest dashboard.
## Configuration
The default configuration includes:
- Inngest server with event processing capabilities
- PostgreSQL for data persistence (when enabled)
- Redis for event streaming (when enabled)
## Prerequisites
Inngest requires:
- PostgreSQL database for persistence
- Redis for event streaming (optional but recommended)
Configure these in your values.yaml:
```yaml
postgres:
host: your-postgres-host
port: 5432
database: inngest
user: inngest
password: your-password
redis:
host: your-redis-host
port: 6379
```
## Event Processing
Inngest processes events from your applications:
```javascript
// Example: Sending events from your application
import { Inngest } from 'inngest'
const inngest = new Inngest({ id: 'my-app' })
await inngest.send({
name: 'user.signup',
data: { userId: '123' }
})
```
## Uninstall
To uninstall Inngest:
```bash
make uninstall
```

83
src/inngest/README.zh.md Normal file
View File

@@ -0,0 +1,83 @@
# Inngest
## 简介
Inngest 是一个开源的、事件驱动的作业队列和工作流引擎。它允许您使用声明式方法构建可靠的后台作业、计划任务和事件驱动工作流。
此 Helm Chart 在 Kubernetes 上部署 Inngest 服务器,使您能够自托管事件处理基础设施。
## 安装
安装 Inngest
```bash
make install
```
## 使用
安装完成后:
```bash
# 检查 Inngest 服务器是否运行
kubectl get pods -n inngest
# 端口转发以访问仪表板
kubectl port-forward svc/inngest -n inngest 8288:8288
```
然后在浏览器中打开 <http://localhost:8288> 访问 Inngest 仪表板。
## 配置
默认配置包括:
- 具有事件处理能力的 Inngest 服务器
- PostgreSQL 用于数据持久化(启用时)
- Redis 用于事件流(启用时)
## 先决条件
Inngest 需要:
- PostgreSQL 数据库用于持久化
- Redis 用于事件流(可选但推荐)
在您的 values.yaml 中配置这些:
```yaml
postgres:
host: your-postgres-host
port: 5432
database: inngest
user: inngest
password: your-password
redis:
host: your-redis-host
port: 6379
```
## 事件处理
Inngest 处理来自您应用程序的事件:
```javascript
// 示例:从您的应用程序发送事件
import { Inngest } from 'inngest'
const inngest = new Inngest({ id: 'my-app' })
await inngest.send({
name: 'user.signup',
data: { userId: '123' }
})
```
## 卸载
卸载 Inngest
```bash
make uninstall
```

49
src/inngest/values.yaml Normal file
View File

@@ -0,0 +1,49 @@
# Inngest Configuration
# https://github.com/inngest/inngest-helm
# Server configuration
replicaCount: 1
image:
repository: inngest/inngest
tag: latest
resources:
requests:
cpu: 200m
memory: 256Mi
limits:
cpu: 500m
memory: 512Mi
# Service configuration
service:
type: ClusterIP
port: 8288
# PostgreSQL configuration (required)
postgres:
enabled: false
host: ''
port: 5432
database: inngest
user: inngest
password: ''
# Redis configuration (optional but recommended)
redis:
enabled: false
host: ''
port: 6379
# Event API configuration
eventApi:
enabled: true
# Dashboard configuration
dashboard:
enabled: true
# Ingress configuration (disabled by default)
ingress:
enabled: false

View File

@@ -1,5 +1,5 @@
HELM_RELEASE_NAME ?= strimzi-kafka-operator
HELM_APPLICATION_NAME ?= strimzi-kafka-operator
HELM_RELEASE_NAME ?= kafka
HELM_APPLICATION_NAME ?= kafka
HELM_NAMESPACE ?= kafka
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?= 0.50.0
@@ -8,6 +8,28 @@ HELM_OCI_REGISTRY ?= docker.io
HELM_OCI_NAMESPACE ?=
HELM_REPO_NAME ?= strimzi
HELM_REPO_URL ?= https://strimzi.io/charts/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Operator configuration (Strimzi only has operator, cluster is created via CRDs)
OPERATOR_RELEASE_NAME ?= strimzi-kafka-operator
OPERATOR_NAMESPACE ?= strimzi-operator
OPERATOR_CHART_REPO ?= $(HELM_REPO_NAME)/strimzi-kafka-operator
OPERATOR_CHART_VERSION ?= $(HELM_CHART_VERSION)
OPERATOR_VALUES_FILE ?= ./values.yaml
# For Strimzi, we only install the operator
# Kafka clusters are created using Kafka CRDs after operator is installed
include ../_template/base.mk
include ../_template/operator.mk
# Override install target to only install operator
.PHONY: install
install: install-operator
# Override uninstall target to only uninstall operator
.PHONY: uninstall
uninstall: uninstall-operator
# Override verify target
.PHONY: verify
verify: verify-operator

23
src/kgateway/Makefile Normal file
View File

@@ -0,0 +1,23 @@
HELM_RELEASE_NAME ?= kgateway
HELM_APPLICATION_NAME ?= kgateway
HELM_NAMESPACE ?= kgateway-system
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?= v2.2.0
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?= cr.kgateway.dev
HELM_OCI_NAMESPACE ?= kgateway-dev/charts
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
# CRD configuration
CRD_INSTALL_GATEWAY_API = true
CRD_HELM_CHART_REPO = oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/kgateway-crds
CRD_HELM_RELEASE_NAME = kgateway-crds
# Gateway configuration
GATEWAY_CLASS_NAME = kgateway
include ../_template/base.mk
include ../_template/crd.mk
include ../_template/gateway.mk

92
src/kgateway/README.md Normal file
View File

@@ -0,0 +1,92 @@
# kgateway
## Introduction
kgateway is a cloud-native API gateway built on Envoy Proxy. It provides a Kubernetes-native way to manage traffic, secure APIs, and observe your services. Formerly known as Gloo Gateway, kgateway offers advanced routing capabilities, traffic management, and extensibility through WebAssembly (Wasm) filters.
## Prerequisites
Before installing kgateway, ensure you have:
1. A Kubernetes cluster (1.25+)
2. `kubectl` installed
3. `helm` installed (3.8+ for OCI support)
## Installation
### Quick Install (includes CRDs)
To install kgateway with all required CRDs:
```bash
make install-all
```
### Step-by-Step Install
1. Install Kubernetes Gateway API CRDs:
```bash
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
```
2. Install kgateway CRDs:
```bash
make install-crds
```
3. Install kgateway:
```bash
make install
```
## Usage
After installation, verify the deployment:
```bash
# Check if kgateway pods are running
kubectl get pods -n kgateway-system
# Verify GatewayClass is created
kubectl get gatewayclass kgateway
# View kgateway services
kubectl get svc -n kgateway-system
```
## Configuration
The default configuration includes:
- Gateway controller for managing Gateway API resources
- Envoy proxy deployment for traffic handling
- Control plane components for configuration management
## Gateway API
kgateway supports the Kubernetes Gateway API standard. You can create Gateway and HTTPRoute resources to configure routing:
```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-gateway
namespace: kgateway-system
spec:
gatewayClassName: kgateway
listeners:
- name: http
protocol: HTTP
port: 80
```
## Uninstall
To uninstall kgateway:
```bash
make uninstall
```

92
src/kgateway/README.zh.md Normal file
View File

@@ -0,0 +1,92 @@
# kgateway
## 简介
kgateway 是一个基于 Envoy Proxy 的云原生 API 网关。它提供了一种 Kubernetes 原生的方式来管理流量、保护 API 和观测服务。kgateway 前身为 Gloo Gateway提供高级路由功能、流量管理和通过 WebAssembly (Wasm) 过滤器的可扩展性。
## 前置条件
在安装 kgateway 之前,请确保您已具备:
1. Kubernetes 集群 (1.25+)
2. 已安装 `kubectl`
3. 已安装 `helm` (3.8+ 以支持 OCI)
## 安装
### 快速安装(包含 CRDs
要安装 kgateway 及其所有必需的 CRDs
```bash
make install-all
```
### 分步安装
1. 安装 Kubernetes Gateway API CRDs
```bash
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
```
2. 安装 kgateway CRDs
```bash
make install-crds
```
3. 安装 kgateway
```bash
make install
```
## 使用
安装完成后,验证部署状态:
```bash
# 检查 kgateway pod 是否运行
kubectl get pods -n kgateway-system
# 验证 GatewayClass 是否已创建
kubectl get gatewayclass kgateway
# 查看 kgateway 服务
kubectl get svc -n kgateway-system
```
## 配置
默认配置包括:
- 用于管理 Gateway API 资源的网关控制器
- 用于流量处理的 Envoy 代理部署
- 用于配置管理的控制平面组件
## Gateway API
kgateway 支持 Kubernetes Gateway API 标准。您可以创建 Gateway 和 HTTPRoute 资源来配置路由:
```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-gateway
namespace: kgateway-system
spec:
gatewayClassName: kgateway
listeners:
- name: http
protocol: HTTP
port: 80
```
## 卸载
卸载 kgateway
```bash
make uninstall
```

45
src/kgateway/values.yaml Normal file
View File

@@ -0,0 +1,45 @@
# kgateway Configuration
# https://github.com/kgateway-dev/kgateway
# Controller configuration
controller:
replicas: 1
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: 1000m
memory: 1Gi
# Envoy gateway configuration
gateway:
replicas: 1
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: 1000m
memory: 1Gi
service:
type: LoadBalancer
# Discovery configuration
discovery:
enabled: true
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: 500m
memory: 512Mi
# Observability configuration
observability:
enabled: true
# GatewayClass configuration
gatewayClass:
name: kgateway

15
src/logstash/Makefile Normal file
View File

@@ -0,0 +1,15 @@
# Helm deployment settings for Logstash (chart from the official Elastic repo).
# All variables use ?= so they can be overridden from the environment or CLI.
HELM_RELEASE_NAME ?= logstash
HELM_APPLICATION_NAME ?= logstash
HELM_NAMESPACE ?= logstash
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= elastic
HELM_REPO_URL ?= https://helm.elastic.co
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Shared install/uninstall/verify targets.
include ../_template/base.mk

92
src/logstash/README.md Normal file
View File

@@ -0,0 +1,92 @@
# Logstash
## Introduction
Logstash is an open-source data processing pipeline that ingests data from multiple sources, transforms it, and then sends it to your favorite "stash" (like Elasticsearch). It is part of the Elastic Stack (ELK Stack) and is commonly used for log aggregation and analysis.
This Helm chart deploys Logstash on Kubernetes for processing and forwarding logs.
## Installation
To install Logstash, run:
```bash
make install
```
## Usage
After installation:
```bash
# Check if Logstash is running
kubectl get pods -n logstash
# View Logstash logs
kubectl logs -f -n logstash -l app=logstash
```
## Configuration
The default configuration includes:
- Logstash pipeline with basic input/output configuration
- Persistent volume for data persistence
- Resource limits for stable operation
## Pipeline Configuration
Configure your Logstash pipeline in `values.yaml`:
```yaml
logstashPipeline:
logstash.conf: |
input {
beats {
port => 5044
}
}
filter {
# Add your filters here
}
output {
elasticsearch {
hosts => ["http://elasticsearch:9200"]
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
}
}
```
## Inputs and Outputs
Common input plugins:
- `beats`: Receive data from Beats shippers
- `tcp`/`udp`: Receive data over network
- `http`: Receive data via HTTP
- `kafka`: Consume from Kafka topics
Common output plugins:
- `elasticsearch`: Send to Elasticsearch
- `kafka`: Produce to Kafka topics
- `s3`: Write to AWS S3
- `stdout`: Output to console (for debugging)
## Persistence
Enable persistent storage for queue data:
```yaml
persistence:
enabled: true
size: 10Gi
```
## Uninstall
To uninstall Logstash:
```bash
make uninstall
```

92
src/logstash/README.zh.md Normal file
View File

@@ -0,0 +1,92 @@
# Logstash
## 简介
Logstash 是一个开源数据处理管道,可从多个来源摄取数据,对其进行转换,然后将其发送到您喜欢的"存储"(如 Elasticsearch）。它是 Elastic Stack（ELK Stack）的一部分，通常用于日志聚合和分析。
此 Helm Chart 在 Kubernetes 上部署 Logstash用于处理和转发日志。
## 安装
安装 Logstash
```bash
make install
```
## 使用
安装完成后:
```bash
# 检查 Logstash 是否运行
kubectl get pods -n logstash
# 查看 Logstash 日志
kubectl logs -f -n logstash -l app=logstash
```
## 配置
默认配置包括:
- 具有基本输入/输出配置的 Logstash 管道
- 用于数据持久化的持久卷
- 稳定操作的资源限制
## 管道配置
`values.yaml` 中配置您的 Logstash 管道:
```yaml
logstashPipeline:
logstash.conf: |
input {
beats {
port => 5044
}
}
filter {
# 在此处添加您的过滤器
}
output {
elasticsearch {
hosts => ["http://elasticsearch:9200"]
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
}
}
```
## 输入和输出
常用输入插件:
- `beats`: 从 Beats 接收数据
- `tcp`/`udp`: 通过网络接收数据
- `http`: 通过 HTTP 接收数据
- `kafka`: 从 Kafka 主题消费
常用输出插件:
- `elasticsearch`: 发送到 Elasticsearch
- `kafka`: 生产到 Kafka 主题
- `s3`: 写入 AWS S3
- `stdout`: 输出到控制台(用于调试)
## 持久化
为队列数据启用持久存储:
```yaml
persistence:
enabled: true
size: 10Gi
```
## 卸载
卸载 Logstash
```bash
make uninstall
```

55
src/logstash/values.yaml Normal file
View File

@@ -0,0 +1,55 @@
# Logstash Configuration
# https://github.com/elastic/helm-charts

# Logstash image
image: docker.elastic.co/logstash/logstash
imageTag: 8.11.0

# Replicas
replicas: 1

# Resource configuration
resources:
  requests:
    cpu: 500m
    memory: 1Gi
  limits:
    cpu: 1000m
    memory: 2Gi

# Logstash Java options
# Heap fixed at 1g; keep below the container memory limit above.
logstashJavaOpts: -Xmx1g -Xms1g

# Pipeline configuration
# Default pipeline: receive events from Beats on 5044 and print them to
# stdout (rubydebug) for debugging; replace the output for production use.
logstashPipeline:
  logstash.conf: |
    input {
      beats {
        port => 5044
      }
    }
    output {
      stdout {
        codec => rubydebug
      }
    }

# Service configuration
service:
  type: ClusterIP
  ports:
    - name: beats
      port: 5044
      protocol: TCP
      targetPort: 5044

# Persistence (disabled by default)
persistence:
  enabled: false
  size: 10Gi

# Volume mounts
volumeMounts: []

# Volumes
volumes: []

View File

@@ -1,5 +1,5 @@
HELM_RELEASE_NAME ?= mysql
HELM_APPLICATION_NAME ?= mysql-innodbcluster
HELM_APPLICATION_NAME ?= mysql
HELM_NAMESPACE ?= mysql
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
@@ -10,6 +10,21 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= mysql-operator
HELM_REPO_URL ?= https://mysql.github.io/mysql-operator/
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/mysql-innodbcluster
# Operator configuration
OPERATOR_RELEASE_NAME ?= mysql-operator
OPERATOR_NAMESPACE ?= mysql-operator
OPERATOR_CHART_REPO ?= $(HELM_REPO_NAME)/mysql-operator
OPERATOR_CHART_VERSION ?=
OPERATOR_VALUES_FILE ?=
# Cluster configuration
CLUSTER_RELEASE_NAME ?= mysql-cluster
CLUSTER_CHART_REPO ?= $(HELM_REPO_NAME)/mysql-innodbcluster
CLUSTER_VALUES_FILE ?= ./values.yaml
# Enable CRD waiting
WAIT_FOR_CRD ?= true
include ../_template/base.mk
include ../_template/operator.mk

30
src/nebulagraph/Makefile Normal file
View File

@@ -0,0 +1,30 @@
# Helm deployment settings for NebulaGraph (operator + cluster charts).
HELM_RELEASE_NAME ?= nebula
HELM_APPLICATION_NAME ?= nebula
HELM_NAMESPACE ?= nebula
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= nebula-operator
HELM_REPO_URL ?= https://vesoft-inc.github.io/nebula-operator/charts
# NOTE(review): HELM_CHART_REPO is not set here, unlike sibling Makefiles
# (e.g. mysql); confirm base.mk does not need it when operator.mk drives
# the install via OPERATOR_/CLUSTER_ variables below.
# Operator configuration
OPERATOR_RELEASE_NAME ?= nebula-operator
OPERATOR_NAMESPACE ?= nebula-operator-system
OPERATOR_CHART_REPO ?= $(HELM_REPO_NAME)/nebula-operator
OPERATOR_CHART_VERSION ?=
OPERATOR_VALUES_FILE ?=
# Cluster configuration
CLUSTER_RELEASE_NAME ?= nebula-cluster
CLUSTER_CHART_REPO ?= $(HELM_REPO_NAME)/nebula-cluster
CLUSTER_VALUES_FILE ?= ./values.yaml
# Enable CRD waiting
WAIT_FOR_CRD ?= true
include ../_template/base.mk
include ../_template/operator.mk

77
src/nebulagraph/README.md Normal file
View File

@@ -0,0 +1,77 @@
# NebulaGraph
## Introduction
NebulaGraph is an open-source distributed graph database built for super large-scale graphs with milliseconds of latency. It delivers high performance, scalability, and availability for storing and processing graph data.
This Helm chart deploys NebulaGraph cluster using the NebulaGraph Operator on Kubernetes.
## Installation
To install NebulaGraph, run:
```bash
make install
```
## Prerequisites
NebulaGraph Operator must be installed first:
```bash
helm repo add nebula-operator https://vesoft-inc.github.io/nebula-operator/charts
helm install nebula-operator nebula-operator/nebula-operator --namespace nebula-operator --create-namespace
```
## Usage
After installation:
```bash
# Check if NebulaGraph cluster is running
kubectl get pods -n nebula
# Access Graphd service
kubectl port-forward svc/nebula-graphd -n nebula 9669:9669
```
## Configuration
The default configuration includes:
- NebulaGraph cluster with 3 graphd, 3 metad, and 3 storaged nodes
- Persistent storage for data
- Default port configurations
## Components
- **Graphd**: Query engine for graph processing
- **Metad**: Metadata management
- **Storaged**: Storage engine for graph data
## Connecting to NebulaGraph
Use Nebula Console to connect:
```bash
# Install nebula-console
# Connect to the cluster
./nebula-console -addr 127.0.0.1 -port 9669 -u root -p nebula
```
## Graph Data Model
NebulaGraph uses:
- **Vertices**: Represent entities
- **Edges**: Represent relationships
- **Tags**: Define vertex types
- **Edge Types**: Define relationship types
## Uninstall
To uninstall NebulaGraph:
```bash
make uninstall
```

View File

@@ -0,0 +1,77 @@
# NebulaGraph
## 简介
NebulaGraph 是一个开源的分布式图数据库,专为超大规模图数据而设计,具有毫秒级延迟。它为存储和处理图数据提供高性能、可扩展性和可用性。
此 Helm Chart 使用 NebulaGraph Operator 在 Kubernetes 上部署 NebulaGraph 集群。
## 安装
安装 NebulaGraph
```bash
make install
```
## 先决条件
必须首先安装 NebulaGraph Operator
```bash
helm repo add nebula-operator https://vesoft-inc.github.io/nebula-operator/charts
helm install nebula-operator nebula-operator/nebula-operator --namespace nebula-operator --create-namespace
```
## 使用
安装完成后:
```bash
# 检查 NebulaGraph 集群是否运行
kubectl get pods -n nebula
# 访问 Graphd 服务
kubectl port-forward svc/nebula-graphd -n nebula 9669:9669
```
## 配置
默认配置包括:
- NebulaGraph 集群,包含 3 个 graphd、3 个 metad 和 3 个 storaged 节点
- 数据持久化存储
- 默认端口配置
## 组件
- **Graphd**: 图处理查询引擎
- **Metad**: 元数据管理
- **Storaged**: 图数据存储引擎
## 连接 NebulaGraph
使用 Nebula Console 连接:
```bash
# 安装 nebula-console
# 连接到集群
./nebula-console -addr 127.0.0.1 -port 9669 -u root -p nebula
```
## 图数据模型
NebulaGraph 使用:
- **顶点**: 表示实体
- **边**: 表示关系
- **标签**: 定义顶点类型
- **边类型**: 定义关系类型
## 卸载
卸载 NebulaGraph
```bash
make uninstall
```

View File

@@ -0,0 +1,53 @@
# NebulaGraph Cluster Configuration
# https://github.com/vesoft-inc/nebula-operator

# Cluster name
nameOverride: nebula

# NebulaGraph version
nebula:
  version: v3.6.0

# Graphd configuration (query engine)
graphd:
  replicas: 3
  resources:
    requests:
      cpu: 500m
      memory: 512Mi
    limits:
      cpu: 1000m
      memory: 1Gi
  service:
    type: ClusterIP

# Metad configuration (metadata service)
metad:
  replicas: 3
  resources:
    requests:
      cpu: 200m
      memory: 256Mi
    limits:
      cpu: 500m
      memory: 512Mi
  storage:
    size: 10Gi

# Storaged configuration (storage engine)
storaged:
  replicas: 3
  resources:
    requests:
      cpu: 500m
      memory: 512Mi
    limits:
      cpu: 1000m
      memory: 1Gi
  storage:
    size: 50Gi

# Reference to existing nebula-operator
# NOTE(review): the Makefile installs the operator into
# "nebula-operator-system"; this namespace says "nebula-operator" —
# confirm which one the chart expects.
operator:
  name: nebula-operator
  namespace: nebula-operator

View File

@@ -0,0 +1,13 @@
# Helm deployment settings for Prometheus Node Exporter, pulled as an OCI
# chart from the prometheus-community registry on ghcr.io.
HELM_RELEASE_NAME ?= node-exporter
HELM_APPLICATION_NAME ?= prometheus-node-exporter
HELM_NAMESPACE ?= monitoring
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?= ghcr.io
HELM_OCI_NAMESPACE ?= prometheus-community/charts
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

View File

@@ -0,0 +1,81 @@
# Prometheus Node Exporter
## Introduction
Prometheus Node Exporter is a Prometheus exporter for hardware and OS metrics exposed by *NIX kernels. It collects system-level metrics such as CPU, memory, disk, and network usage from Linux/Unix systems.
This Helm chart deploys Node Exporter as a DaemonSet on Kubernetes, ensuring that one instance runs on each node to collect host-level metrics.
## Installation
To install Node Exporter, run:
```bash
make install
```
## Usage
After installation:
```bash
# Check if Node Exporter pods are running on all nodes
kubectl get pods -n monitoring -l app.kubernetes.io/name=prometheus-node-exporter
# View metrics
curl http://<node-ip>:9100/metrics
```
## Configuration
The default configuration includes:
- DaemonSet deployment (one pod per node)
- Host network access for system metrics
- Host PID access for process metrics
- Default port 9100 for metrics endpoint
## Metrics
Node Exporter exposes metrics at `:9100/metrics` including:
- **node_cpu_seconds_total**: CPU usage statistics
- **node_memory_MemAvailable_bytes**: Memory availability
- **node_filesystem_avail_bytes**: Filesystem availability
- **node_network_receive_bytes_total**: Network receive statistics
- **node_disk_io_time_seconds_total**: Disk I/O statistics
## Prometheus Integration
To scrape metrics with Prometheus, add this job configuration:
```yaml
scrape_configs:
- job_name: node-exporter
kubernetes_sd_configs:
- role: pod
namespaces:
names:
- monitoring
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
action: keep
regex: prometheus-node-exporter
- source_labels: [__meta_kubernetes_pod_ip]
action: replace
target_label: __address__
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:9100
```
## RBAC
Node Exporter requires appropriate permissions to access host-level metrics. The Helm chart creates the necessary ServiceAccount, ClusterRole, and ClusterRoleBinding.
## Uninstall
To uninstall Node Exporter:
```bash
make uninstall
```

View File

@@ -0,0 +1,81 @@
# Prometheus Node Exporter
## 简介
Prometheus Node Exporter 是一个用于 *NIX 内核暴露的硬件和操作系统指标的 Prometheus 导出器。它从 Linux/Unix 系统收集系统级指标,如 CPU、内存、磁盘和网络使用情况。
此 Helm Chart 将 Node Exporter 作为 DaemonSet 部署在 Kubernetes 上,确保在每个节点上运行一个实例来收集主机级指标。
## 安装
安装 Node Exporter
```bash
make install
```
## 使用
安装完成后:
```bash
# 检查 Node Exporter pod 是否在所有节点上运行
kubectl get pods -n monitoring -l app.kubernetes.io/name=prometheus-node-exporter
# 查看指标
curl http://<node-ip>:9100/metrics
```
## 配置
默认配置包括:
- DaemonSet 部署(每个节点一个 pod）
- 用于系统指标的主机网络访问
- 用于进程指标的主机 PID 访问
- 指标端点的默认端口 9100
## 指标
Node Exporter 在 `:9100/metrics` 暴露的指标包括:
- **node_cpu_seconds_total**: CPU 使用统计
- **node_memory_MemAvailable_bytes**: 内存可用性
- **node_filesystem_avail_bytes**: 文件系统可用性
- **node_network_receive_bytes_total**: 网络接收统计
- **node_disk_io_time_seconds_total**: 磁盘 I/O 统计
## Prometheus 集成
要使用 Prometheus 抓取指标,请添加此作业配置:
```yaml
scrape_configs:
- job_name: node-exporter
kubernetes_sd_configs:
- role: pod
namespaces:
names:
- monitoring
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
action: keep
regex: prometheus-node-exporter
- source_labels: [__meta_kubernetes_pod_ip]
action: replace
target_label: __address__
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:9100
```
## RBAC
Node Exporter 需要适当的权限来访问主机级指标。Helm Chart 会创建必要的 ServiceAccount、ClusterRole 和 ClusterRoleBinding。
## 卸载
卸载 Node Exporter
```bash
make uninstall
```

View File

@@ -0,0 +1,59 @@
# Prometheus Node Exporter Configuration
# https://github.com/prometheus-community/helm-charts

# Image configuration
image:
  repository: quay.io/prometheus/node-exporter
  tag: v1.7.0

# Run as DaemonSet
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 500m
    memory: 256Mi

# Service configuration
service:
  type: ClusterIP
  port: 9100
  targetPort: 9100

# Host networking — metrics reflect the node, not the pod network namespace.
hostNetwork: true

# Host PID — needed for process-level metrics.
hostPID: true

# Security context
# NOTE(review): privileged widens the attack surface; confirm the chart
# consumes this key (upstream also exposes containerSecurityContext).
securityContext:
  privileged: true

# RBAC
rbac:
  create: true
  pspEnabled: false

# Service account
serviceAccount:
  create: true

# Pod security policy
podSecurityPolicy:
  enabled: false

# Prometheus monitoring
# Set monitor.enabled to true to create a ServiceMonitor (requires the
# Prometheus Operator CRDs to be installed).
prometheus:
  monitor:
    enabled: false

# Node selector
nodeSelector: {}

# Tolerations
tolerations: []

# Affinity
affinity: {}

22
src/opik/Makefile Normal file
View File

@@ -0,0 +1,22 @@
# Helm deployment settings for Opik (Comet's LLM evaluation platform).
HELM_RELEASE_NAME ?= opik
HELM_APPLICATION_NAME ?= opik
HELM_NAMESPACE ?= opik
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
# Declared (empty) for consistency with every other service Makefile,
# which defines the full set of HELM_OCI_* variables for base.mk.
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= opik
HELM_REPO_URL ?= https://comet-ml.github.io/opik
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Version for all Opik components.
# Override with e.g. `VERSION=1.0.0 make install`.
VERSION ?= latest
HELM_INSTALL_ARGS ?= \
    --set component.backend.image.tag=$(VERSION) \
    --set component.python-backend.image.tag=$(VERSION) \
    --set component.python-backend.env.PYTHON_CODE_EXECUTOR_IMAGE_TAG="$(VERSION)" \
    --set component.frontend.image.tag=$(VERSION)
include ../_template/base.mk

127
src/opik/README.md Normal file
View File

@@ -0,0 +1,127 @@
# Opik
## Introduction
Opik is an open-source LLM evaluation framework by Comet that helps developers track, evaluate, and optimize their LLM applications. It provides comprehensive observability for LLM calls, prompt management, and evaluation metrics.
## Installation
To install Opik, run:
```bash
make install
```
By default, this will install the latest version of Opik. To install a specific version:
```bash
VERSION=1.0.0 make install
```
## Usage
After installation, verify the deployment:
```bash
kubectl get pods -n opik
```
To access Opik, port-forward the frontend service:
```bash
kubectl port-forward svc/opik-frontend 5173:5173 -n opik
```
Then access at <http://localhost:5173>
## Configuration
### Using External ClickHouse
To use an external ClickHouse installation instead of the built-in one:
```yaml
component:
backend:
waitForClickhouse:
clickhouse:
host: your-clickhouse-host
port: 8123
protocol: http
env:
ANALYTICS_DB_MIGRATIONS_URL: 'jdbc:clickhouse://your-clickhouse-host:8123'
ANALYTICS_DB_HOST: your-clickhouse-host
ANALYTICS_DB_DATABASE_NAME: opik
ANALYTICS_DB_MIGRATIONS_USER: opik
ANALYTICS_DB_USERNAME: opik
ANALYTICS_DB_MIGRATIONS_PASS: your-password
ANALYTICS_DB_PASS: your-password
clickhouse:
enabled: false
```
### Configuring S3 Storage
To use AWS S3 for storage:
```yaml
component:
backend:
env:
S3_BUCKET: your-bucket-name
S3_REGION: us-east-1
AWS_ACCESS_KEY_ID: your-access-key
AWS_SECRET_ACCESS_KEY: your-secret-key
```
### Enabling Ingress
To expose Opik via Ingress:
```yaml
component:
frontend:
ingress:
enabled: true
ingressClassName: nginx
hosts:
- host: opik.example.com
paths:
- path: /
port: 5173
pathType: Prefix
```
## Uninstallation
Before uninstalling, remove the finalizer on the ClickHouse resource:
```bash
kubectl patch -n opik chi opik-clickhouse --type json --patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]'
```
Then uninstall:
```bash
make uninstall
```
## Version Compatibility
Ensure your Python SDK version matches your Kubernetes deployment version:
```bash
pip show opik
```
To update the Python SDK:
```bash
pip install --upgrade opik==<version>
```
## Documentation
- [Opik Documentation](https://www.comet.com/docs/opik/)
- [Helm Chart Documentation](https://comet-ml.github.io/opik/)

127
src/opik/README.zh.md Normal file
View File

@@ -0,0 +1,127 @@
# Opik
## 简介
Opik 是 Comet 开发的开源 LLM 评估框架,帮助开发者跟踪、评估和优化他们的 LLM 应用程序。它为 LLM 调用、提示管理和评估指标提供全面的可观测性。
## 安装
要安装 Opik请运行
```bash
make install
```
默认情况下,这将安装最新版本的 Opik。要安装特定版本
```bash
VERSION=1.0.0 make install
```
## 使用
安装后,验证部署:
```bash
kubectl get pods -n opik
```
要访问 Opik请端口转发前端服务
```bash
kubectl port-forward svc/opik-frontend 5173:5173 -n opik
```
然后在 <http://localhost:5173> 访问
## 配置
### 使用外部 ClickHouse
要使用外部 ClickHouse 安装而不是内置的:
```yaml
component:
backend:
waitForClickhouse:
clickhouse:
host: your-clickhouse-host
port: 8123
protocol: http
env:
ANALYTICS_DB_MIGRATIONS_URL: 'jdbc:clickhouse://your-clickhouse-host:8123'
ANALYTICS_DB_HOST: your-clickhouse-host
ANALYTICS_DB_DATABASE_NAME: opik
ANALYTICS_DB_MIGRATIONS_USER: opik
ANALYTICS_DB_USERNAME: opik
ANALYTICS_DB_MIGRATIONS_PASS: your-password
ANALYTICS_DB_PASS: your-password
clickhouse:
enabled: false
```
### 配置 S3 存储
要使用 AWS S3 进行存储:
```yaml
component:
backend:
env:
S3_BUCKET: your-bucket-name
S3_REGION: us-east-1
AWS_ACCESS_KEY_ID: your-access-key
AWS_SECRET_ACCESS_KEY: your-secret-key
```
### 启用 Ingress
要通过 Ingress 暴露 Opik
```yaml
component:
frontend:
ingress:
enabled: true
ingressClassName: nginx
hosts:
- host: opik.example.com
paths:
- path: /
port: 5173
pathType: Prefix
```
## 卸载
在卸载之前,请移除 ClickHouse 资源上的 finalizer
```bash
kubectl patch -n opik chi opik-clickhouse --type json --patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]'
```
然后卸载:
```bash
make uninstall
```
## 版本兼容性
确保你的 Python SDK 版本与 Kubernetes 部署版本匹配:
```bash
pip show opik
```
要更新 Python SDK
```bash
pip install --upgrade opik==<version>
```
## 文档
- [Opik 文档](https://www.comet.com/docs/opik/)
- [Helm Chart 文档](https://comet-ml.github.io/opik/)

50
src/opik/values.yaml Normal file
View File

@@ -0,0 +1,50 @@
# Opik Helm Chart Values
# Documentation: https://www.comet.com/docs/opik/self-host/kubernetes/

component:
  backend:
    image:
      # Overridden at install time by the Makefile (--set ...image.tag=$(VERSION)).
      tag: latest
    env:
      # Opt out of anonymous usage reporting; quoted so it stays a string.
      OPIK_USAGE_REPORT_ENABLED: 'false'
      # S3_BUCKET: ""
      # S3_REGION: ""
      # AWS_ACCESS_KEY_ID: ""
      # AWS_SECRET_ACCESS_KEY: ""
  python-backend:
    image:
      tag: latest
    env:
      PYTHON_CODE_EXECUTOR_IMAGE_TAG: latest
  frontend:
    image:
      tag: latest
    ingress:
      enabled: false
      # ingressClassName: nginx
      # annotations: {}
      # hosts:
      #   - host: opik.example.com
      #     paths:
      #       - path: /
      #         port: 5173
      #         pathType: Prefix
      # tls:
      #   enabled: true
      #   hosts:
      #     - opik.example.com
      #   secretName: opik-tls

# ClickHouse configuration
# Set enabled: false to use an external ClickHouse (see README).
clickhouse:
  enabled: true
  # replicasCount: 1
  # service:
  #   serviceTemplate: clickhouse-cluster-svc-lb-template
  #   annotations: {}

# ZooKeeper configuration (required for ClickHouse replication)
zookeeper:
  enabled: true

View File

@@ -1,8 +1,8 @@
HELM_RELEASE_NAME ?= phoenix
HELM_APPLICATION_NAME ?= phoenix
HELM_APPLICATION_NAME ?= phoenix-helm
HELM_NAMESPACE ?= phoenix
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?= 4.0.37
HELM_CHART_VERSION ?= 5.0.5
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?= docker.io
HELM_OCI_NAMESPACE ?= arizephoenix
@@ -10,6 +10,6 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/phoenix-helm
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

View File

@@ -1,6 +1,6 @@
HELM_RELEASE_NAME ?= rabbitmq-cluster-operator
HELM_APPLICATION_NAME ?= rabbitmq-cluster-operator
HELM_NAMESPACE ?= rabbitmq-cluster-operator
HELM_RELEASE_NAME ?= rabbitmq
HELM_APPLICATION_NAME ?= rabbitmq
HELM_NAMESPACE ?= rabbitmq
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?= 0.2.0
HELM_VALUES_FILE ?= ./values.yaml
@@ -8,8 +8,28 @@ HELM_OCI_REGISTRY ?= docker.io
HELM_OCI_NAMESPACE ?= cloudpirates
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?=
HELM_REPO_URL ?=
HELM_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/$(HELM_APPLICATION_NAME)
# Operator configuration
OPERATOR_RELEASE_NAME ?= rabbitmq-cluster-operator
OPERATOR_NAMESPACE ?= rabbitmq-operator
OPERATOR_CHART_REPO ?= oci://$(HELM_OCI_REGISTRY)/$(HELM_OCI_NAMESPACE)/rabbitmq-cluster-operator
OPERATOR_CHART_VERSION ?= $(HELM_CHART_VERSION)
OPERATOR_VALUES_FILE ?= ./values.yaml
# For RabbitMQ Cluster Operator, we only install the operator
# RabbitMQ clusters are created using RabbitmqCluster CRDs after operator is installed
include ../_template/base.mk
include ../_template/operator.mk
# Override install target to only install operator
.PHONY: install
install: install-operator
# Override uninstall target to only uninstall operator
.PHONY: uninstall
uninstall: uninstall-operator
# Override verify target
.PHONY: verify
verify: verify-operator

View File

@@ -10,6 +10,6 @@ HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= goodrain
HELM_REPO_URL ?= https://openchart.goodrain.com/goodrain/rainbond
HELM_CHART_REPO ?= goodrain/rainbond
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
include ../_template/base.mk

15
src/rocketmq/Makefile Normal file
View File

@@ -0,0 +1,15 @@
# Helm deployment settings for Apache RocketMQ (official rocketmq-helm chart).
HELM_RELEASE_NAME ?= rocketmq
HELM_APPLICATION_NAME ?= rocketmq
HELM_NAMESPACE ?= rocketmq
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= rocketmq
HELM_REPO_URL ?= https://apache.github.io/rocketmq-helm
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Shared install/uninstall/verify targets.
include ../_template/base.mk

72
src/rocketmq/README.md Normal file
View File

@@ -0,0 +1,72 @@
# Apache RocketMQ
## Introduction
Apache RocketMQ is a distributed messaging and streaming platform with low latency, high performance and reliability, trillion-level capacity and flexible scalability.
This Helm chart deploys Apache RocketMQ on Kubernetes, including NameServer, Broker, and Console components.
## Installation
To install RocketMQ, run:
```bash
make install
```
## Usage
After installation:
```bash
# Check if RocketMQ components are running
kubectl get pods -n rocketmq
# Port forward to access NameServer
kubectl port-forward svc/rocketmq-nameserver -n rocketmq 9876:9876
# Port forward to access Console (if enabled)
kubectl port-forward svc/rocketmq-console -n rocketmq 8080:8080
```
## Configuration
The default configuration includes:
- 2 NameServer replicas for high availability
- 1 Broker master with 1 slave (2m-2s sync_flush)
- Console for management UI
- Persistent storage for message data
## Components
- **NameServer**: Service discovery and routing
- **Broker**: Message storage and delivery
- **Console**: Web UI for management and monitoring
## Client Connection
```java
Properties props = new Properties();
props.setProperty("namesrvAddr", "rocketmq-nameserver.rocketmq:9876");
DefaultMQProducer producer = new DefaultMQProducer("TestProducer", props);
```
## Storage
Configure persistent storage for brokers:
```yaml
broker:
storage:
size: 50Gi
class: standard
```
## Uninstall
To uninstall RocketMQ:
```bash
make uninstall
```

72
src/rocketmq/README.zh.md Normal file
View File

@@ -0,0 +1,72 @@
# Apache RocketMQ
## 简介
Apache RocketMQ 是一个分布式消息和流平台,具有低延迟、高性能和可靠性、万亿级容量和灵活的扩展性。
此 Helm Chart 在 Kubernetes 上部署 Apache RocketMQ包括 NameServer、Broker 和 Console 组件。
## 安装
安装 RocketMQ
```bash
make install
```
## 使用
安装完成后:
```bash
# 检查 RocketMQ 组件是否运行
kubectl get pods -n rocketmq
# 端口转发以访问 NameServer
kubectl port-forward svc/rocketmq-nameserver -n rocketmq 9876:9876
# 端口转发以访问 Console（如果启用）
kubectl port-forward svc/rocketmq-console -n rocketmq 8080:8080
```
## 配置
默认配置包括:
- 2 个 NameServer 副本用于高可用
- 1 个 Broker master 和 1 个 slave (2m-2s sync_flush)
- 用于管理 UI 的 Console
- 消息数据的持久存储
## 组件
- **NameServer**: 服务发现和路由
- **Broker**: 消息存储和传递
- **Console**: 管理和监控的 Web UI
## 客户端连接
```java
Properties props = new Properties();
props.setProperty("namesrvAddr", "rocketmq-nameserver.rocketmq:9876");
DefaultMQProducer producer = new DefaultMQProducer("TestProducer", props);
```
## 存储
为 broker 配置持久存储:
```yaml
broker:
storage:
size: 50Gi
class: standard
```
## 卸载
卸载 RocketMQ
```bash
make uninstall
```

53
src/rocketmq/values.yaml Normal file
View File

@@ -0,0 +1,53 @@
# Apache RocketMQ Configuration
# https://github.com/apache/rocketmq-helm

# NameServer configuration (service discovery and routing)
nameserver:
  replicas: 2
  resources:
    requests:
      cpu: 200m
      memory: 512Mi
    limits:
      cpu: 500m
      memory: 1Gi

# Broker configuration (message storage and delivery)
broker:
  # NOTE(review): both `size` and `replicas` describe the master/replica
  # topology here; confirm which key the chart actually consumes and
  # remove the other.
  size:
    master: 1
    replica: 1
  replicas:
    master: 1
    slave: 1
  resources:
    requests:
      cpu: 500m
      memory: 1Gi
    limits:
      cpu: 1000m
      memory: 2Gi
  persistence:
    enabled: true
    size: 20Gi

# Console configuration (management web UI)
console:
  enabled: true
  replicas: 1
  service:
    type: ClusterIP
    port: 8080

# Service configuration
service:
  nameserver:
    type: ClusterIP
    port: 9876
  broker:
    type: ClusterIP

# Image configuration
image:
  repository: apache/rocketmq
  tag: 5.1.4

15
src/verdaccio/Makefile Normal file
View File

@@ -0,0 +1,15 @@
# Helm deployment settings for Verdaccio (official verdaccio charts repo).
HELM_RELEASE_NAME ?= verdaccio
HELM_APPLICATION_NAME ?= verdaccio
HELM_NAMESPACE ?= verdaccio
HELM_DIR ?= ./helm
HELM_CHART_VERSION ?=
HELM_VALUES_FILE ?= ./values.yaml
HELM_OCI_REGISTRY ?=
HELM_OCI_NAMESPACE ?=
HELM_OCI_USERNAME ?=
HELM_OCI_PASSWORD ?=
HELM_REPO_NAME ?= verdaccio
HELM_REPO_URL ?= https://charts.verdaccio.org
HELM_CHART_REPO ?= $(HELM_REPO_NAME)/$(HELM_APPLICATION_NAME)
# Shared install/uninstall/verify targets.
include ../_template/base.mk

106
src/verdaccio/README.md Normal file
View File

@@ -0,0 +1,106 @@
# Verdaccio
## Introduction
Verdaccio is a lightweight private npm proxy registry built in Node.js. It allows you to have a local npm registry with zero configuration, and provides features such as:
- Private package hosting
- Proxy to npmjs.org (caching)
- Local storage for offline access
- Plugin support for authentication and storage
This Helm chart deploys Verdaccio on Kubernetes for hosting private npm packages.
## Installation
To install Verdaccio, run:
```bash
make install
```
## Usage
After installation:
```bash
# Port forward to access Verdaccio
kubectl port-forward svc/verdaccio -n verdaccio 4873:4873
```
Then configure npm to use your private registry:
```bash
# Set registry
npm set registry http://localhost:4873
# Login (if authentication is enabled)
npm adduser --registry http://localhost:4873
# Publish a package
npm publish
# Install packages
npm install
```
## Configuration
The default configuration includes:
- Anonymous access for reading packages
- Proxy to npmjs.org for packages not found locally
- Local file system storage
- Basic web interface enabled
## Storage
Enable persistent storage for production:
```yaml
persistence:
enabled: true
size: 10Gi
```
## Authentication
Enable authentication in config.yaml:
```yaml
auth:
htpasswd:
file: /verdaccio/conf/htpasswd
max_users: 1000
```
## Uplink Configuration
Configure multiple uplinks for package proxying:
```yaml
uplinks:
npmjs:
url: https://registry.npmjs.org/
yarn:
url: https://registry.yarnpkg.com
```
## Package Access
Configure package access permissions:
```yaml
packages:
'@mycompany/*':
access: $authenticated
publish: $authenticated
```
## Uninstall
To uninstall Verdaccio:
```bash
make uninstall
```

106
src/verdaccio/README.zh.md Normal file
View File

@@ -0,0 +1,106 @@
# Verdaccio
## 简介
Verdaccio 是一个用 Node.js 构建的轻量级私有 npm 代理 registry。它允许您拥有零配置的本地 npm registry并提供以下功能
- 私有包托管
- 代理到 npmjs.org缓存
- 本地存储供离线访问
- 支持认证和存储插件
此 Helm Chart 在 Kubernetes 上部署 Verdaccio用于托管私有 npm 包。
## 安装
安装 Verdaccio
```bash
make install
```
## 使用
安装完成后:
```bash
# 端口转发以访问 Verdaccio
kubectl port-forward svc/verdaccio -n verdaccio 4873:4873
```
然后配置 npm 以使用您的私有 registry
```bash
# 设置 registry
npm set registry http://localhost:4873
# 登录(如果启用了认证)
npm adduser --registry http://localhost:4873
# 发布包
npm publish
# 安装包
npm install
```
## 配置
默认配置包括:
- 匿名访问读取包
- 代理到 npmjs.org 获取本地找不到的包
- 本地文件系统存储
- 启用基本 Web 界面
## 存储
为生产环境启用持久存储:
```yaml
persistence:
enabled: true
size: 10Gi
```
## 认证
在 config.yaml 中启用认证:
```yaml
auth:
htpasswd:
file: /verdaccio/conf/htpasswd
max_users: 1000
```
## 上行链路配置
配置多个上行链路用于包代理:
```yaml
uplinks:
npmjs:
url: https://registry.npmjs.org/
yarn:
url: https://registry.yarnpkg.com
```
## 包访问
配置包访问权限:
```yaml
packages:
'@mycompany/*':
access: $authenticated
publish: $authenticated
```
## 卸载
卸载 Verdaccio
```bash
make uninstall
```

67
src/verdaccio/values.yaml Normal file
View File

@@ -0,0 +1,67 @@
# Verdaccio Configuration
# https://github.com/verdaccio/charts

# Image configuration
image:
  repository: verdaccio/verdaccio
  # Quoted: an unquoted 5.29 would be parsed as a YAML float, not a
  # string tag — version-like values must always be quoted.
  tag: "5.29"

# Replicas
replicaCount: 1

# Service configuration
service:
  type: ClusterIP
  port: 4873

# Resource configuration
resources:
  requests:
    cpu: 100m
    memory: 256Mi
  limits:
    cpu: 500m
    memory: 512Mi

# Persistence configuration
# Enable in production so published packages survive pod restarts.
persistence:
  enabled: false
  size: 10Gi

# ConfigMap configuration
# The embedded config.yaml is Verdaccio's own configuration file,
# mounted into the container as-is.
configMap:
  enabled: true
  data:
    config.yaml: |
      storage: /verdaccio/storage/data
      web:
        enable: true
        title: Verdaccio
      auth:
        htpasswd:
          file: /verdaccio/storage/htpasswd
          max_users: 1000
      uplinks:
        npmjs:
          url: https://registry.npmjs.org/
      packages:
        '@*/*':
          access: $all
          publish: $authenticated
          proxy: npmjs
        '**':
          access: $all
          publish: $authenticated
          proxy: npmjs
      middlewares:
        audit:
          enabled: true
      logs:
        - { type: stdout, format: pretty, level: http }
      max_body_size: 100mb
      listen:
        - 0.0.0.0:4873

# Ingress configuration (disabled by default)
ingress:
  enabled: false